author     Linus Torvalds <torvalds@linux-foundation.org>  2024-03-12 17:44:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-03-12 17:44:08 -0700
commit     9187210eee7d87eea37b45ea93454a88681894a4 (patch)
tree       31b4610e62cdd5e1dfb700014aa619e41145d7d3
parent     1f440397665f4241346e4cc6d93f8b73880815d1 (diff)
parent     ed1f164038b50c5864aa85389f3ffd456f050cca (diff)
Merge tag 'net-next-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core & protocols:

   - Large effort by Eric to lower rtnl_lock pressure and remove locks:
      - Make commonly used parts of rtnetlink (address, route dumps
        etc) lockless, protected by RCU instead of rtnl_lock.
      - Add a netns exit callback which already holds rtnl_lock,
        allowing netns exit to take rtnl_lock once in the core instead
        of once for each driver / callback.
      - Remove locks / serialization in the socket diag interface.
      - Remove 6 calls to synchronize_rcu() while holding rtnl_lock.
      - Remove the dev_base_lock, depend on RCU where necessary.

   - Support busy polling on a per-epoll context basis. Poll length and
     budget parameters can be set independently of system defaults (see
     the sketch after this log).

   - Introduce struct net_hotdata, to make sure read-mostly global
     config variables fit in as few cache lines as possible.

   - Add optional per-nexthop statistics to ease monitoring / debug of
     ECMP imbalance problems.

   - Support TCP_NOTSENT_LOWAT in MPTCP (see the sketch after this
     log).

   - Ensure that IPv6 temporary addresses' preferred lifetimes are long
     enough, compared to other configured lifetimes, and at least 2 sec.

   - Support forwarding of ICMP Error messages in IPSec, per RFC 4301.

   - Add support for the independent control state machine for bonding
     per IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled
     control state machine.

   - Add "network ID" to MCTP socket APIs to support hosts with
     multiple disjoint MCTP networks.

   - Re-use the mono_delivery_time skbuff bit for packets which user
     space wants to be sent at a specified time. Maintain the timing
     information while traversing veth links, bridge etc.

   - Take advantage of MSG_SPLICE_PAGES for RxRPC DATA and ACK packets.

   - Simplify many places iterating over netdevs by using an xarray
     instead of a hash table walk (hash table remains in place, for use
     on fastpaths).

   - Speed up scanning for expired routes by keeping a dedicated list.

   - Speed up "generic" XDP by trying harder to avoid large allocations.

   - Support attaching arbitrary metadata to netconsole messages.

  Things we sprinkled into general kernel code:

   - Enforce VM_IOREMAP flag and range in ioremap_page_range and
     introduce VM_SPARSE kind and vm_area_[un]map_pages (used by
     bpf_arena).

   - Rework selftest harness to enable the use of the full range of
     ksft exit codes (pass, fail, skip, xfail, xpass).

  Netfilter:

   - Allow userspace to define a table that is exclusively owned by a
     daemon (via netlink socket aliveness) without auto-removing this
     table when the userspace program exits. Such a table gets marked
     as orphaned and a restarting management daemon can re-attach /
     regain ownership.

   - Speed up element insertions to nftables' concatenated-ranges set
     type. Compact a few related data structures.

  BPF:

   - Add BPF token support for delegating a subset of BPF subsystem
     functionality from privileged system-wide daemons such as systemd
     through special mount options for userns-bound BPF fs to a trusted
     & unprivileged application.

   - Introduce bpf_arena, a sparse shared memory region between a BPF
     program and user space where structures inside the arena can have
     pointers to other areas of the arena, and pointers work seamlessly
     for both user-space programs and BPF programs.

   - Introduce the may_goto instruction that is a contract between the
     verifier and the program. The verifier allows the program to loop
     assuming it's behaving well, but reserves the right to terminate
     it.

   - Extend the BPF verifier to enable static subprog calls in spin
     lock critical sections.

   - Support registration of struct_ops types from modules, which helps
     projects like fuse-bpf that seek to implement a new struct_ops
     type.

   - Add support for retrieval of cookies for perf/kprobe multi links.

   - Support arbitrary TCP SYN cookie generation / validation in the TC
     layer with BPF to allow creating SYN flood handling in BPF
     firewalls.

   - Add code generation to inline the bpf_kptr_xchg() helper, which
     improves performance when stashing/popping the allocated BPF
     objects.

  Wireless:

   - Add SPP (signaling and payload protected) AMSDU support.

   - Support wider bandwidth OFDMA, as required for EHT operation.

  Driver API:

   - Major overhaul of the Energy Efficient Ethernet internals to
     support new link modes (2.5GE, 5GE), share more code between
     drivers (especially those using phylib), and encourage more
     uniform behavior. Convert and clean up drivers.

   - Define an API for querying per netdev queue statistics from
     drivers.

   - IPSec: account in global stats for fully offloaded sessions.

   - Create a concept of Ethernet PHY Packages at the Device Tree
     level, to allow parameterizing the existing PHY package code.

   - Enable Rx hashing (RSS) on GTP protocol fields.

  Misc:

   - Improvements and refactoring all over networking selftests.

   - Create uniform module aliases for TC classifiers, actions, and
     packet schedulers to simplify creating modprobe policies.

   - Address all missing MODULE_DESCRIPTION() warnings in networking.

   - Extend the Netlink descriptions in YAML to cover message
     encapsulation or "Netlink polymorphism", where interpretation of
     nested attributes depends on link type, classifier type or some
     other "class type".

  Drivers:

   - Ethernet high-speed NICs:
      - Add a new driver for Marvell's Octeon PCI Endpoint NIC VF.
      - Intel (100G, ice, idpf):
         - support E825-C devices
      - nVidia/Mellanox:
         - support devices with one port and multiple PCIe links
      - Broadcom (bnxt):
         - support n-tuple filters
         - support configuring the RSS key
      - Wangxun (ngbe/txgbe):
         - implement irq_domain for TXGBE's sub-interrupts
      - Pensando/AMD:
         - support XDP
         - optimize queue submission and wakeup handling (+17% bps)
         - optimize struct layout, saving 28% of memory on queues

   - Ethernet NICs embedded and virtual:
      - Google cloud vNIC:
         - refactor driver to perform memory allocations for new queue
           config before stopping and freeing the old queue memory
      - Synopsys (stmmac):
         - obey queueMaxSDU and implement counters required by 802.1Qbv
      - Renesas (ravb):
         - support packet checksum offload
         - suspend to RAM and runtime PM support

   - Ethernet switches:
      - nVidia/Mellanox:
         - support for nexthop group statistics
      - Microchip:
         - ksz8: implement PHY loopback
         - add support for KSZ8567, a 7-port 10/100Mbps switch

   - PTP:
      - New driver for RENESAS FemtoClock3 Wireless clock generator.
      - Support OCP PTP cards designed and built by Adva.

   - CAN:
      - Support recvmsg() flags for own, local and remote traffic on
        CAN BCM sockets (see the sketch after this log).
      - Support for the esd GmbH PCIe/402 CAN device family.
      - m_can:
         - Rx/Tx submission coalescing
         - wake on frame Rx

   - WiFi:
      - Intel (iwlwifi):
         - enable signaling and payload protected A-MSDUs
         - support wider-bandwidth OFDMA
         - support for new devices
         - bump FW API to 89 for AX devices; 90 for BZ/SC devices
      - MediaTek (mt76):
         - mt7915: newer ADIE version support
         - mt7925: radio temperature sensor support
      - Qualcomm (ath11k):
         - support 6 GHz station power modes: Low Power Indoor (LPI),
           Standard Power (SP) and Very Low Power (VLP)
         - QCA6390 & WCN6855: support 2 concurrent station interfaces
         - QCA2066 support
      - Qualcomm (ath12k):
         - refactoring in preparation for Multi-Link Operation (MLO)
           support
         - 1024 Block Ack window size support
         - firmware-2.bin support
         - support having multiple identical PCI devices (firmware
           needs to have ATH12K_FW_FEATURE_MULTI_QRTR_ID)
         - QCN9274: support split-PHY devices
         - WCN7850: enable Power Save Mode in station mode
         - WCN7850: P2P support
      - RealTek:
         - rtw88: support for more rtw8811cu and rtw8821cu devices
         - rtw89: support SCAN_RANDOM_SN and SET_SCAN_DWELL
         - rtlwifi: speed up USB firmware initialization
         - rtl8xxxu:
            - RTL8188F: concurrent interface support
            - Channel Switch Announcement (CSA) support in AP mode
      - Broadcom (brcmfmac):
         - per-vendor feature support
         - per-vendor SAE password setup
         - DMI nvram filename quirk for ACEPC W5 Pro"

* tag 'net-next-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2255 commits)
  nexthop: Fix splat with CONFIG_DEBUG_PREEMPT=y
  nexthop: Fix out-of-bounds access during attribute validation
  nexthop: Only parse NHA_OP_FLAGS for dump messages that require it
  nexthop: Only parse NHA_OP_FLAGS for get messages that require it
  bpf: move sleepable flag from bpf_prog_aux to bpf_prog
  bpf: hardcode BPF_PROG_PACK_SIZE to 2MB * num_possible_nodes()
  selftests/bpf: Add kprobe multi triggering benchmarks
  ptp: Move from simple ida to xarray
  vxlan: Remove generic .ndo_get_stats64
  vxlan: Do not alloc tstats manually
  devlink: Add comments to use netlink gen tool
  nfp: flower: handle acti_netdevs allocation failure
  net/packet: Add getsockopt support for PACKET_COPY_THRESH
  net/netlink: Add getsockopt support for NETLINK_LISTEN_ALL_NSID
  selftests/bpf: Add bpf_arena_htab test.
  selftests/bpf: Add bpf_arena_list test.
  selftests/bpf: Add unit tests for bpf_arena_alloc/free_pages
  bpf: Add helper macro bpf_addr_space_cast()
  libbpf: Recognize __arena global variables.
  bpftool: Recognize arena map type
  ...
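The per-epoll busy-polling item above maps to a new ioctl on the epoll file descriptor. A minimal sketch, assuming the EPIOCSPARAMS / struct epoll_params uAPI that landed in this cycle (check your linux/eventpoll.h for the exact field names):

  /* Sketch: enable busy polling for one epoll context only,
   * independent of the net.core.busy_{read,poll} sysctls. */
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <linux/eventpoll.h>	/* EPIOCSPARAMS, struct epoll_params */

  int set_epoll_busy_poll(int epfd)
  {
  	struct epoll_params params = {
  		.busy_poll_usecs  = 200,	/* poll up to 200 us per wait */
  		.busy_poll_budget = 16,		/* packets per poll attempt */
  		.prefer_busy_poll = 1,		/* favor polling over IRQ wakeups */
  	};

  	if (ioctl(epfd, EPIOCSPARAMS, &params) < 0) {
  		perror("EPIOCSPARAMS");
  		return -1;
  	}
  	return 0;
  }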
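TCP_NOTSENT_LOWAT on MPTCP behaves like its plain-TCP counterpart: it caps how much not-yet-sent data may sit in the socket before it stops reporting as writable. A minimal sketch; the IPPROTO_MPTCP fallback define is only for older userspace headers:

  /* Sketch: cap queued-but-unsent data on an MPTCP socket. */
  #include <stdio.h>
  #include <netinet/in.h>
  #include <netinet/tcp.h>
  #include <sys/socket.h>

  #ifndef IPPROTO_MPTCP
  #define IPPROTO_MPTCP 262	/* for older userspace headers */
  #endif

  int mptcp_socket_with_lowat(void)
  {
  	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
  	int lowat = 128 * 1024;	/* at most 128 KiB of unsent data */

  	if (fd < 0)
  		return -1;
  	/* MPTCP reuses the TCP-level socket option namespace. */
  	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
  		       &lowat, sizeof(lowat)) < 0)
  		perror("TCP_NOTSENT_LOWAT");
  	return fd;
  }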
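For the CAN BCM recvmsg() item, the new flags presumably mirror what CAN_RAW already documents: MSG_DONTROUTE for frames generated on the local host, MSG_CONFIRM for frames sent by the receiving socket itself. A sketch under that assumption; see Documentation/networking/can.rst in this merge for the authoritative semantics:

  /* Sketch: classify received BCM traffic by msg_flags, assuming
   * the CAN_RAW flag semantics now also apply to BCM sockets. */
  #include <stdio.h>
  #include <sys/socket.h>
  #include <sys/uio.h>
  #include <linux/can.h>
  #include <linux/can/bcm.h>

  void classify_bcm_traffic(int bcm_fd)
  {
  	struct {
  		struct bcm_msg_head head;
  		struct can_frame frame;
  	} msg;
  	struct iovec iov = { .iov_base = &msg, .iov_len = sizeof(msg) };
  	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };

  	if (recvmsg(bcm_fd, &mh, 0) < 0) {
  		perror("recvmsg");
  		return;
  	}
  	if (mh.msg_flags & MSG_CONFIRM)
  		puts("own traffic (sent by this socket)");
  	else if (mh.msg_flags & MSG_DONTROUTE)
  		puts("local traffic (sent by this host)");
  	else
  		puts("remote traffic");
  }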
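The shortlog entry "net/packet: Add getsockopt support for PACKET_COPY_THRESH" makes a previously write-only AF_PACKET option readable. A sketch of reading it back:

  /* Sketch: read back the copy threshold that was previously
   * settable but not gettable on AF_PACKET sockets. */
  #include <stdio.h>
  #include <sys/socket.h>
  #include <linux/if_packet.h>

  int read_copy_thresh(int pkt_fd)
  {
  	int thresh = 0;
  	socklen_t len = sizeof(thresh);

  	if (getsockopt(pkt_fd, SOL_PACKET, PACKET_COPY_THRESH,
  		       &thresh, &len) < 0) {
  		perror("PACKET_COPY_THRESH");
  		return -1;
  	}
  	printf("copy threshold: %d bytes\n", thresh);
  	return thresh;
  }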
-rw-r--r--  .get_maintainer.ignore | 1
-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-queues | 23
-rw-r--r--  Documentation/admin-guide/sysctl/net.rst | 5
-rw-r--r--  Documentation/bpf/kfuncs.rst | 8
-rw-r--r--  Documentation/bpf/map_lpm_trie.rst | 2
-rw-r--r--  Documentation/bpf/standardization/instruction-set.rst | 594
-rw-r--r--  Documentation/bpf/verifier.rst | 2
-rw-r--r--  Documentation/dev-tools/kselftest.rst | 12
-rw-r--r--  Documentation/devicetree/bindings/leds/common.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-bcm63138.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-bcm6328.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-bcm6358.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-pwm.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/can/tcan4x5x.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/can/xilinx,can.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/cdns,macb.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/ar9331.txt | 147
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml | 161
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/realtek.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-phy-package.yaml | 52
-rw-r--r--  Documentation/devicetree/bindings/net/fsl,fec.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/qca,qca808x.yaml | 54
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ethqos.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipa.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,qca807x.yaml | 184
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,etheravb.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/snps,dwmac.yaml | 17
-rw-r--r--  Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml | 72
-rw-r--r--  Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83822.yaml | 34
-rw-r--r--  Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml | 33
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml | 1
-rw-r--r--  Documentation/netlink/genetlink-c.yaml | 41
-rw-r--r--  Documentation/netlink/genetlink-legacy.yaml | 41
-rw-r--r--  Documentation/netlink/genetlink.yaml | 21
-rw-r--r--  Documentation/netlink/netlink-raw.yaml | 37
-rw-r--r--  Documentation/netlink/specs/devlink.yaml | 2
-rw-r--r--  Documentation/netlink/specs/dpll.yaml | 40
-rw-r--r--  Documentation/netlink/specs/mptcp_pm.yaml | 3
-rw-r--r--  Documentation/netlink/specs/netdev.yaml | 91
-rw-r--r--  Documentation/netlink/specs/nlctrl.yaml | 206
-rw-r--r--  Documentation/netlink/specs/tc.yaml | 2119
-rw-r--r--  Documentation/networking/af_xdp.rst | 33
-rw-r--r--  Documentation/networking/bonding.rst | 12
-rw-r--r--  Documentation/networking/can.rst | 34
-rw-r--r--  Documentation/networking/device_drivers/ethernet/amazon/ena.rst | 6
-rw-r--r--  Documentation/networking/device_drivers/ethernet/index.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/ethernet/intel/ice.rst | 21
-rw-r--r--  Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst | 24
-rw-r--r--  Documentation/networking/device_drivers/wwan/t7xx.rst | 46
-rw-r--r--  Documentation/networking/devlink/mlx5.rst | 9
-rw-r--r--  Documentation/networking/index.rst | 1
-rw-r--r--  Documentation/networking/ip-sysctl.rst | 14
-rw-r--r--  Documentation/networking/l2tp.rst | 135
-rw-r--r--  Documentation/networking/multi-pf-netdev.rst | 174
-rw-r--r--  Documentation/networking/netconsole.rst | 66
-rw-r--r--  Documentation/networking/netdevices.rst | 4
-rw-r--r--  Documentation/networking/sfp-phylink.rst | 147
-rw-r--r--  Documentation/networking/statistics.rst | 15
-rw-r--r--  Documentation/networking/xfrm_device.rst | 4
-rw-r--r--  Documentation/userspace-api/ioctl/ioctl-number.rst | 1
-rw-r--r--  Documentation/userspace-api/netlink/netlink-raw.rst | 42
-rw-r--r--  MAINTAINERS | 37
-rw-r--r--  arch/arm/mm/ioremap.c | 8
-rw-r--r--  arch/arm64/include/asm/patching.h | 2
-rw-r--r--  arch/arm64/kernel/patching.c | 75
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 26
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 286
-rw-r--r--  arch/loongarch/kernel/setup.c | 2
-rw-r--r--  arch/mips/loongson64/init.c | 2
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c | 4
-rw-r--r--  arch/riscv/include/asm/cfi.h | 17
-rw-r--r--  arch/riscv/kernel/cfi.c | 53
-rw-r--r--  arch/riscv/net/bpf_jit.h | 136
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c | 2
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 229
-rw-r--r--  arch/riscv/net/bpf_jit_core.c | 9
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 236
-rw-r--r--  drivers/atm/fore200e.c | 6
-rw-r--r--  drivers/bcma/main.c | 2
-rw-r--r--  drivers/bluetooth/btbcm.c | 12
-rw-r--r--  drivers/bluetooth/btintel.c | 116
-rw-r--r--  drivers/bluetooth/btmtk.c | 5
-rw-r--r--  drivers/bluetooth/btmtk.h | 1
-rw-r--r--  drivers/bluetooth/btnxpuart.c | 27
-rw-r--r--  drivers/bluetooth/btrtl.c | 14
-rw-r--r--  drivers/bluetooth/btusb.c | 30
-rw-r--r--  drivers/bluetooth/hci_h5.c | 5
-rw-r--r--  drivers/bluetooth/hci_qca.c | 6
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 9
-rw-r--r--  drivers/bluetooth/hci_uart.h | 12
-rw-r--r--  drivers/dpll/dpll_core.c | 13
-rw-r--r--  drivers/dpll/dpll_netlink.c | 9
-rw-r--r--  drivers/hid/bpf/hid_bpf_dispatch.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 4
-rw-r--r--  drivers/isdn/capi/capi.c | 21
-rw-r--r--  drivers/isdn/mISDN/dsp_pipeline.c | 16
-rw-r--r--  drivers/media/rc/bpf-lirc.c | 2
-rw-r--r--  drivers/net/amt.c | 10
-rw-r--r--  drivers/net/arcnet/arcnet.c | 1
-rw-r--r--  drivers/net/bareudp.c | 25
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 165
-rw-r--r--  drivers/net/bonding/bond_main.c | 56
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 16
-rw-r--r--  drivers/net/bonding/bond_options.c | 28
-rw-r--r--  drivers/net/can/Kconfig | 3
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/esd/Kconfig | 12
-rw-r--r--  drivers/net/can/esd/Makefile | 7
-rw-r--r--  drivers/net/can/esd/esd_402_pci-core.c | 514
-rw-r--r--  drivers/net/can/esd/esdacc.c | 764
-rw-r--r--  drivers/net/can/esd/esdacc.h | 356
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 62
-rw-r--r--  drivers/net/can/m_can/m_can.c | 579
-rw-r--r--  drivers/net/can/m_can/m_can.h | 35
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c | 1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 5
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 33
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 2
-rw-r--r--  drivers/net/can/usb/Kconfig | 1
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 3
-rw-r--r--  drivers/net/can/vxcan.c | 2
-rw-r--r--  drivers/net/can/xilinx_can.c | 169
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 42
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 7
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 400
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 112
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 5
-rw-r--r--  drivers/net/dsa/mt7530-mdio.c | 7
-rw-r--r--  drivers/net/dsa/mt7530.c | 570
-rw-r--r--  drivers/net/dsa/mt7530.h | 38
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c | 35
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6185.c | 3
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 19
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 4
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 4
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 20
-rw-r--r--  drivers/net/dsa/realtek/Makefile | 13
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 205
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.h | 48
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 279
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.h | 48
-rw-r--r--  drivers/net/dsa/realtek/realtek.h | 14
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c | 132
-rw-r--r--  drivers/net/dsa/realtek/rtl8366-core.c | 22
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c | 119
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c | 335
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.h | 24
-rw-r--r--  drivers/net/dummy.c | 11
-rw-r--r--  drivers/net/ethernet/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 10
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 323
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 49
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 39
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 181
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.c | 1
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c | 10
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c | 18
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c | 95
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 4
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/amd/pds_core/dev.c | 22
-rw-r--r--  drivers/net/ethernet/amd/pds_core/main.c | 47
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 90
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 25
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 208
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 50
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 921
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 74
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 464
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 54
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 14
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 1
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 36
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 148
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 171
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 50
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 20
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h | 18
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 62
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 928
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 135
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 159
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 128
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 108
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c | 48
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 1
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 93
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 97
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 567
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 134
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 183
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 68
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 95
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 221
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 271
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 229
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 22
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 146
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.c | 7
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq_api.h | 5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 39
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 2278
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h | 70
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 43
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_leds.c | 280
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 41
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 70
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 155
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 262
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 112
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 70
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 242
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 54
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 189
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 66
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 294
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig | 19
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/Makefile | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c | 489
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c | 500
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h | 160
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c | 273
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 1231
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h | 334
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c | 430
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h | 166
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h | 154
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h | 162
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c | 510
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h | 224
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c | 330
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h | 276
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 617
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 186
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw_qos.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c | 123
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c | 524
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 734
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 168
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 327
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 8
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 88
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 5
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 105
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 90
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 374
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 23
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 117
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 18
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 945
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 64
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k.c | 17
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k.h | 16
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k_common.c | 17
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k_common.h | 29
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.c | 21
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.h | 15
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 71
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h | 22
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_uart.c | 17
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.h | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_leds.c | 145
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 267
-rw-r--r--  drivers/net/ethernet/realtek/r8169_phy_config.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 60
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 1185
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 1
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena/efx_common.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/rx_common.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena/tx_common.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/tx_tso.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 1
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 1
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_est.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 69
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 87
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c | 4
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 4
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c | 4
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 8
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 22
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/Makefile | 1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c | 269
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h | 7
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 141
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 82
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h | 3
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 17
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 3
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 3
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c | 4
-rw-r--r--  drivers/net/geneve.c | 40
-rw-r--r--  drivers/net/gtp.c | 39
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 5
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 10
-rw-r--r--  drivers/net/ieee802154/mcr20a.c | 5
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 4
-rw-r--r--  drivers/net/ipa/ipa.h | 5
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 6
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 29
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c | 119
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h | 30
-rw-r--r--  drivers/net/ipa/ipa_main.c | 60
-rw-r--r--  drivers/net/ipa/ipa_mem.c | 37
-rw-r--r--  drivers/net/ipa/ipa_mem.h | 5
-rw-r--r--  drivers/net/ipa/ipa_modem.c | 110
-rw-r--r--  drivers/net/ipa/ipa_power.c | 108
-rw-r--r--  drivers/net/ipa/ipa_power.h | 29
-rw-r--r--  drivers/net/ipa/ipa_qmi.c | 10
-rw-r--r--  drivers/net/ipa/ipa_reg.c | 8
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 4
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c | 33
-rw-r--r--  drivers/net/ipa/ipa_smp2p.h | 7
-rw-r--r--  drivers/net/ipa/ipa_table.c | 18
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 9
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/loopback.c | 1
-rw-r--r--  drivers/net/macsec.c | 12
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c | 94
-rw-r--r--  drivers/net/mdio/mdio-ipq4019.c | 109
-rw-r--r--  drivers/net/mdio/of_mdio.c | 79
-rw-r--r--  drivers/net/netconsole.c | 365
-rw-r--r--  drivers/net/netdevsim/bus.c | 149
-rw-r--r--  drivers/net/netdevsim/netdev.c | 53
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 3
-rw-r--r--  drivers/net/netkit.c | 2
-rw-r--r--  drivers/net/nlmon.c | 24
-rw-r--r--  drivers/net/pcs/pcs-lynx.c | 1
-rw-r--r--  drivers/net/pcs/pcs-mtk-lynxi.c | 1
-rw-r--r--  drivers/net/pcs/pcs-rzn1-miic.c | 5
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 18
-rw-r--r--  drivers/net/phy/Kconfig | 8
-rw-r--r--  drivers/net/phy/Makefile | 2
-rw-r--r--  drivers/net/phy/adin1100.c | 55
-rw-r--r--  drivers/net/phy/aquantia/aquantia_main.c | 103
-rw-r--r--  drivers/net/phy/at803x.c | 2432
-rw-r--r--  drivers/net/phy/broadcom.c | 3
-rw-r--r--  drivers/net/phy/dp83822.c | 211
-rw-r--r--  drivers/net/phy/dp83867.c | 22
-rw-r--r--  drivers/net/phy/marvell-88q2xxx.c | 640
-rw-r--r--  drivers/net/phy/marvell-88x2222.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 7
-rw-r--r--  drivers/net/phy/mdio_bus.c | 48
-rw-r--r--  drivers/net/phy/micrel.c | 109
-rw-r--r--  drivers/net/phy/mxl-gpy.c | 20
-rw-r--r--  drivers/net/phy/phy-c45.c | 137
-rw-r--r--  drivers/net/phy/phy.c | 61
-rw-r--r--  drivers/net/phy/phy_device.c | 208
-rw-r--r--  drivers/net/phy/phylink.c | 8
-rw-r--r--  drivers/net/phy/qcom/Kconfig | 30
-rw-r--r--  drivers/net/phy/qcom/Makefile | 6
-rw-r--r--  drivers/net/phy/qcom/at803x.c | 1106
-rw-r--r--  drivers/net/phy/qcom/qca807x.c | 849
-rw-r--r--  drivers/net/phy/qcom/qca808x.c | 663
-rw-r--r--  drivers/net/phy/qcom/qca83xx.c | 275
-rw-r--r--  drivers/net/phy/qcom/qcom-phy-lib.c | 676
-rw-r--r--  drivers/net/phy/qcom/qcom.h | 243
-rw-r--r--  drivers/net/phy/realtek.c | 44
-rw-r--r--  drivers/net/phy/xilinx_gmii2rgmii.c | 2
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 20
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/tun.c | 34
-rw-r--r--  drivers/net/usb/Kconfig | 1
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 20
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 2
-rw-r--r--  drivers/net/usb/hso.c | 2
-rw-r--r--  drivers/net/usb/lan78xx.c | 4
-rw-r--r--  drivers/net/usb/r8152.c | 49
-rw-r--r--  drivers/net/usb/sr9800.c | 4
-rw-r--r--  drivers/net/usb/usbnet.c | 13
-rw-r--r--  drivers/net/veth.c | 77
-rw-r--r--  drivers/net/vsockmon.c | 19
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 68
-rw-r--r--  drivers/net/wan/Kconfig | 12
-rw-r--r--  drivers/net/wan/Makefile | 1
-rw-r--r--  drivers/net/wan/framer/framer-core.c | 30
-rw-r--r--  drivers/net/wan/framer/pef2256/pef2256.c | 6
-rw-r--r--  drivers/net/wan/fsl_qmc_hdlc.c | 797
-rw-r--r--  drivers/net/wireguard/receive.c | 2
-rw-r--r--  drivers/net/wireless/admtek/adm8211.c | 4
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 12
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 62
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 108
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 42
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 1202
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c | 73
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 62
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.c | 267
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.h | 11
-rw-r--r--  drivers/net/wireless/ath/ath11k/testmode.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/thermal.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 303
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.h | 151
-rw-r--r--  drivers/net/wireless/ath/ath12k/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.c | 270
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.h | 84
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.h | 20
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_mon.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.c | 166
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_tx.c | 30
-rw-r--r--  drivers/net/wireless/ath/ath12k/fw.c | 171
-rw-r--r--  drivers/net/wireless/ath/ath12k/fw.h | 33
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.c | 415
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal.h | 20
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_desc.h | 20
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_rx.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath12k/hw.h | 55
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c | 1309
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath12k/mhi.c | 52
-rw-r--r--  drivers/net/wireless/ath/ath12k/p2p.c | 142
-rw-r--r--  drivers/net/wireless/ath/ath12k/p2p.h | 23
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.c | 94
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.c | 429
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.h | 35
-rw-r--r--  drivers/net/wireless/ath/ath12k/reg.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath12k/rx_desc.h | 116
-rw-r--r--  drivers/net/wireless/ath/ath12k/trace.h | 29
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 330
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.h | 202
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg_aic.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/wmi.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 4
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 4
-rw-r--r--  drivers/net/wireless/atmel/at76c50x-usb.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/b43/b43.h | 16
-rw-r--r--  drivers/net/wireless/broadcom/b43/dma.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 20
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_ht.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_n.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/b43/pio.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/main.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c | 26
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 76
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 18
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 12
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c | 46
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c | 9
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 11
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 152
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h | 60
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | 116
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h | 127
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c | 13
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h | 40
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c | 27
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 27
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-mac.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-mac.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Kconfig | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Makefile | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/ax210.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 617
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 216
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/location.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 32
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h | 17
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/sta.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 23
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/pnvm.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/regulatory.c | 500
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/regulatory.h | 199
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 427
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/uefi.h | 210
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 67
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 31
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 36
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 75
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 69
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 132
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 150
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 51
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c | 19
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 340
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/link.c | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 234
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c | 36
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 162
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 87
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 100
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/power.c | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 30
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 51
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 192
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 82
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/queue/tx.c | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/tests/Makefile | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/tests/devinfo.c | 54
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/tests/module.c | 10
-rw-r--r--  drivers/net/wireless/intersil/p54/main.c | 4
-rw-r--r--  drivers/net/wireless/marvell/libertas/cmd.c | 13
-rw-r--r--  drivers/net/wireless/marvell/libertas_tf/main.c | 4
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11h.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n.c | 12
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/debugfs.c | 22
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/fw.h | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.h | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/scan.c | 14
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/wmm.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwl8k.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/Makefile | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/agg-rx.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 106
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.h | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 32
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mmio.c | 107
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 61
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/main.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac.h | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/dma.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/main.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 55
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/init.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/main.c | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/sdio.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/init.c | 56
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/main.c | 26
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 212
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/mcu.h | 94
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/pci.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c | 38
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x_core.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x_dma.c | 15
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x_regs.h | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 74
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/dma.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/init.c | 12
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 81
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/main.c | 11
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 34
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 74
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 52
-rw-r--r--  drivers/net/wireless/mediatek/mt76/wed.c | 213
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/main.c | 4
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/cfg80211.c | 16
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/hif.c | 110
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/netdev.c | 93
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/netdev.h | 6
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/spi.c | 81
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan.c | 40
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan.h | 11
-rw-r--r--  drivers/net/wireless/purelifi/plfxlc/mac.c | 5
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2400pci.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2500pci.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2500usb.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 8
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800pci.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800soc.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c | 5
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt61pci.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt73usb.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 28
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c | 33
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 588
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 15
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/efuse.c | 36
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/efuse.h | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 13
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 109
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c195
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h43
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c40
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c61
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h109
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c646
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c393
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c382
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h362
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2496
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1532
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c341
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h93
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c363
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c215
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c121
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c1105
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h113
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c331
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h572
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c72
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c78
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c82
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c1773
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c378
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c50
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c12
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c19
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c42
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c75
-rw-r--r--drivers/net/wireless/st/cw1200/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c9
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c147
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h5
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c47
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h18
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c103
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c110
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h10
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c115
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h24
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c132
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h1
-rw-r--r--drivers/net/wwan/wwan_core.c36
-rw-r--r--drivers/net/wwan/wwan_hwsim.c16
-rw-r--r--drivers/nvme/host/tcp.c7
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/ptp/Kconfig12
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/ptp_clock.c66
-rw-r--r--drivers/ptp/ptp_fc3.c1014
-rw-r--r--drivers/ptp/ptp_fc3.h45
-rw-r--r--drivers/ptp/ptp_ocp.c311
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/ptp/ptp_sysfs.c13
-rw-r--r--drivers/ptp/ptp_vclock.c2
-rw-r--r--drivers/ssb/main.c2
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/vhost/net.c91
-rw-r--r--fs/eventpoll.c131
-rw-r--r--fs/verity/measure.c4
-rw-r--r--include/linux/bitfield.h3
-rw-r--r--include/linux/bitmap.h113
-rw-r--r--include/linux/bpf-cgroup.h3
-rw-r--r--include/linux/bpf.h202
-rw-r--r--include/linux/bpf_local_storage.h30
-rw-r--r--include/linux/bpf_types.h1
-rw-r--r--include/linux/bpf_verifier.h16
-rw-r--r--include/linux/btf.h36
-rw-r--r--include/linux/btf_ids.h21
-rw-r--r--include/linux/cpumask.h16
-rw-r--r--include/linux/dpll.h1
-rw-r--r--include/linux/dynamic_queue_limits.h45
-rw-r--r--include/linux/ethtool.h14
-rw-r--r--include/linux/filter.h28
-rw-r--r--include/linux/framer/framer-provider.h15
-rw-r--r--include/linux/gfp.h16
-rw-r--r--include/linux/ieee80211.h169
-rw-r--r--include/linux/if_tun.h16
-rw-r--r--include/linux/inet_diag.h1
-rw-r--r--include/linux/inetdevice.h14
-rw-r--r--include/linux/io.h7
-rw-r--r--include/linux/ipv6.h14
-rw-r--r--include/linux/lsm_hook_defs.h15
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mdio.h63
-rw-r--r--include/linux/mfd/idtRC38xxx_reg.h273
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h13
-rw-r--r--include/linux/net.h5
-rw-r--r--include/linux/netdevice.h132
-rw-r--r--include/linux/netfilter.h1
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/phy.h53
-rw-r--r--include/linux/phylink.h7
-rw-r--r--include/linux/platform_data/brcmfmac.h2
-rw-r--r--include/linux/platform_data/mdio-bcm-unimac.h3
-rw-r--r--include/linux/platform_data/microchip-ksz.h1
-rw-r--r--include/linux/platform_data/net-cw1200.h4
-rw-r--r--include/linux/ptp_clock_kernel.h3
-rw-r--r--include/linux/rtnetlink.h3
-rw-r--r--include/linux/security.h43
-rw-r--r--include/linux/skbuff.h141
-rw-r--r--include/linux/sock_diag.h10
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/tcp.h10
-rw-r--r--include/linux/udp.h10
-rw-r--r--include/linux/units.h5
-rw-r--r--include/linux/vmalloc.h5
-rw-r--r--include/linux/wwan.h2
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/addrconf.h7
-rw-r--r--include/net/af_unix.h22
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h19
-rw-r--r--include/net/bluetooth/hci_core.h37
-rw-r--r--include/net/bluetooth/hci_sync.h22
-rw-r--r--include/net/bluetooth/l2cap.h44
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/bonding.h23
-rw-r--r--include/net/cfg80211.h138
-rw-r--r--include/net/cfg802154.h1
-rw-r--r--include/net/dropreason-core.h26
-rw-r--r--include/net/dsa.h4
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/eee.h38
-rw-r--r--include/net/genetlink.h6
-rw-r--r--include/net/gro.h46
-rw-r--r--include/net/hotdata.h52
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/ioam6.h4
-rw-r--r--include/net/ip6_fib.h52
-rw-r--r--include/net/ip6_route.h5
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/ip_tunnels.h3
-rw-r--r--include/net/ipv6.h8
-rw-r--r--include/net/mac80211.h163
-rw-r--r--include/net/mctp.h6
-rw-r--r--include/net/net_namespace.h5
-rw-r--r--include/net/netdev_queues.h56
-rw-r--r--include/net/netfilter/nf_queue.h1
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/netlabel.h7
-rw-r--r--include/net/netmem.h41
-rw-r--r--include/net/nexthop.h34
-rw-r--r--include/net/nfc/nfc.h2
-rw-r--r--include/net/page_pool/types.h13
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/protocol.h3
-rw-r--r--include/net/request_sock.h39
-rw-r--r--include/net/route.h7
-rw-r--r--include/net/rps.h125
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/sock.h173
-rw-r--r--include/net/tcp.h51
-rw-r--r--include/net/xfrm.h14
-rw-r--r--include/trace/events/napi.h33
-rw-r--r--include/trace/events/rxrpc.h198
-rw-r--r--include/trace/events/tcp.h16
-rw-r--r--include/uapi/linux/bpf.h122
-rw-r--r--include/uapi/linux/can.h9
-rw-r--r--include/uapi/linux/can/isotp.h1
-rw-r--r--include/uapi/linux/can/raw.h16
-rw-r--r--include/uapi/linux/devlink.h5
-rw-r--r--include/uapi/linux/dpll.h30
-rw-r--r--include/uapi/linux/ethtool.h48
-rw-r--r--include/uapi/linux/eventpoll.h13
-rw-r--r--include/uapi/linux/if_link.h1
-rw-r--r--include/uapi/linux/ioam6_genl.h20
-rw-r--r--include/uapi/linux/mctp.h32
-rw-r--r--include/uapi/linux/mdio.h4
-rw-r--r--include/uapi/linux/netdev.h20
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h6
-rw-r--r--include/uapi/linux/nexthop.h45
-rw-r--r--include/uapi/linux/nl80211.h71
-rw-r--r--include/uapi/linux/ptp_clock.h13
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h2
-rw-r--r--init/Kconfig5
-rw-r--r--kernel/bpf/Kconfig1
-rw-r--r--kernel/bpf/Makefile5
-rw-r--r--kernel/bpf/arena.c558
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/bpf_iter.c4
-rw-r--r--kernel/bpf/bpf_local_storage.c52
-rw-r--r--kernel/bpf/bpf_lsm.c21
-rw-r--r--kernel/bpf/bpf_struct_ops.c737
-rw-r--r--kernel/bpf/bpf_struct_ops_types.h12
-rw-r--r--kernel/bpf/btf.c566
-rw-r--r--kernel/bpf/cgroup.c11
-rw-r--r--kernel/bpf/core.c46
-rw-r--r--kernel/bpf/cpumap.c4
-rw-r--r--kernel/bpf/cpumask.c4
-rw-r--r--kernel/bpf/devmap.c11
-rw-r--r--kernel/bpf/disasm.c14
-rw-r--r--kernel/bpf/hashtab.c14
-rw-r--r--kernel/bpf/helpers.c23
-rw-r--r--kernel/bpf/inode.c276
-rw-r--r--kernel/bpf/log.c65
-rw-r--r--kernel/bpf/lpm_trie.c20
-rw-r--r--kernel/bpf/map_iter.c4
-rw-r--r--kernel/bpf/stackmap.c9
-rw-r--r--kernel/bpf/syscall.c298
-rw-r--r--kernel/bpf/token.c278
-rw-r--r--kernel/bpf/trampoline.c4
-rw-r--r--kernel/bpf/verifier.c744
-rw-r--r--kernel/cgroup/rstat.c4
-rw-r--r--kernel/configs/debug.config6
-rw-r--r--kernel/events/core.c8
-rw-r--r--kernel/trace/bpf_trace.c27
-rw-r--r--lib/bitmap.c7
-rw-r--r--lib/dynamic_queue_limits.c74
-rw-r--r--lib/test_bitmap.c42
-rw-r--r--lib/test_blackhole_dev.c3
-rw-r--r--mm/page_alloc.c22
-rw-r--r--mm/vmalloc.c83
-rw-r--r--net/8021q/vlan_dev.c30
-rw-r--r--net/8021q/vlanproc.c46
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile2
-rw-r--r--net/batman-adv/distributed-arp-table.c3
-rw-r--r--net/batman-adv/main.c14
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/netlink.c1
-rw-r--r--net/bluetooth/6lowpan.c4
-rw-r--r--net/bluetooth/Kconfig8
-rw-r--r--net/bluetooth/Makefile1
-rw-r--r--net/bluetooth/a2mp.c1054
-rw-r--r--net/bluetooth/a2mp.h154
-rw-r--r--net/bluetooth/af_bluetooth.c10
-rw-r--r--net/bluetooth/amp.c590
-rw-r--r--net/bluetooth/amp.h60
-rw-r--r--net/bluetooth/bnep/core.c5
-rw-r--r--net/bluetooth/eir.c29
-rw-r--r--net/bluetooth/hci_conn.c200
-rw-r--r--net/bluetooth/hci_core.c170
-rw-r--r--net/bluetooth/hci_event.c236
-rw-r--r--net/bluetooth/hci_request.c2
-rw-r--r--net/bluetooth/hci_sock.c4
-rw-r--r--net/bluetooth/hci_sync.c433
-rw-r--r--net/bluetooth/iso.c104
-rw-r--r--net/bluetooth/l2cap_core.c1079
-rw-r--r--net/bluetooth/l2cap_sock.c21
-rw-r--r--net/bluetooth/mgmt.c120
-rw-r--r--net/bluetooth/msft.c3
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c36
-rw-r--r--net/bpf/test_run.c12
-rw-r--r--net/bridge/br.c15
-rw-r--r--net/bridge/br_device.c27
-rw-r--r--net/bridge/br_fdb.c5
-rw-r--r--net/bridge/br_netlink.c3
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/Kconfig7
-rw-r--r--net/bridge/netfilter/Makefile2
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/can/bcm.c69
-rw-r--r--net/can/isotp.c5
-rw-r--r--net/can/raw.c104
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/dev.c362
-rw-r--r--net/core/dev.h7
-rw-r--r--net/core/dst.c6
-rw-r--r--net/core/filter.c173
-rw-r--r--net/core/gro.c40
-rw-r--r--net/core/gro_cells.c3
-rw-r--r--net/core/gso.c4
-rw-r--r--net/core/hotdata.c22
-rw-r--r--net/core/link_watch.c13
-rw-r--r--net/core/net-procfs.c55
-rw-r--r--net/core/net-sysfs.c137
-rw-r--r--net/core/net_namespace.c33
-rw-r--r--net/core/netdev-genl-gen.c12
-rw-r--r--net/core/netdev-genl-gen.h2
-rw-r--r--net/core/netdev-genl.c227
-rw-r--r--net/core/page_pool.c64
-rw-r--r--net/core/page_pool_user.c2
-rw-r--r--net/core/rtnetlink.c130
-rw-r--r--net/core/scm.c5
-rw-r--r--net/core/skbuff.c196
-rw-r--r--net/core/sock.c82
-rw-r--r--net/core/sock_diag.c120
-rw-r--r--net/core/sysctl_net_core.c34
-rw-r--r--net/core/xdp.c15
-rw-r--r--net/dccp/ackvec.c8
-rw-r--r--net/dccp/diag.c1
-rw-r--r--net/devlink/netlink_gen.c2
-rw-r--r--net/dsa/dsa.c7
-rw-r--r--net/dsa/tag_sja1105.c4
-rw-r--r--net/dsa/user.c28
-rw-r--r--net/ethtool/eee.c62
-rw-r--r--net/ethtool/ioctl.c60
-rw-r--r--net/ethtool/netlink.c14
-rw-r--r--net/hsr/hsr_device.c30
-rw-r--r--net/ieee802154/6lowpan/core.c3
-rw-r--r--net/ieee802154/socket.c1
-rw-r--r--net/ieee802154/sysfs.c2
-rw-r--r--net/ieee802154/sysfs.h2
-rw-r--r--net/ipv4/af_inet.c53
-rw-r--r--net/ipv4/bpf_tcp_ca.c26
-rw-r--r--net/ipv4/cipso_ipv4.c5
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c309
-rw-r--r--net/ipv4/fib_frontend.c51
-rw-r--r--net/ipv4/fib_trie.c6
-rw-r--r--net/ipv4/fou_bpf.c4
-rw-r--r--net/ipv4/fou_core.c2
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c7
-rw-r--r--net/ipv4/inet_diag.c101
-rw-r--r--net/ipv4/inet_hashtables.c3
-rw-r--r--net/ipv4/inetpeer.c5
-rw-r--r--net/ipv4/ip_gre.c24
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c13
-rw-r--r--net/ipv4/ip_tunnel.c53
-rw-r--r--net/ipv4/ip_vti.c8
-rw-r--r--net/ipv4/ipip.c8
-rw-r--r--net/ipv4/ipmr.c13
-rw-r--r--net/ipv4/netfilter/Kconfig44
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv4/nexthop.c367
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c20
-rw-r--r--net/ipv4/raw_diag.c1
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/syncookies.c61
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_ao.c2
-rw-r--r--net/ipv4/tcp_bbr.c4
-rw-r--r--net/ipv4/tcp_cong.c6
-rw-r--r--net/ipv4/tcp_cubic.c4
-rw-r--r--net/ipv4/tcp_dctcp.c4
-rw-r--r--net/ipv4/tcp_diag.c1
-rw-r--r--net/ipv4/tcp_input.c51
-rw-r--r--net/ipv4/tcp_ipv4.c17
-rw-r--r--net/ipv4/tcp_minisocks.c10
-rw-r--r--net/ipv4/tcp_offload.c36
-rw-r--r--net/ipv4/udp.c14
-rw-r--r--net/ipv4/udp_diag.c2
-rw-r--r--net/ipv4/udp_offload.c17
-rw-r--r--net/ipv4/xfrm4_input.c2
-rw-r--r--net/ipv6/addrconf.c782
-rw-r--r--net/ipv6/af_inet6.c3
-rw-r--r--net/ipv6/anycast.c61
-rw-r--r--net/ipv6/calipso.c5
-rw-r--r--net/ipv6/exthdrs.c34
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/inet6_hashtables.c8
-rw-r--r--net/ipv6/ioam6.c72
-rw-r--r--net/ipv6/ip6_fib.c92
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c18
-rw-r--r--net/ipv6/ip6_output.c12
-rw-r--r--net/ipv6/ip6_tunnel.c25
-rw-r--r--net/ipv6/ip6_vti.c13
-rw-r--r--net/ipv6/ip6mr.c9
-rw-r--r--net/ipv6/ipv6_sockglue.c8
-rw-r--r--net/ipv6/mcast.c15
-rw-r--r--net/ipv6/ndisc.c84
-rw-r--r--net/ipv6/netfilter/Kconfig20
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c4
-rw-r--r--net/ipv6/output_core.c4
-rw-r--r--net/ipv6/raw.c22
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c47
-rw-r--r--net/ipv6/seg6_hmac.c8
-rw-r--r--net/ipv6/sit.c27
-rw-r--r--net/ipv6/syncookies.c31
-rw-r--r--net/ipv6/tcp_ipv6.c39
-rw-r--r--net/ipv6/tcpv6_offload.c16
-rw-r--r--net/ipv6/udp.c24
-rw-r--r--net/ipv6/udp_offload.c21
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c5
-rw-r--r--net/iucv/af_iucv.c10
-rw-r--r--net/iucv/iucv.c15
-rw-r--r--net/kcm/kcmsock.c18
-rw-r--r--net/l2tp/l2tp_eth.c2
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/agg-tx.c2
-rw-r--r--net/mac80211/cfg.c374
-rw-r--r--net/mac80211/chan.c708
-rw-r--r--net/mac80211/debug.h18
-rw-r--r--net/mac80211/debugfs.c3
-rw-r--r--net/mac80211/driver-ops.c14
-rw-r--r--net/mac80211/driver-ops.h27
-rw-r--r--net/mac80211/ht.c6
-rw-r--r--net/mac80211/ibss.c55
-rw-r--r--net/mac80211/ieee80211_i.h205
-rw-r--r--net/mac80211/iface.c36
-rw-r--r--net/mac80211/key.c20
-rw-r--r--net/mac80211/link.c15
-rw-r--r--net/mac80211/main.c231
-rw-r--r--net/mac80211/mesh.c162
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_plink.c28
-rw-r--r--net/mac80211/mlme.c3040
-rw-r--r--net/mac80211/ocb.c5
-rw-r--r--net/mac80211/offchannel.c21
-rw-r--r--net/mac80211/parse.c971
-rw-r--r--net/mac80211/rate.c14
-rw-r--r--net/mac80211/rx.c53
-rw-r--r--net/mac80211/scan.c64
-rw-r--r--net/mac80211/spectmgmt.c337
-rw-r--r--net/mac80211/sta_info.c21
-rw-r--r--net/mac80211/sta_info.h20
-rw-r--r--net/mac80211/tdls.c73
-rw-r--r--net/mac80211/tests/elems.c5
-rw-r--r--net/mac80211/trace.h201
-rw-r--r--net/mac80211/trace_msg.h2
-rw-r--r--net/mac80211/tx.c60
-rw-r--r--net/mac80211/util.c1794
-rw-r--r--net/mac80211/vht.c52
-rw-r--r--net/mac80211/wpa.c33
-rw-r--r--net/mac802154/llsec.c18
-rw-r--r--net/mctp/Kconfig1
-rw-r--r--net/mctp/af_mctp.c117
-rw-r--r--net/mctp/route.c105
-rw-r--r--net/mctp/test/route-test.c413
-rw-r--r--net/mctp/test/utils.c2
-rw-r--r--net/mpls/af_mpls.c4
-rw-r--r--net/mpls/mpls_gso.c3
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/mptcp/diag.c1
-rw-r--r--net/mptcp/mptcp_diag.c2
-rw-r--r--net/mptcp/mptcp_pm_gen.c7
-rw-r--r--net/mptcp/mptcp_pm_gen.h2
-rw-r--r--net/mptcp/options.c20
-rw-r--r--net/mptcp/pm.c29
-rw-r--r--net/mptcp/pm_netlink.c115
-rw-r--r--net/mptcp/pm_userspace.c221
-rw-r--r--net/mptcp/protocol.c107
-rw-r--r--net/mptcp/protocol.h93
-rw-r--r--net/mptcp/sockopt.c73
-rw-r--r--net/mptcp/subflow.c12
-rw-r--r--net/mptcp/token_test.c7
-rw-r--r--net/netfilter/Kconfig12
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/nf_bpf_link.c2
-rw-r--r--net/netfilter/nf_conncount.c8
-rw-r--r--net/netfilter/nf_conntrack_bpf.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nf_nat_bpf.c4
-rw-r--r--net/netfilter/nf_queue.c106
-rw-r--r--net/netfilter/nf_synproxy_core.c2
-rw-r--r--net/netfilter/nf_tables_api.c35
-rw-r--r--net/netfilter/nfnetlink_queue.c142
-rw-r--r--net/netfilter/nft_osf.c11
-rw-r--r--net/netfilter/nft_set_pipapo.c193
-rw-r--r--net/netfilter/nft_set_pipapo.h37
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c59
-rw-r--r--net/netfilter/utils.c37
-rw-r--r--net/netfilter/x_tables.c3
-rw-r--r--net/netlabel/netlabel_kapi.c10
-rw-r--r--net/netlink/af_netlink.c76
-rw-r--r--net/netlink/af_netlink.h5
-rw-r--r--net/netlink/diag.c3
-rw-r--r--net/netlink/genetlink.c42
-rw-r--r--net/nfc/core.c2
-rw-r--r--net/nfc/hci/llc.c20
-rw-r--r--net/packet/af_packet.c11
-rw-r--r--net/packet/diag.c3
-rw-r--r--net/rds/connection.c4
-rw-r--r--net/rxrpc/af_rxrpc.c12
-rw-r--r--net/rxrpc/ar-internal.h88
-rw-r--r--net/rxrpc/call_event.c327
-rw-r--r--net/rxrpc/call_object.c56
-rw-r--r--net/rxrpc/conn_client.c4
-rw-r--r--net/rxrpc/conn_event.c16
-rw-r--r--net/rxrpc/conn_object.c4
-rw-r--r--net/rxrpc/input.c116
-rw-r--r--net/rxrpc/insecure.c11
-rw-r--r--net/rxrpc/io_thread.c11
-rw-r--r--net/rxrpc/local_object.c3
-rw-r--r--net/rxrpc/misc.c8
-rw-r--r--net/rxrpc/output.c441
-rw-r--r--net/rxrpc/proc.c10
-rw-r--r--net/rxrpc/protocol.h6
-rw-r--r--net/rxrpc/rtt.c36
-rw-r--r--net/rxrpc/rxkad.c57
-rw-r--r--net/rxrpc/sendmsg.c63
-rw-r--r--net/rxrpc/sysctl.c16
-rw-r--r--net/rxrpc/txbuf.c174
-rw-r--r--net/sched/Kconfig10
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_bpf.c1
-rw-r--r--net/sched/act_connmark.c1
-rw-r--r--net/sched/act_csum.c1
-rw-r--r--net/sched/act_ct.c1
-rw-r--r--net/sched/act_ctinfo.c1
-rw-r--r--net/sched/act_gact.c1
-rw-r--r--net/sched/act_gate.c1
-rw-r--r--net/sched/act_ife.c1
-rw-r--r--net/sched/act_mirred.c1
-rw-r--r--net/sched/act_mpls.c1
-rw-r--r--net/sched/act_nat.c1
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c1
-rw-r--r--net/sched/act_sample.c1
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/act_skbedit.c1
-rw-r--r--net/sched/act_skbmod.c1
-rw-r--r--net/sched/act_tunnel_key.c1
-rw-r--r--net/sched/act_vlan.c1
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_bpf.c1
-rw-r--r--net/sched/cls_cgroup.c1
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_flower.c1
-rw-r--r--net/sched/cls_fw.c1
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/cls_route.c1
-rw-r--r--net/sched/cls_u32.c1
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_cake.c1
-rw-r--r--net/sched/sch_cbs.c1
-rw-r--r--net/sched/sch_choke.c1
-rw-r--r--net/sched/sch_codel.c33
-rw-r--r--net/sched/sch_drr.c1
-rw-r--r--net/sched/sch_etf.c1
-rw-r--r--net/sched/sch_ets.c1
-rw-r--r--net/sched/sch_fq.c1
-rw-r--r--net/sched/sch_fq_codel.c1
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sched/sch_gred.c1
-rw-r--r--net/sched/sch_hfsc.c1
-rw-r--r--net/sched/sch_hhf.c1
-rw-r--r--net/sched/sch_htb.c1
-rw-r--r--net/sched/sch_ingress.c3
-rw-r--r--net/sched/sch_mqprio.c1
-rw-r--r--net/sched/sch_multiq.c1
-rw-r--r--net/sched/sch_netem.c1
-rw-r--r--net/sched/sch_pie.c1
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_qfq.c1
-rw-r--r--net/sched/sch_red.c1
-rw-r--r--net/sched/sch_sfb.c1
-rw-r--r--net/sched/sch_sfq.c1
-rw-r--r--net/sched/sch_skbprio.c1
-rw-r--r--net/sched/sch_taprio.c73
-rw-r--r--net/sched/sch_tbf.c1
-rw-r--r--net/sctp/diag.c1
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/socket.c1
-rw-r--r--net/smc/af_smc.c22
-rw-r--r--net/smc/smc.h4
-rw-r--r--net/smc/smc_clc.c6
-rw-r--r--net/smc/smc_clc.h2
-rw-r--r--net/smc/smc_core.c4
-rw-r--r--net/smc/smc_diag.c1
-rw-r--r--net/smc/smc_ism.h10
-rw-r--r--net/smc/smc_pnet.c10
-rw-r--r--net/socket.c2
-rw-r--r--net/tipc/Kconfig7
-rw-r--r--net/tipc/Makefile4
-rw-r--r--net/tipc/bearer.c15
-rw-r--r--net/tipc/diag.c1
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c1
-rw-r--r--net/unix/Kconfig5
-rw-r--r--net/unix/Makefile2
-rw-r--r--net/unix/af_unix.c73
-rw-r--r--net/unix/diag.c1
-rw-r--r--net/unix/garbage.c200
-rw-r--r--net/unix/scm.c159
-rw-r--r--net/unix/scm.h10
-rw-r--r--net/vmw_vsock/diag.c1
-rw-r--r--net/wireless/chan.c377
-rw-r--r--net/wireless/core.h52
-rw-r--r--net/wireless/mlme.c146
-rw-r--r--net/wireless/nl80211.c389
-rw-r--r--net/wireless/reg.c17
-rw-r--r--net/wireless/scan.c788
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/tests/Makefile2
-rw-r--r--net/wireless/tests/chan.c228
-rw-r--r--net/wireless/tests/fragmentation.c30
-rw-r--r--net/wireless/tests/scan.c277
-rw-r--r--net/wireless/trace.h62
-rw-r--r--net/wireless/util.c90
-rw-r--r--net/x25/Kconfig2
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--net/xdp/xsk_diag.c1
-rw-r--r--net/xfrm/espintcp.c4
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_interface_bpf.c4
-rw-r--r--net/xfrm/xfrm_interface_core.c26
-rw-r--r--net/xfrm/xfrm_policy.c147
-rw-r--r--net/xfrm/xfrm_proc.c1
-rw-r--r--net/xfrm/xfrm_state.c17
-rw-r--r--net/xfrm/xfrm_state_bpf.c4
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--rust/kernel/net/phy.rs24
-rw-r--r--samples/bpf/map_perf_test_user.c2
-rw-r--r--samples/bpf/xdp_router_ipv4_user.c2
-rwxr-xr-xscripts/bpf_doc.py2
-rw-r--r--security/security.c101
-rw-r--r--security/selinux/hooks.c47
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst58
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst2
-rw-r--r--tools/bpf/bpftool/gen.c277
-rw-r--r--tools/bpf/bpftool/link.c94
-rw-r--r--tools/bpf/bpftool/map.c2
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/bpf/resolve_btfids/main.c70
-rw-r--r--tools/include/linux/btf_ids.h9
-rw-r--r--tools/include/uapi/linux/bpf.h123
-rw-r--r--tools/include/uapi/linux/if_link.h1
-rw-r--r--tools/include/uapi/linux/netdev.h20
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/bpf.c42
-rw-r--r--tools/lib/bpf/bpf.h79
-rw-r--r--tools/lib/bpf/bpf_core_read.h60
-rw-r--r--tools/lib/bpf/bpf_helpers.h4
-rw-r--r--tools/lib/bpf/btf.c43
-rw-r--r--tools/lib/bpf/elf.c2
-rw-r--r--tools/lib/bpf/features.c583
-rw-r--r--tools/lib/bpf/libbpf.c1158
-rw-r--r--tools/lib/bpf/libbpf.h23
-rw-r--r--tools/lib/bpf/libbpf.map6
-rw-r--r--tools/lib/bpf/libbpf_internal.h68
-rw-r--r--tools/lib/bpf/libbpf_probes.c19
-rw-r--r--tools/lib/bpf/linker.c2
-rw-r--r--tools/lib/bpf/netlink.c4
-rw-r--r--tools/lib/bpf/str_error.h3
-rw-r--r--tools/net/ynl/Makefile4
-rw-r--r--tools/net/ynl/Makefile.deps5
-rwxr-xr-xtools/net/ynl/cli.py43
-rw-r--r--tools/net/ynl/generated/Makefile9
-rw-r--r--tools/net/ynl/lib/Makefile5
-rw-r--r--tools/net/ynl/lib/__init__.py4
-rw-r--r--tools/net/ynl/lib/nlspec.py11
-rw-r--r--tools/net/ynl/lib/ynl-priv.h359
-rw-r--r--tools/net/ynl/lib/ynl.c399
-rw-r--r--tools/net/ynl/lib/ynl.h5
-rw-r--r--tools/net/ynl/lib/ynl.py311
-rw-r--r--tools/net/ynl/samples/.gitignore1
-rw-r--r--tools/net/ynl/samples/Makefile6
-rw-r--r--tools/net/ynl/samples/ovs.c60
-rw-r--r--tools/net/ynl/samples/page-pool.c2
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py124
-rwxr-xr-xtools/net/ynl/ynl-gen-rst.py9
-rw-r--r--tools/testing/kunit/configs/all_tests.config6
-rw-r--r--tools/testing/selftests/Makefile7
-rw-r--r--tools/testing/selftests/alsa/test-pcmtest-driver.c4
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch643
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x3
-rw-r--r--tools/testing/selftests/bpf/Makefile51
-rw-r--r--tools/testing/selftests/bpf/README.rst32
-rw-r--r--tools/testing/selftests/bpf/bench.c40
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c182
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_uprobes.sh9
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_alloc.h67
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_common.h70
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_htab.h100
-rw-r--r--tools/testing/selftests/bpf/bpf_arena_list.h92
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h76
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h30
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c84
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c129
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h65
-rw-r--r--tools/testing/selftests/bpf/config1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_htab.c88
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arena_list.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c67
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fib_lookup.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fill_link_info.c114
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_probes.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_str.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_fixup.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_redirect.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_reroute.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reg_bounds.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_destroy.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spin_lock.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c159
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_storage.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tunnel.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/token.c1052
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_failure.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdpwall.c2
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab.c48
-rw-r--r--tools/testing/selftests/bpf/progs/arena_htab_asm.c5
-rw-r--r--tools/testing/selftests/bpf/progs/arena_list.c87
-rw-r--r--tools/testing/selftests/bpf/progs/async_stack_depth.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops.c25
-rw-r--r--tools/testing/selftests/bpf/progs/bad_struct_ops2.c14
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_compiler.h33
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h16
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c26
-rw-r--r--tools/testing/selftests/bpf/progs/connect_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h57
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c9
-rw-r--r--tools/testing/selftests/bpf/progs/kptr_xchg_inline.c48
-rw-r--r--tools/testing/selftests/bpf/progs/loop4.c4
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/priv_map.c13
-rw-r--r--tools/testing/selftests/bpf/progs/priv_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h17
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h7
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_read_lock.c120
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c4
-rw-r--r--tools/testing/selftests/bpf/progs/sock_iter_batch.c4
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h18
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_autocreate.c52
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c32
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c29
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c24
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_module.c56
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c102
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_recursion.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_fill_link_info.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_ptr_untrusted.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_seg6_loop.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_siphash.h64
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c65
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock_fail.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c595
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h140
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_dynptr.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_loop.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c5
-rw-r--r--tools/testing/selftests/bpf/progs/token_lsm.c32
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_failure.c20
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c28
-rw-r--r--tools/testing/selftests/bpf/progs/type_cast.c13
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_arena.c146
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c182
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c29
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c103
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_loops1.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c553
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spin_lock.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c6
-rw-r--r--tools/testing/selftests/bpf/progs/xdping_kern.c3
-rw-r--r--tools/testing/selftests/bpf/test_loader.c13
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c18
-rw-r--r--tools/testing/selftests/bpf/test_maps.c6
-rw-r--r--tools/testing/selftests/bpf/test_progs.c77
-rw-r--r--tools/testing/selftests/bpf/test_progs.h10
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c3
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c60
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c96
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h10
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_loop_inline.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c6
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c2
-rw-r--r--tools/testing/selftests/drivers/net/bonding/Makefile7
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh19
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh21
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_options.sh38
-rw-r--r--tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh8
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh2
-rw-r--r--tools/testing/selftests/drivers/net/bonding/lag_lib.sh7
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh2
l---------tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh1
-rw-r--r--tools/testing/selftests/drivers/net/dsa/Makefile18
l---------tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_mld.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh2
l---------tools/testing/selftests/drivers/net/dsa/lib.sh1
l---------tools/testing/selftests/drivers/net/dsa/local_termination.sh2
l---------tools/testing/selftests/drivers/net/dsa/no_forwarding.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh9
l---------tools/testing/selftests/drivers/net/dsa/tc_actions.sh2
l---------tools/testing/selftests/drivers/net/dsa/tc_common.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh2
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/Makefile18
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/peer.sh143
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh40
-rw-r--r--tools/testing/selftests/drivers/net/team/Makefile7
-rwxr-xr-xtools/testing/selftests/drivers/net/team/dev_addr_lists.sh4
l---------tools/testing/selftests/drivers/net/team/lag_lib.sh1
l---------tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh1
-rw-r--r--tools/testing/selftests/kselftest.h45
-rw-r--r--tools/testing/selftests/kselftest_harness.h198
-rw-r--r--tools/testing/selftests/landlock/base_test.c2
-rw-r--r--tools/testing/selftests/landlock/common.h58
-rw-r--r--tools/testing/selftests/landlock/fs_test.c26
-rw-r--r--tools/testing/selftests/landlock/net_test.c4
-rw-r--r--tools/testing/selftests/landlock/ptrace_test.c7
-rw-r--r--tools/testing/selftests/lib.mk19
-rw-r--r--tools/testing/selftests/mm/hmm-tests.c4
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh34
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh6
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh148
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile4
-rw-r--r--tools/testing/selftests/net/forwarding/config35
-rwxr-xr-xtools/testing/selftests/net/forwarding/custom_multipath_hash.sh16
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh16
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh.sh41
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh42
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh16
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh6
-rw-r--r--tools/testing/selftests/net/forwarding/ip6gre_lib.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh65
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh.sh52
-rw-r--r--tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh129
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh_res.sh17
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh43
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_police.sh16
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh10
-rwxr-xr-xtools/testing/selftests/net/fq_band_pktlimit.sh14
-rw-r--r--tools/testing/selftests/net/ip_local_port_range.c6
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh56
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh248
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh293
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh213
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh104
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh65
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c39
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh60
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh173
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh62
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh2
-rw-r--r--tools/testing/selftests/net/so_txtime.c7
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_mdb.sh36
-rw-r--r--tools/testing/selftests/net/tls.c2
-rw-r--r--tools/testing/selftests/net/txtimestamp.c3
-rwxr-xr-xtools/testing/selftests/net/txtimestamp.sh12
-rw-r--r--tools/testing/selftests/net/udpgso.c134
-rwxr-xr-xtools/testing/selftests/net/udpgso.sh49
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c9
-rw-r--r--tools/testing/selftests/tc-testing/config1
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json403
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json2
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py2
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh3
-rw-r--r--tools/testing/vsock/util.c17
-rw-r--r--tools/testing/vsock/util.h4
-rw-r--r--tools/testing/vsock/vsock_diag_test.c23
-rw-r--r--tools/testing/vsock/vsock_test.c102
-rw-r--r--tools/testing/vsock/vsock_test_zerocopy.c12
-rw-r--r--tools/testing/vsock/vsock_uring_test.c17
-rw-r--r--tools/virtio/.gitignore1
-rw-r--r--tools/virtio/Makefile8
-rw-r--r--tools/virtio/linux/virtio_config.h4
-rw-r--r--tools/virtio/vhost_net_test.c532
1881 files changed, 91460 insertions, 36042 deletions
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
index c298bab3d320..7d1b30aae874 100644
--- a/.get_maintainer.ignore
+++ b/.get_maintainer.ignore
@@ -1,4 +1,5 @@
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Christoph Hellwig <hch@lst.de>
+Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Marc Gonzalez <marc.w.gonzalez@free.fr>
diff --git a/.mailmap b/.mailmap
index bd9f1025ac44..e90797de3256 100644
--- a/.mailmap
+++ b/.mailmap
@@ -573,6 +573,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@osdl.org>
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
index 5bff64d256c2..84aa25e0d14d 100644
--- a/Documentation/ABI/testing/sysfs-class-net-queues
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -96,3 +96,26 @@ Description:
Indicates the absolute minimum limit of bytes allowed to be
queued on this network device transmit queue. Default value is
0.
+
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/stall_thrs
+Date: Jan 2024
+KernelVersion: 6.9
+Contact: netdev@vger.kernel.org
+Description:
+ Tx completion stall detection threshold in ms. Kernel will
+ guarantee to detect all stalls longer than this threshold but
+ may also detect stalls longer than half of the threshold.
+
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/stall_cnt
+Date: Jan 2024
+KernelVersion: 6.9
+Contact: netdev@vger.kernel.org
+Description:
+ Number of detected Tx completion stalls.
+
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/stall_max
+Date: Jan 2024
+KernelVersion: 6.9
+Contact: netdev@vger.kernel.org
+Description:
+ Longest detected Tx completion stall. Write 0 to clear.
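
A minimal sketch of reading these counters from user space (the paths follow the ABI entries above; "eth0" and "tx-0" are placeholder names, and error handling is abbreviated)::

    /* Poll the new BQL stall counters for one Tx queue. */
    #include <stdio.h>

    static long read_counter(const char *path)
    {
        long val = -1;
        FILE *f = fopen(path, "r");

        if (f) {
            if (fscanf(f, "%ld", &val) != 1)
                val = -1;
            fclose(f);
        }
        return val;
    }

    int main(void)
    {
        const char *base = "/sys/class/net/eth0/queues/tx-0/byte_queue_limits";
        char path[256];

        snprintf(path, sizeof(path), "%s/stall_cnt", base);
        printf("stall_cnt: %ld\n", read_counter(path));
        snprintf(path, sizeof(path), "%s/stall_max", base);
        printf("stall_max (ms): %ld\n", read_counter(path));
        return 0;
    }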
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 396091651955..7250c0542828 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -206,6 +206,11 @@ Will increase power usage.
Default: 0 (off)
+mem_pcpu_rsv
+------------
+
+Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
+
rmem_default
------------
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 7985c6615f3c..a8f5782bd833 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -177,10 +177,10 @@ In addition to kfuncs' arguments, verifier may need more information about the
type of kfunc(s) being registered with the BPF subsystem. To do so, we define
flags on a set of kfuncs as follows::
- BTF_SET8_START(bpf_task_set)
+ BTF_KFUNCS_START(bpf_task_set)
BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE)
- BTF_SET8_END(bpf_task_set)
+ BTF_KFUNCS_END(bpf_task_set)
This set encodes the BTF ID of each kfunc listed above, and encodes the flags
along with it. Of course, it is also allowed to specify no flags.
@@ -347,10 +347,10 @@ Once the kfunc is prepared for use, the final step to making it visible is
registering it with the BPF subsystem. Registration is done per BPF program
type. An example is shown below::
- BTF_SET8_START(bpf_task_set)
+ BTF_KFUNCS_START(bpf_task_set)
BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE)
- BTF_SET8_END(bpf_task_set)
+ BTF_KFUNCS_END(bpf_task_set)
static const struct btf_kfunc_id_set bpf_task_kfunc_set = {
.owner = THIS_MODULE,
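
For reference, a complete registration using the renamed macros might look like the following sketch (the kfunc bodies are elided, and the BPF_PROG_TYPE_TRACING hook is an assumption — any program type accepted by register_btf_kfunc_id_set() works the same way)::

    __bpf_kfunc struct task_struct *bpf_get_task_pid(s32 pid)
    {
        /* body elided in this sketch */
        return NULL;
    }

    __bpf_kfunc void bpf_put_pid(struct task_struct *task)
    {
        /* body elided in this sketch */
    }

    BTF_KFUNCS_START(bpf_task_set)
    BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL)
    BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE)
    BTF_KFUNCS_END(bpf_task_set)

    static const struct btf_kfunc_id_set bpf_task_kfunc_set = {
        .owner = THIS_MODULE,
        .set   = &bpf_task_set,
    };

    static int __init bpf_task_kfunc_init(void)
    {
        return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
                                         &bpf_task_kfunc_set);
    }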
diff --git a/Documentation/bpf/map_lpm_trie.rst b/Documentation/bpf/map_lpm_trie.rst
index 74d64a30f500..f9cd579496c9 100644
--- a/Documentation/bpf/map_lpm_trie.rst
+++ b/Documentation/bpf/map_lpm_trie.rst
@@ -17,7 +17,7 @@ significant byte.
LPM tries may be created with a maximum prefix length that is a multiple
of 8, in the range from 8 to 2048. The key used for lookup and update
-operations is a ``struct bpf_lpm_trie_key``, extended by
+operations is a ``struct bpf_lpm_trie_key_u8``, extended by
``max_prefixlen/8`` bytes.
- For IPv4 addresses the data length is 4 bytes
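
A sketch of what that key layout means in practice for IPv4 (the wrapper struct name and the /24 values are illustrative; bpf_map_lookup_elem() is the libbpf helper)::

    #include <bpf/bpf.h>        /* bpf_map_lookup_elem() */
    #include <linux/types.h>

    /* struct bpf_lpm_trie_key_u8 extended by max_prefixlen/8 = 4
     * bytes of address data for an IPv4 trie. */
    struct ipv4_lpm_key {
        __u32 prefixlen;
        __u8  data[4];
    };

    static int lookup_v4(int map_fd, __u64 *value)
    {
        struct ipv4_lpm_key key = {
            .prefixlen = 24,
            .data      = { 192, 168, 1, 0 },   /* 192.168.1.0/24 */
        };

        return bpf_map_lookup_elem(map_fd, &key, value);
    }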
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index 245b6defc298..a5ab00ac0b14 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -1,11 +1,11 @@
.. contents::
.. sectnum::
-=======================================
-BPF Instruction Set Specification, v1.0
-=======================================
+======================================
+BPF Instruction Set Architecture (ISA)
+======================================
-This document specifies version 1.0 of the BPF instruction set.
+This document specifies the BPF instruction set architecture (ISA).
Documentation conventions
=========================
@@ -24,22 +24,22 @@ a type's signedness (`S`) and bit width (`N`), respectively.
.. table:: Meaning of signedness notation.
==== =========
- `S` Meaning
+ S Meaning
==== =========
- `u` unsigned
- `s` signed
+ u unsigned
+ s signed
==== =========
.. table:: Meaning of bit-width notation.
===== =========
- `N` Bit width
+ N Bit width
===== =========
- `8` 8 bits
- `16` 16 bits
- `32` 32 bits
- `64` 64 bits
- `128` 128 bits
+ 8 8 bits
+ 16 16 bits
+ 32 32 bits
+ 64 64 bits
+ 128 128 bits
===== =========
For example, `u32` is a type whose valid values are all the 32-bit unsigned
@@ -48,31 +48,31 @@ numbers.
Functions
---------
-* `htobe16`: Takes an unsigned 16-bit number in host-endian format and
+* htobe16: Takes an unsigned 16-bit number in host-endian format and
returns the equivalent number as an unsigned 16-bit number in big-endian
format.
-* `htobe32`: Takes an unsigned 32-bit number in host-endian format and
+* htobe32: Takes an unsigned 32-bit number in host-endian format and
returns the equivalent number as an unsigned 32-bit number in big-endian
format.
-* `htobe64`: Takes an unsigned 64-bit number in host-endian format and
+* htobe64: Takes an unsigned 64-bit number in host-endian format and
returns the equivalent number as an unsigned 64-bit number in big-endian
format.
-* `htole16`: Takes an unsigned 16-bit number in host-endian format and
+* htole16: Takes an unsigned 16-bit number in host-endian format and
returns the equivalent number as an unsigned 16-bit number in little-endian
format.
-* `htole32`: Takes an unsigned 32-bit number in host-endian format and
+* htole32: Takes an unsigned 32-bit number in host-endian format and
returns the equivalent number as an unsigned 32-bit number in little-endian
format.
-* `htole64`: Takes an unsigned 64-bit number in host-endian format and
+* htole64: Takes an unsigned 64-bit number in host-endian format and
returns the equivalent number as an unsigned 64-bit number in little-endian
format.
-* `bswap16`: Takes an unsigned 16-bit number in either big- or little-endian
+* bswap16: Takes an unsigned 16-bit number in either big- or little-endian
format and returns the equivalent number with the same bit width but
opposite endianness.
-* `bswap32`: Takes an unsigned 32-bit number in either big- or little-endian
+* bswap32: Takes an unsigned 32-bit number in either big- or little-endian
format and returns the equivalent number with the same bit width but
opposite endianness.
-* `bswap64`: Takes an unsigned 64-bit number in either big- or little-endian
+* bswap64: Takes an unsigned 64-bit number in either big- or little-endian
format and returns the equivalent number with the same bit width but
opposite endianness.
@@ -97,40 +97,101 @@ Definitions
A: 10000110
B: 11111111 10000110
+Conformance groups
+------------------
+
+An implementation does not need to support all instructions specified in this
+document (e.g., deprecated instructions). Instead, a number of conformance
+groups are specified. An implementation must support the base32 conformance
+group and may support additional conformance groups, where supporting a
+conformance group means it must support all instructions in that conformance
+group.
+
+The use of named conformance groups enables interoperability between a runtime
+that executes instructions, and tools such as compilers that generate
+instructions for the runtime. Thus, capability discovery in terms of
+conformance groups might be done manually by users or automatically by tools.
+
+Each conformance group has a short ASCII label (e.g., "base32") that
+corresponds to a set of instructions that are mandatory. That is, each
+instruction has one or more conformance groups of which it is a member.
+
+This document defines the following conformance groups:
+
+* base32: includes all instructions defined in this
+ specification unless otherwise noted.
+* base64: includes base32, plus instructions explicitly noted
+ as being in the base64 conformance group.
+* atomic32: includes 32-bit atomic operation instructions (see `Atomic operations`_).
+* atomic64: includes atomic32, plus 64-bit atomic operation instructions.
+* divmul32: includes 32-bit division, multiplication, and modulo instructions.
+* divmul64: includes divmul32, plus 64-bit division, multiplication,
+ and modulo instructions.
+* packet: deprecated packet access instructions.
+
Instruction encoding
====================
BPF has two instruction encodings:
* the basic instruction encoding, which uses 64 bits to encode an instruction
-* the wide instruction encoding, which appends a second 64-bit immediate (i.e.,
- constant) value after the basic instruction for a total of 128 bits.
+* the wide instruction encoding, which appends a second 64 bits
+ after the basic instruction for a total of 128 bits.
-The fields conforming an encoded basic instruction are stored in the
-following order::
+Basic instruction encoding
+--------------------------
- opcode:8 src_reg:4 dst_reg:4 offset:16 imm:32 // In little-endian BPF.
- opcode:8 dst_reg:4 src_reg:4 offset:16 imm:32 // In big-endian BPF.
+A basic instruction is encoded as follows::
-**imm**
- signed integer immediate value
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | opcode | regs | offset |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | imm |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-**offset**
- signed integer offset used with pointer arithmetic
+**opcode**
+ operation to perform, encoded as follows::
-**src_reg**
- the source register number (0-10), except where otherwise specified
- (`64-bit immediate instructions`_ reuse this field for other purposes)
+ +-+-+-+-+-+-+-+-+
+ |specific |class|
+ +-+-+-+-+-+-+-+-+
-**dst_reg**
- destination register number (0-10)
+ **specific**
+ The format of these bits varies by instruction class
-**opcode**
- operation to perform
+ **class**
+ The instruction class (see `Instruction classes`_)
+
+**regs**
+ The source and destination register numbers, encoded as follows
+ on a little-endian host::
+
+ +-+-+-+-+-+-+-+-+
+ |src_reg|dst_reg|
+ +-+-+-+-+-+-+-+-+
+
+ and as follows on a big-endian host::
+
+ +-+-+-+-+-+-+-+-+
+ |dst_reg|src_reg|
+ +-+-+-+-+-+-+-+-+
+
+ **src_reg**
+ the source register number (0-10), except where otherwise specified
+ (`64-bit immediate instructions`_ reuse this field for other purposes)
+
+ **dst_reg**
+ destination register number (0-10)
+
+**offset**
+ signed integer offset used with pointer arithmetic
+
+**imm**
+ signed integer immediate value
-Note that the contents of multi-byte fields ('imm' and 'offset') are
-stored using big-endian byte ordering in big-endian BPF and
-little-endian byte ordering in little-endian BPF.
+Note that the contents of multi-byte fields ('offset' and 'imm') are
+stored using big-endian byte ordering on big-endian hosts and
+little-endian byte ordering on little-endian hosts.
For example::
@@ -143,71 +204,83 @@ For example::
Note that most instructions do not use all of the fields.
Unused fields shall be cleared to zero.
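+
+A minimal C sketch of the basic instruction layout and of extracting the
+register numbers (the struct and helper below are illustrative assumptions,
+not part of this specification)::
+
+    #include <stdint.h>
+
+    struct bpf_basic_insn {
+        uint8_t opcode;
+        uint8_t regs;    /* src_reg/dst_reg nibbles, host-order dependent */
+        int16_t offset;  /* stored in host byte order */
+        int32_t imm;     /* stored in host byte order */
+    };
+
+    /* Per the figures above, the low nibble is dst_reg on a
+     * little-endian host and src_reg on a big-endian host. */
+    static uint8_t insn_dst_reg(const struct bpf_basic_insn *insn,
+                                int host_is_big_endian)
+    {
+        return host_is_big_endian ? insn->regs >> 4 : insn->regs & 0x0f;
+    }
+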
-As discussed below in `64-bit immediate instructions`_, a 64-bit immediate
-instruction uses a 64-bit immediate value that is constructed as follows.
-The 64 bits following the basic instruction contain a pseudo instruction
-using the same format but with opcode, dst_reg, src_reg, and offset all set to zero,
-and imm containing the high 32 bits of the immediate value.
+Wide instruction encoding
+--------------------------
+
+Some instructions are defined to use the wide instruction encoding,
+which uses two 32-bit immediate values. The 64 bits following
+the basic instruction format contain a pseudo instruction
+with 'opcode', 'dst_reg', 'src_reg', and 'offset' all set to zero.
This is depicted in the following figure::
- basic_instruction
- .-----------------------------.
- | |
- code:8 regs:8 offset:16 imm:32 unused:32 imm:32
- | |
- '--------------'
- pseudo instruction
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | opcode | regs | offset |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | imm |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | next_imm |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+**opcode**
+ operation to perform, encoded as explained above
-Thus the 64-bit immediate value is constructed as follows:
+**regs**
+ The source and destination register numbers, encoded as explained above
+
+**offset**
+ signed integer offset used with pointer arithmetic
+
+**imm**
+ signed integer immediate value
- imm64 = (next_imm << 32) | imm
+**reserved**
+ unused, set to zero
-where 'next_imm' refers to the imm value of the pseudo instruction
-following the basic instruction. The unused bytes in the pseudo
-instruction are reserved and shall be cleared to zero.
+**next_imm**
+ second signed integer immediate value
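+
+A short C sketch of how a loader might assemble the 64-bit value used by
+`64-bit immediate instructions`_ from the two halves (the helper name is
+hypothetical)::
+
+    #include <stdint.h>
+
+    static uint64_t imm64_value(int32_t imm, int32_t next_imm)
+    {
+        /* Treat both halves as raw 32-bit patterns; the low half
+         * must not be sign extended before being combined. */
+        return ((uint64_t)(uint32_t)next_imm << 32) | (uint32_t)imm;
+    }
+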
Instruction classes
-------------------
-The three LSB bits of the 'opcode' field store the instruction class:
-
-========= ===== =============================== ===================================
-class value description reference
-========= ===== =============================== ===================================
-BPF_LD 0x00 non-standard load operations `Load and store instructions`_
-BPF_LDX 0x01 load into register operations `Load and store instructions`_
-BPF_ST 0x02 store from immediate operations `Load and store instructions`_
-BPF_STX 0x03 store from register operations `Load and store instructions`_
-BPF_ALU 0x04 32-bit arithmetic operations `Arithmetic and jump instructions`_
-BPF_JMP 0x05 64-bit jump operations `Arithmetic and jump instructions`_
-BPF_JMP32 0x06 32-bit jump operations `Arithmetic and jump instructions`_
-BPF_ALU64 0x07 64-bit arithmetic operations `Arithmetic and jump instructions`_
-========= ===== =============================== ===================================
+The three least significant bits of the 'opcode' field store the instruction class:
+
+===== ===== =============================== ===================================
+class value description reference
+===== ===== =============================== ===================================
+LD 0x0 non-standard load operations `Load and store instructions`_
+LDX 0x1 load into register operations `Load and store instructions`_
+ST 0x2 store from immediate operations `Load and store instructions`_
+STX 0x3 store from register operations `Load and store instructions`_
+ALU 0x4 32-bit arithmetic operations `Arithmetic and jump instructions`_
+JMP 0x5 64-bit jump operations `Arithmetic and jump instructions`_
+JMP32 0x6 32-bit jump operations `Arithmetic and jump instructions`_
+ALU64 0x7 64-bit arithmetic operations `Arithmetic and jump instructions`_
+===== ===== =============================== ===================================
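+
+Since the class occupies the three least significant bits of 'opcode',
+extracting it is a single mask, sketched here in C (the helper name is
+illustrative)::
+
+    #include <stdint.h>
+
+    static unsigned int insn_class(uint8_t opcode)
+    {
+        return opcode & 0x07;   /* 0x0 (LD) through 0x7 (ALU64) */
+    }
+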
Arithmetic and jump instructions
================================
-For arithmetic and jump instructions (``BPF_ALU``, ``BPF_ALU64``, ``BPF_JMP`` and
-``BPF_JMP32``), the 8-bit 'opcode' field is divided into three parts:
+For arithmetic and jump instructions (``ALU``, ``ALU64``, ``JMP`` and
+``JMP32``), the 8-bit 'opcode' field is divided into three parts::
-============== ====== =================
-4 bits (MSB) 1 bit 3 bits (LSB)
-============== ====== =================
-code source instruction class
-============== ====== =================
+ +-+-+-+-+-+-+-+-+
+ | code |s|class|
+ +-+-+-+-+-+-+-+-+
**code**
the operation code, whose meaning varies by instruction class
-**source**
+**s (source)**
the source operand location, which unless otherwise specified is one of:
====== ===== ==============================================
source value description
====== ===== ==============================================
- BPF_K 0x00 use 32-bit 'imm' value as source operand
- BPF_X 0x08 use 'src_reg' register value as source operand
+ K 0 use 32-bit 'imm' value as source operand
+ X 1 use 'src_reg' register value as source operand
====== ===== ==============================================
**instruction class**
@@ -216,70 +289,75 @@ code source instruction class
Arithmetic instructions
-----------------------
-``BPF_ALU`` uses 32-bit wide operands while ``BPF_ALU64`` uses 64-bit wide operands for
-otherwise identical operations.
+``ALU`` uses 32-bit wide operands while ``ALU64`` uses 64-bit wide operands for
+otherwise identical operations. ``ALU64`` instructions belong to the
+base64 conformance group unless noted otherwise.
The 'code' field encodes the operation as below, where 'src' and 'dst' refer
to the values of the source and destination registers, respectively.
-========= ===== ======= ==========================================================
-code value offset description
-========= ===== ======= ==========================================================
-BPF_ADD 0x00 0 dst += src
-BPF_SUB 0x10 0 dst -= src
-BPF_MUL 0x20 0 dst \*= src
-BPF_DIV 0x30 0 dst = (src != 0) ? (dst / src) : 0
-BPF_SDIV 0x30 1 dst = (src != 0) ? (dst s/ src) : 0
-BPF_OR 0x40 0 dst \|= src
-BPF_AND 0x50 0 dst &= src
-BPF_LSH 0x60 0 dst <<= (src & mask)
-BPF_RSH 0x70 0 dst >>= (src & mask)
-BPF_NEG 0x80 0 dst = -dst
-BPF_MOD 0x90 0 dst = (src != 0) ? (dst % src) : dst
-BPF_SMOD 0x90 1 dst = (src != 0) ? (dst s% src) : dst
-BPF_XOR 0xa0 0 dst ^= src
-BPF_MOV 0xb0 0 dst = src
-BPF_MOVSX 0xb0 8/16/32 dst = (s8,s16,s32)src
-BPF_ARSH 0xc0 0 :term:`sign extending<Sign Extend>` dst >>= (src & mask)
-BPF_END 0xd0 0 byte swap operations (see `Byte swap instructions`_ below)
-========= ===== ======= ==========================================================
+===== ===== ======= ==========================================================
+name code offset description
+===== ===== ======= ==========================================================
+ADD 0x0 0 dst += src
+SUB 0x1 0 dst -= src
+MUL 0x2 0 dst \*= src
+DIV 0x3 0 dst = (src != 0) ? (dst / src) : 0
+SDIV 0x3 1 dst = (src != 0) ? (dst s/ src) : 0
+OR 0x4 0 dst \|= src
+AND 0x5 0 dst &= src
+LSH 0x6 0 dst <<= (src & mask)
+RSH 0x7 0 dst >>= (src & mask)
+NEG 0x8 0 dst = -dst
+MOD 0x9 0 dst = (src != 0) ? (dst % src) : dst
+SMOD 0x9 1 dst = (src != 0) ? (dst s% src) : dst
+XOR 0xa 0 dst ^= src
+MOV 0xb 0 dst = src
+MOVSX 0xb 8/16/32 dst = (s8,s16,s32)src
+ARSH 0xc 0 :term:`sign extending<Sign Extend>` dst >>= (src & mask)
+END 0xd 0 byte swap operations (see `Byte swap instructions`_ below)
+===== ===== ======= ==========================================================
Underflow and overflow are allowed during arithmetic operations, meaning
the 64-bit or 32-bit value will wrap. If BPF program execution would
result in division by zero, the destination register is instead set to zero.
-If execution would result in modulo by zero, for ``BPF_ALU64`` the value of
-the destination register is unchanged whereas for ``BPF_ALU`` the upper
+If execution would result in modulo by zero, for ``ALU64`` the value of
+the destination register is unchanged whereas for ``ALU`` the upper
32 bits of the destination register are zeroed.
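+
+A C sketch of the division-by-zero and modulo-by-zero behavior described
+above, for the ``ALU64`` flavor (an illustration of the semantics, not a
+normative implementation)::
+
+    #include <stdint.h>
+
+    static uint64_t alu64_div(uint64_t dst, uint64_t src)
+    {
+        return src != 0 ? dst / src : 0;    /* DIV: zero on divide by zero */
+    }
+
+    static uint64_t alu64_mod(uint64_t dst, uint64_t src)
+    {
+        return src != 0 ? dst % src : dst;  /* MOD: dst unchanged */
+    }
+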
-``BPF_ADD | BPF_X | BPF_ALU`` means::
+``{ADD, X, ALU}``, where 'code' = ``ADD``, 'source' = ``X``, and 'class' = ``ALU``, means::
dst = (u32) ((u32) dst + (u32) src)
where '(u32)' indicates that the upper 32 bits are zeroed.
-``BPF_ADD | BPF_X | BPF_ALU64`` means::
+``{ADD, X, ALU64}`` means::
dst = dst + src
-``BPF_XOR | BPF_K | BPF_ALU`` means::
+``{XOR, K, ALU}`` means::
- dst = (u32) dst ^ (u32) imm32
+ dst = (u32) dst ^ (u32) imm
-``BPF_XOR | BPF_K | BPF_ALU64`` means::
+``{XOR, K, ALU64}`` means::
- dst = dst ^ imm32
+ dst = dst ^ imm
Note that most instructions have instruction offset of 0. Only three instructions
-(``BPF_SDIV``, ``BPF_SMOD``, ``BPF_MOVSX``) have a non-zero offset.
+(``SDIV``, ``SMOD``, ``MOVSX``) have a non-zero offset.
+Division, multiplication, and modulo operations for ``ALU`` are part
+of the "divmul32" conformance group, and division, multiplication, and
+modulo operations for ``ALU64`` are part of the "divmul64" conformance
+group.
The division and modulo operations support both unsigned and signed flavors.
-For unsigned operations (``BPF_DIV`` and ``BPF_MOD``), for ``BPF_ALU``,
-'imm' is interpreted as a 32-bit unsigned value. For ``BPF_ALU64``,
+For unsigned operations (``DIV`` and ``MOD``), for ``ALU``,
+'imm' is interpreted as a 32-bit unsigned value. For ``ALU64``,
'imm' is first :term:`sign extended<Sign Extend>` from 32 to 64 bits, and then
interpreted as a 64-bit unsigned value.
-For signed operations (``BPF_SDIV`` and ``BPF_SMOD``), for ``BPF_ALU``,
-'imm' is interpreted as a 32-bit signed value. For ``BPF_ALU64``, 'imm'
+For signed operations (``SDIV`` and ``SMOD``), for ``ALU``,
+'imm' is interpreted as a 32-bit signed value. For ``ALU64``, 'imm'
is first :term:`sign extended<Sign Extend>` from 32 to 64 bits, and then
interpreted as a 64-bit signed value.
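+
+Expressed as a C sketch (illustrative only), the 'imm' handling for the
+``ALU64`` division and modulo flavors is::
+
+    #include <stdint.h>
+
+    static uint64_t imm_as_unsigned64(int32_t imm)
+    {
+        /* DIV/MOD: sign extend, then reinterpret as unsigned. */
+        return (uint64_t)(int64_t)imm;
+    }
+
+    static int64_t imm_as_signed64(int32_t imm)
+    {
+        /* SDIV/SMOD: plain sign extension. */
+        return (int64_t)imm;
+    }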
@@ -291,11 +369,15 @@ etc. This specification requires that signed modulo use truncated division
a % n = a - n * trunc(a / n)
-The ``BPF_MOVSX`` instruction does a move operation with sign extension.
-``BPF_ALU | BPF_MOVSX`` :term:`sign extends<Sign Extend>` 8-bit and 16-bit operands into 32
+The ``MOVSX`` instruction does a move operation with sign extension.
+``{MOVSX, X, ALU}`` :term:`sign extends<Sign Extend>` 8-bit and 16-bit operands into 32
bit operands, and zeroes the remaining upper 32 bits.
-``BPF_ALU64 | BPF_MOVSX`` :term:`sign extends<Sign Extend>` 8-bit, 16-bit, and 32-bit
-operands into 64 bit operands.
+``{MOVSX, X, ALU64}`` :term:`sign extends<Sign Extend>` 8-bit, 16-bit, and 32-bit
+operands into 64 bit operands. Unlike other arithmetic instructions,
+``MOVSX`` is only defined for register source operands (``X``).
+
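+For example, assuming 'dst' and 'src' are uint64_t register values, a C
+sketch of ``{MOVSX, X, ALU64}`` with a 16-bit operand is::
+
+    dst = (uint64_t)(int64_t)(int16_t)src;   /* sign extend s16 to 64 bits */
+
+and of ``{MOVSX, X, ALU}`` with a 16-bit operand, where the upper 32 bits
+are zeroed::
+
+    dst = (uint32_t)(int32_t)(int16_t)src;   /* sign extend s16 to 32 bits */
+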
+The ``NEG`` instruction is only defined when the source bit is clear
+(``K``).
Shift operations use a mask of 0x3F (63) for 64-bit operations and 0x1F (31)
for 32-bit operations.
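+
+For instance, again assuming uint64_t register values 'dst' and 'src', a C
+sketch of ``{LSH, X, ALU64}`` and ``{LSH, X, ALU}`` is::
+
+    dst = dst << (src & 0x3f);                        /* 64-bit: mask 0x3F */
+    dst = (uint32_t)((uint32_t)dst << (src & 0x1f));  /* 32-bit: mask 0x1F */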
@@ -303,43 +385,45 @@ for 32-bit operations.
Byte swap instructions
----------------------
-The byte swap instructions use instruction classes of ``BPF_ALU`` and ``BPF_ALU64``
-and a 4-bit 'code' field of ``BPF_END``.
+The byte swap instructions use instruction classes of ``ALU`` and ``ALU64``
+and a 4-bit 'code' field of ``END``.
The byte swap instructions operate on the destination register
only and do not use a separate source register or immediate value.
-For ``BPF_ALU``, the 1-bit source operand field in the opcode is used to
+For ``ALU``, the 1-bit source operand field in the opcode is used to
select what byte order the operation converts from or to. For
-``BPF_ALU64``, the 1-bit source operand field in the opcode is reserved
+``ALU64``, the 1-bit source operand field in the opcode is reserved
and must be set to 0.
-========= ========= ===== =================================================
-class source value description
-========= ========= ===== =================================================
-BPF_ALU BPF_TO_LE 0x00 convert between host byte order and little endian
-BPF_ALU BPF_TO_BE 0x08 convert between host byte order and big endian
-BPF_ALU64 Reserved 0x00 do byte swap unconditionally
-========= ========= ===== =================================================
+===== ======== ===== =================================================
+class source value description
+===== ======== ===== =================================================
+ALU TO_LE 0 convert between host byte order and little endian
+ALU TO_BE 1 convert between host byte order and big endian
+ALU64 Reserved 0 do byte swap unconditionally
+===== ======== ===== =================================================
The 'imm' field encodes the width of the swap operations. The following widths
-are supported: 16, 32 and 64.
+are supported: 16, 32 and 64. Width 64 operations belong to the base64
+conformance group and other swap operations belong to the base32
+conformance group.
Examples:
-``BPF_ALU | BPF_TO_LE | BPF_END`` with imm = 16/32/64 means::
+``{END, TO_LE, ALU}`` with imm = 16/32/64 means::
dst = htole16(dst)
dst = htole32(dst)
dst = htole64(dst)
-``BPF_ALU | BPF_TO_BE | BPF_END`` with imm = 16/32/64 means::
+``{END, TO_BE, ALU}`` with imm = 16/32/64 means::
dst = htobe16(dst)
dst = htobe32(dst)
dst = htobe64(dst)
-``BPF_ALU64 | BPF_TO_LE | BPF_END`` with imm = 16/32/64 means::
+``{END, TO_LE, ALU64}`` with imm = 16/32/64 means::
dst = bswap16(dst)
dst = bswap32(dst)
@@ -348,56 +432,61 @@ Examples:
Jump instructions
-----------------
-``BPF_JMP32`` uses 32-bit wide operands while ``BPF_JMP`` uses 64-bit wide operands for
-otherwise identical operations.
+``JMP32`` uses 32-bit wide operands and indicates the base32
+conformance group, while ``JMP`` uses 64-bit wide operands for
+otherwise identical operations, and indicates the base64 conformance
+group unless otherwise specified.
The 'code' field encodes the operation as below:
-======== ===== === =========================================== =========================================
-code value src description notes
-======== ===== === =========================================== =========================================
-BPF_JA 0x0 0x0 PC += offset BPF_JMP class
-BPF_JA 0x0 0x0 PC += imm BPF_JMP32 class
-BPF_JEQ 0x1 any PC += offset if dst == src
-BPF_JGT 0x2 any PC += offset if dst > src unsigned
-BPF_JGE 0x3 any PC += offset if dst >= src unsigned
-BPF_JSET 0x4 any PC += offset if dst & src
-BPF_JNE 0x5 any PC += offset if dst != src
-BPF_JSGT 0x6 any PC += offset if dst > src signed
-BPF_JSGE 0x7 any PC += offset if dst >= src signed
-BPF_CALL 0x8 0x0 call helper function by address see `Helper functions`_
-BPF_CALL 0x8 0x1 call PC += imm see `Program-local functions`_
-BPF_CALL 0x8 0x2 call helper function by BTF ID see `Helper functions`_
-BPF_EXIT 0x9 0x0 return BPF_JMP only
-BPF_JLT 0xa any PC += offset if dst < src unsigned
-BPF_JLE 0xb any PC += offset if dst <= src unsigned
-BPF_JSLT 0xc any PC += offset if dst < src signed
-BPF_JSLE 0xd any PC += offset if dst <= src signed
-======== ===== === =========================================== =========================================
-
-The BPF program needs to store the return value into register R0 before doing a
-``BPF_EXIT``.
+======== ===== ======= =============================== ===================================================
+code value src_reg description notes
+======== ===== ======= =============================== ===================================================
+JA 0x0 0x0 PC += offset {JA, K, JMP} only
+JA 0x0 0x0 PC += imm {JA, K, JMP32} only
+JEQ 0x1 any PC += offset if dst == src
+JGT 0x2 any PC += offset if dst > src unsigned
+JGE 0x3 any PC += offset if dst >= src unsigned
+JSET 0x4 any PC += offset if dst & src
+JNE 0x5 any PC += offset if dst != src
+JSGT 0x6 any PC += offset if dst > src signed
+JSGE 0x7 any PC += offset if dst >= src signed
+CALL 0x8 0x0 call helper function by address {CALL, K, JMP} only, see `Helper functions`_
+CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_
+CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_
+EXIT 0x9 0x0 return {EXIT, K, JMP} only
+JLT 0xa any PC += offset if dst < src unsigned
+JLE 0xb any PC += offset if dst <= src unsigned
+JSLT 0xc any PC += offset if dst < src signed
+JSLE 0xd any PC += offset if dst <= src signed
+======== ===== ======= =============================== ===================================================
+
+The BPF program needs to store the return value into register R0 before doing an
+``EXIT``.
Example:
-``BPF_JSGE | BPF_X | BPF_JMP32`` (0x7e) means::
+``{JSGE, X, JMP32}`` means::
if (s32)dst s>= (s32)src goto +offset
where 's>=' indicates a signed '>=' comparison.
-``BPF_JA | BPF_K | BPF_JMP32`` (0x06) means::
+``{JA, K, JMP32}`` means::
gotol +imm
where 'imm' means the branch offset comes from the insn 'imm' field.
-Note that there are two flavors of ``BPF_JA`` instructions. The
-``BPF_JMP`` class permits a 16-bit jump offset specified by the 'offset'
-field, whereas the ``BPF_JMP32`` class permits a 32-bit jump offset
+Note that there are two flavors of ``JA`` instructions. The
+``JMP`` class permits a 16-bit jump offset specified by the 'offset'
+field, whereas the ``JMP32`` class permits a 32-bit jump offset
specified by the 'imm' field. A > 16-bit conditional jump may be
converted to a < 16-bit conditional jump plus a 32-bit unconditional
jump.
+All ``CALL`` and ``JA`` instructions belong to the
+base32 conformance group.
+
Helper functions
~~~~~~~~~~~~~~~~
@@ -416,78 +505,83 @@ Program-local functions
~~~~~~~~~~~~~~~~~~~~~~~
Program-local functions are functions exposed by the same BPF program as the
caller, and are referenced by offset from the call instruction, similar to
-``BPF_JA``. The offset is encoded in the imm field of the call instruction.
-A ``BPF_EXIT`` within the program-local function will return to the caller.
+``JA``. The offset is encoded in the imm field of the call instruction.
+An ``EXIT`` within the program-local function will return to the caller.
Load and store instructions
===========================
-For load and store instructions (``BPF_LD``, ``BPF_LDX``, ``BPF_ST``, and ``BPF_STX``), the
-8-bit 'opcode' field is divided as:
-
-============ ====== =================
-3 bits (MSB) 2 bits 3 bits (LSB)
-============ ====== =================
-mode size instruction class
-============ ====== =================
-
-The mode modifier is one of:
-
- ============= ===== ==================================== =============
- mode modifier value description reference
- ============= ===== ==================================== =============
- BPF_IMM 0x00 64-bit immediate instructions `64-bit immediate instructions`_
- BPF_ABS 0x20 legacy BPF packet access (absolute) `Legacy BPF Packet access instructions`_
- BPF_IND 0x40 legacy BPF packet access (indirect) `Legacy BPF Packet access instructions`_
- BPF_MEM 0x60 regular load and store operations `Regular load and store operations`_
- BPF_MEMSX 0x80 sign-extension load operations `Sign-extension load operations`_
- BPF_ATOMIC 0xc0 atomic operations `Atomic operations`_
- ============= ===== ==================================== =============
-
-The size modifier is one of:
-
- ============= ===== =====================
- size modifier value description
- ============= ===== =====================
- BPF_W 0x00 word (4 bytes)
- BPF_H 0x08 half word (2 bytes)
- BPF_B 0x10 byte
- BPF_DW 0x18 double word (8 bytes)
- ============= ===== =====================
+For load and store instructions (``LD``, ``LDX``, ``ST``, and ``STX``), the
+8-bit 'opcode' field is divided as::
+
+ +-+-+-+-+-+-+-+-+
+ |mode |sz |class|
+ +-+-+-+-+-+-+-+-+
+
+**mode**
+ The mode modifier is one of:
+
+ ============= ===== ==================================== =============
+ mode modifier value description reference
+ ============= ===== ==================================== =============
+ IMM 0 64-bit immediate instructions `64-bit immediate instructions`_
+ ABS 1 legacy BPF packet access (absolute) `Legacy BPF Packet access instructions`_
+ IND 2 legacy BPF packet access (indirect) `Legacy BPF Packet access instructions`_
+ MEM 3 regular load and store operations `Regular load and store operations`_
+ MEMSX 4 sign-extension load operations `Sign-extension load operations`_
+ ATOMIC 6 atomic operations `Atomic operations`_
+ ============= ===== ==================================== =============
+
+**sz (size)**
+ The size modifier is one of:
+
+ ==== ===== =====================
+ size value description
+ ==== ===== =====================
+ W 0 word (4 bytes)
+ H 1 half word (2 bytes)
+ B 2 byte
+ DW 3 double word (8 bytes)
+ ==== ===== =====================
+
+ Instructions using ``DW`` belong to the base64 conformance group.
+
+**class**
+ The instruction class (see `Instruction classes`_)
Regular load and store operations
---------------------------------
-The ``BPF_MEM`` mode modifier is used to encode regular load and store
+The ``MEM`` mode modifier is used to encode regular load and store
instructions that transfer data between a register and memory.
-``BPF_MEM | <size> | BPF_STX`` means::
+``{MEM, <size>, STX}`` means::
*(size *) (dst + offset) = src
-``BPF_MEM | <size> | BPF_ST`` means::
+``{MEM, <size>, ST}`` means::
- *(size *) (dst + offset) = imm32
+ *(size *) (dst + offset) = imm
-``BPF_MEM | <size> | BPF_LDX`` means::
+``{MEM, <size>, LDX}`` means::
dst = *(unsigned size *) (src + offset)
-Where size is one of: ``BPF_B``, ``BPF_H``, ``BPF_W``, or ``BPF_DW`` and
-'unsigned size' is one of u8, u16, u32 or u64.
+Where '<size>' is one of: ``B``, ``H``, ``W``, or ``DW``, and
+'unsigned size' is one of: u8, u16, u32, or u64.
Sign-extension load operations
------------------------------
-The ``BPF_MEMSX`` mode modifier is used to encode :term:`sign-extension<Sign Extend>` load
+The ``MEMSX`` mode modifier is used to encode :term:`sign-extension<Sign Extend>` load
instructions that transfer data between a register and memory.
-``BPF_MEMSX | <size> | BPF_LDX`` means::
+``{MEMSX, <size>, LDX}`` means::
dst = *(signed size *) (src + offset)
-Where size is one of: ``BPF_B``, ``BPF_H`` or ``BPF_W``, and
-'signed size' is one of s8, s16 or s32.
+Where size is one of: ``B``, ``H``, or ``W``, and
+'signed size' is one of: s8, s16, or s32.
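+
+As a C sketch, ``{MEMSX, H, LDX}`` behaves like the following, assuming
+byte-addressed pointer arithmetic and a uint64_t 'dst' register::
+
+    dst = (uint64_t)(int64_t)*(const int16_t *)(src + offset);
+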
Atomic operations
-----------------
@@ -497,10 +591,12 @@ interrupted or corrupted by other access to the same memory region
by other BPF programs or means outside of this specification.
All atomic operations supported by BPF are encoded as store operations
-that use the ``BPF_ATOMIC`` mode modifier as follows:
+that use the ``ATOMIC`` mode modifier as follows:
-* ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations
-* ``BPF_ATOMIC | BPF_DW | BPF_STX`` for 64-bit operations
+* ``{ATOMIC, W, STX}`` for 32-bit operations, which are
+ part of the "atomic32" conformance group.
+* ``{ATOMIC, DW, STX}`` for 64-bit operations, which are
+ part of the "atomic64" conformance group.
* 8-bit and 16-bit wide atomic operations are not supported.
The 'imm' field is used to encode the actual atomic operation.
@@ -510,18 +606,18 @@ arithmetic operations in the 'imm' field to encode the atomic operation:
======== ===== ===========
imm value description
======== ===== ===========
-BPF_ADD 0x00 atomic add
-BPF_OR 0x40 atomic or
-BPF_AND 0x50 atomic and
-BPF_XOR 0xa0 atomic xor
+ADD 0x00 atomic add
+OR 0x40 atomic or
+AND 0x50 atomic and
+XOR 0xa0 atomic xor
======== ===== ===========
-``BPF_ATOMIC | BPF_W | BPF_STX`` with 'imm' = BPF_ADD means::
+``{ATOMIC, W, STX}`` with 'imm' = ADD means::
*(u32 *)(dst + offset) += src
-``BPF_ATOMIC | BPF_DW | BPF_STX`` with 'imm' = BPF ADD means::
+``{ATOMIC, DW, STX}`` with 'imm' = ADD means::
*(u64 *)(dst + offset) += src
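+
+A rough C11 sketch of the 64-bit atomic add without ``FETCH`` (the memory
+ordering shown is an assumption; this specification does not mandate one)::
+
+    #include <stdatomic.h>
+    #include <stdint.h>
+
+    static void atomic_add64(_Atomic uint64_t *addr, uint64_t src)
+    {
+        atomic_fetch_add_explicit(addr, src, memory_order_relaxed);
+    }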
@@ -531,20 +627,20 @@ two complex atomic operations:
=========== ================ ===========================
imm value description
=========== ================ ===========================
-BPF_FETCH 0x01 modifier: return old value
-BPF_XCHG 0xe0 | BPF_FETCH atomic exchange
-BPF_CMPXCHG 0xf0 | BPF_FETCH atomic compare and exchange
+FETCH 0x01 modifier: return old value
+XCHG 0xe0 | FETCH atomic exchange
+CMPXCHG 0xf0 | FETCH atomic compare and exchange
=========== ================ ===========================
-The ``BPF_FETCH`` modifier is optional for simple atomic operations, and
-always set for the complex atomic operations. If the ``BPF_FETCH`` flag
+The ``FETCH`` modifier is optional for simple atomic operations, and
+always set for the complex atomic operations. If the ``FETCH`` flag
is set, then the operation also overwrites ``src`` with the value that
was in memory before it was modified.
-The ``BPF_XCHG`` operation atomically exchanges ``src`` with the value
+The ``XCHG`` operation atomically exchanges ``src`` with the value
addressed by ``dst + offset``.
-The ``BPF_CMPXCHG`` operation atomically compares the value addressed by
+The ``CMPXCHG`` operation atomically compares the value addressed by
``dst + offset`` with ``R0``. If they match, the value addressed by
``dst + offset`` is replaced with ``src``. In either case, the
value that was at ``dst + offset`` before the operation is zero-extended
@@ -553,25 +649,25 @@ and loaded back to ``R0``.
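+A single-threaded C sketch of the 64-bit ``CMPXCHG`` semantics (the real
+operation is atomic; 'r0' stands for the BPF register R0)::
+
+    uint64_t old = *(uint64_t *)(dst + offset);
+
+    if (old == r0)
+        *(uint64_t *)(dst + offset) = src;
+    r0 = old;   /* the old value is loaded back to R0 in either case */
+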
64-bit immediate instructions
-----------------------------
-Instructions with the ``BPF_IMM`` 'mode' modifier use the wide instruction
-encoding defined in `Instruction encoding`_, and use the 'src' field of the
+Instructions with the ``IMM`` 'mode' modifier use the wide instruction
+encoding defined in `Instruction encoding`_, and use the 'src_reg' field of the
basic instruction to hold an opcode subtype.
-The following table defines a set of ``BPF_IMM | BPF_DW | BPF_LD`` instructions
-with opcode subtypes in the 'src' field, using new terms such as "map"
+The following table defines a set of ``{IMM, DW, LD}`` instructions
+with opcode subtypes in the 'src_reg' field, using new terms such as "map"
defined further below:
-========================= ====== === ========================================= =========== ==============
-opcode construction opcode src pseudocode imm type dst type
-========================= ====== === ========================================= =========== ==============
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x0 dst = imm64 integer integer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x1 dst = map_by_fd(imm) map fd map
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data pointer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x3 dst = var_addr(imm) variable id data pointer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x4 dst = code_addr(imm) integer code pointer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x5 dst = map_by_idx(imm) map index map
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data pointer
-========================= ====== === ========================================= =========== ==============
+======= ========================================= =========== ==============
+src_reg pseudocode imm type dst type
+======= ========================================= =========== ==============
+0x0 dst = (next_imm << 32) | imm integer integer
+0x1 dst = map_by_fd(imm) map fd map
+0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data pointer
+0x3 dst = var_addr(imm) variable id data pointer
+0x4 dst = code_addr(imm) integer code pointer
+0x5 dst = map_by_idx(imm) map index map
+0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data pointer
+======= ========================================= =========== ==============
where
@@ -609,5 +705,9 @@ Legacy BPF Packet access instructions
-------------------------------------
BPF previously introduced special instructions for access to packet data that were
-carried over from classic BPF. However, these instructions are
-deprecated and should no longer be used.
+carried over from classic BPF. These instructions used an instruction
+class of ``LD``, a size modifier of ``W``, ``H``, or ``B``, and a
+mode modifier of ``ABS`` or ``IND``. The 'dst_reg' and 'offset' fields were
+set to zero, and 'src_reg' was set to zero for ``ABS``. However, these
+instructions are deprecated and should no longer be used. All legacy packet
+access instructions belong to the "packet" conformance group.
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst
index f0ec19db301c..356894399fbf 100644
--- a/Documentation/bpf/verifier.rst
+++ b/Documentation/bpf/verifier.rst
@@ -562,7 +562,7 @@ works::
* ``checkpoint[0].r1`` is marked as read;
* At instruction #5 exit is reached and ``checkpoint[0]`` can now be processed
- by ``clean_live_states()``. After this processing ``checkpoint[0].r0`` has a
+ by ``clean_live_states()``. After this processing ``checkpoint[0].r1`` has a
read mark and all other registers and stack slots are marked as ``NOT_INIT``
or ``STACK_INVALID``
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 7f3582a67318..ff10dc6eef5d 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -259,9 +259,21 @@ Contributing new tests (details)
TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
executable which is not tested by default.
+
TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
test.
+ TEST_INCLUDES is similar to TEST_FILES; it lists files which should be
+ included when exporting or installing the tests, with the following
+ differences:
+
+ * symlinks to files in other directories are preserved
+ * the part of paths below tools/testing/selftests/ is preserved when
+ copying the files to the output directory
+
+ TEST_INCLUDES is meant to list dependencies located in other directories of
+ the selftests hierarchy.
+
* First use the headers inside the kernel source and/or git repo, and then the
system headers. Headers for the kernel release as opposed to headers
installed by the distro on the system should be the primary focus to be able
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 55a8d1385e21..8a3c2398b10c 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -200,6 +200,18 @@ properties:
#trigger-source-cells property in the source node.
$ref: /schemas/types.yaml#/definitions/phandle-array
+ active-low:
+ type: boolean
+ description:
+ Makes the LED active low. To turn the LED ON, the line needs to
+ be set to a low voltage instead of a high one.
+
+ inactive-high-impedance:
+ type: boolean
+ description:
+ Set the LED to high-impedance mode to turn it OFF. Datasheets may
+ also describe this mode as tristate.
+
# Required properties for flash LED child nodes:
flash-max-microamp:
description:
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
index 52252fb6bb32..bb20394fca5c 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
@@ -52,10 +52,6 @@ patternProperties:
maxItems: 1
description: LED pin number
- active-low:
- type: boolean
- description: Makes LED active low
-
required:
- reg
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml b/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
index 51cc0d82c12e..f3a3ef992929 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
@@ -78,10 +78,6 @@ patternProperties:
- maximum: 23
description: LED pin number (only LEDs 0 to 23 are valid).
- active-low:
- type: boolean
- description: Makes LED active low.
-
brcm,hardware-controlled:
type: boolean
description: Makes this LED hardware controlled.
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
index 6e51c6b91ee5..211ffc3c4a20 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
@@ -25,8 +25,6 @@ LED sub-node required properties:
LED sub-node optional properties:
- label : see Documentation/devicetree/bindings/leds/common.txt
- - active-low : Boolean, makes LED active low.
- Default : false
- default-state : see
Documentation/devicetree/bindings/leds/common.txt
- linux,default-trigger : see
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
index bd6ec04a8727..a31a202afe5c 100644
--- a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
@@ -41,9 +41,7 @@ properties:
pwm-names: true
- active-low:
- description: For PWMs where the LED is wired to supply rather than ground.
- type: boolean
+ active-low: true
color: true
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm.yaml b/Documentation/devicetree/bindings/leds/leds-pwm.yaml
index 7de6da58be3c..113b7c218303 100644
--- a/Documentation/devicetree/bindings/leds/leds-pwm.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-pwm.yaml
@@ -34,11 +34,6 @@ patternProperties:
Maximum brightness possible for the LED
$ref: /schemas/types.yaml#/definitions/uint32
- active-low:
- description:
- For PWMs where the LED is wired to supply rather than ground.
- type: boolean
-
required:
- pwms
- max-brightness
diff --git a/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml b/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
index 75d8138298fb..660e2ca42daf 100644
--- a/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
@@ -17,6 +17,10 @@ properties:
oneOf:
- items:
- enum:
+ - brcm,bcm74165b0-asp
+ - const: brcm,asp-v2.2
+ - items:
+ - enum:
- brcm,bcm74165-asp
- const: brcm,asp-v2.1
- items:
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
index 6684810fcbf0..23dfe0838dca 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
@@ -24,6 +24,7 @@ properties:
- brcm,genet-mdio-v5
- brcm,asp-v2.0-mdio
- brcm,asp-v2.1-mdio
+ - brcm,asp-v2.2-mdio
- brcm,unimac-mdio
reg:
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 170e23f0610d..20c0572c9853 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -28,6 +28,8 @@ Optional properties:
available with tcan4552/4553.
- device-wake-gpios: Wake up GPIO to wake up the TCAN device. Not
available with tcan4552/4553.
+ - wakeup-source: Leave the chip running when suspended, and configure
+ the RX interrupt to wake up the device.
Example:
tcan4x5x: tcan4x5x@0 {
@@ -42,4 +44,5 @@ tcan4x5x: tcan4x5x@0 {
device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
};
diff --git a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
index 64d57c343e6f..8d4e5af6fd6c 100644
--- a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
+++ b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
@@ -49,6 +49,10 @@ properties:
resets:
maxItems: 1
+ xlnx,has-ecc:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: CAN TX_OL, TX_TL and RX FIFOs have ECC support (AXI CAN)
+
required:
- compatible
- reg
@@ -137,6 +141,7 @@ examples:
interrupts = <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>;
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
+ xlnx,has-ecc;
};
- |
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index bf8894a0257e..2c71e2cf3a2f 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -59,6 +59,11 @@ properties:
- cdns,gem # Generic
- cdns,macb # Generic
+ - items:
+ - enum:
+ - microchip,sam9x7-gem # Microchip SAM9X7 gigabit ethernet interface
+ - const: microchip,sama7g5-gem # Microchip SAMA7G5 gigabit ethernet interface
+
reg:
minItems: 1
items:
diff --git a/Documentation/devicetree/bindings/net/dsa/ar9331.txt b/Documentation/devicetree/bindings/net/dsa/ar9331.txt
deleted file mode 100644
index f824fdae0da2..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/ar9331.txt
+++ /dev/null
@@ -1,147 +0,0 @@
-Atheros AR9331 built-in switch
-=============================
-
-It is a switch built-in to Atheros AR9331 WiSoC and addressable over internal
-MDIO bus. All PHYs are built-in as well.
-
-Required properties:
-
- - compatible: should be: "qca,ar9331-switch"
- - reg: Address on the MII bus for the switch.
- - resets : Must contain an entry for each entry in reset-names.
- - reset-names : Must include the following entries: "switch"
- - interrupt-parent: Phandle to the parent interrupt controller
- - interrupts: IRQ line for the switch
- - interrupt-controller: Indicates the switch is itself an interrupt
- controller. This is used for the PHY interrupts.
- - #interrupt-cells: must be 1
- - mdio: Container of PHY and devices on the switches MDIO bus.
-
-See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
-required and optional properties.
-Examples:
-
-eth0: ethernet@19000000 {
- compatible = "qca,ar9330-eth";
- reg = <0x19000000 0x200>;
- interrupts = <4>;
-
- resets = <&rst 9>, <&rst 22>;
- reset-names = "mac", "mdio";
- clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_AHB>;
- clock-names = "eth", "mdio";
-
- phy-mode = "mii";
- phy-handle = <&phy_port4>;
-};
-
-eth1: ethernet@1a000000 {
- compatible = "qca,ar9330-eth";
- reg = <0x1a000000 0x200>;
- interrupts = <5>;
- resets = <&rst 13>, <&rst 23>;
- reset-names = "mac", "mdio";
- clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_AHB>;
- clock-names = "eth", "mdio";
-
- phy-mode = "gmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- switch10: switch@10 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- compatible = "qca,ar9331-switch";
- reg = <0x10>;
- resets = <&rst 8>;
- reset-names = "switch";
-
- interrupt-parent = <&miscintc>;
- interrupts = <12>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- switch_port0: port@0 {
- reg = <0x0>;
- ethernet = <&eth1>;
-
- phy-mode = "gmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
-
- switch_port1: port@1 {
- reg = <0x1>;
- phy-handle = <&phy_port0>;
- phy-mode = "internal";
- };
-
- switch_port2: port@2 {
- reg = <0x2>;
- phy-handle = <&phy_port1>;
- phy-mode = "internal";
- };
-
- switch_port3: port@3 {
- reg = <0x3>;
- phy-handle = <&phy_port2>;
- phy-mode = "internal";
- };
-
- switch_port4: port@4 {
- reg = <0x4>;
- phy-handle = <&phy_port3>;
- phy-mode = "internal";
- };
- };
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- interrupt-parent = <&switch10>;
-
- phy_port0: phy@0 {
- reg = <0x0>;
- interrupts = <0>;
- };
-
- phy_port1: phy@1 {
- reg = <0x1>;
- interrupts = <0>;
- };
-
- phy_port2: phy@2 {
- reg = <0x2>;
- interrupts = <0>;
- };
-
- phy_port3: phy@3 {
- reg = <0x3>;
- interrupts = <0>;
- };
-
- phy_port4: phy@4 {
- reg = <0x4>;
- interrupts = <0>;
- };
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index c963dc09e8e1..52acc15ebcbf 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -31,6 +31,7 @@ properties:
- microchip,ksz9893
- microchip,ksz9563
- microchip,ksz8563
+ - microchip,ksz8567
reset-gpios:
description:
diff --git a/Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml b/Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml
new file mode 100644
index 000000000000..fd9ddc59d38c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/qca,ar9331.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros AR9331 built-in switch
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+description:
+ Qualcomm Atheros AR9331 is a switch built-in to Atheros AR9331 WiSoC and
+ addressable over internal MDIO bus. All PHYs are built-in as well.
+
+properties:
+ compatible:
+ const: qca,ar9331-switch
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
+ properties:
+ interrupt-parent: true
+
+ patternProperties:
+ '(ethernet-)?phy@[0-4]+$':
+ type: object
+ unevaluatedProperties: false
+
+ properties:
+ reg: true
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: switch
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - mdio
+ - ports
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: dsa.yaml#/$defs/ethernet-ports
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch10: switch@10 {
+ compatible = "qca,ar9331-switch";
+ reg = <0x10>;
+
+ interrupt-parent = <&miscintc>;
+ interrupts = <12>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ resets = <&rst 8>;
+ reset-names = "switch";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+ ethernet = <&eth1>;
+
+ phy-mode = "gmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+ phy-handle = <&phy_port0>;
+ phy-mode = "internal";
+ };
+
+ port@2 {
+ reg = <0x2>;
+ phy-handle = <&phy_port1>;
+ phy-mode = "internal";
+ };
+
+ port@3 {
+ reg = <0x3>;
+ phy-handle = <&phy_port2>;
+ phy-mode = "internal";
+ };
+
+ port@4 {
+ reg = <0x4>;
+ phy-handle = <&phy_port3>;
+ phy-mode = "internal";
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&switch10>;
+
+ phy_port0: ethernet-phy@0 {
+ reg = <0x0>;
+ interrupts = <0>;
+ };
+
+ phy_port1: ethernet-phy@1 {
+ reg = <0x1>;
+ interrupts = <0>;
+ };
+
+ phy_port2: ethernet-phy@2 {
+ reg = <0x2>;
+ interrupts = <0>;
+ };
+
+ phy_port3: ethernet-phy@3 {
+ reg = <0x3>;
+ interrupts = <0>;
+ };
+
+ phy_port4: ethernet-phy@4 {
+ reg = <0x4>;
+ interrupts = <0>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
index cce692f57b08..70b6bda3cf98 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
@@ -59,6 +59,9 @@ properties:
description: GPIO to be used to reset the whole device
maxItems: 1
+ resets:
+ maxItems: 1
+
realtek,disable-leds:
type: boolean
description: |
@@ -127,7 +130,6 @@ else:
- mdc-gpios
- mdio-gpios
- mdio
- - reset-gpios
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index d14d123ad7a0..b2785b03139f 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -14,7 +14,6 @@ properties:
pattern: "^ethernet(@.*)?$"
label:
- $ref: /schemas/types.yaml#/definitions/string
description: Human readable label on a port of a box.
local-mac-address:
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy-package.yaml b/Documentation/devicetree/bindings/net/ethernet-phy-package.yaml
new file mode 100644
index 000000000000..e567101e6f38
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ethernet-phy-package.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ethernet-phy-package.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ethernet PHY Package Common Properties
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description:
+ PHY packages are multi-port Ethernet PHYs of the same family,
+ and each Ethernet PHY is affected by the global configuration
+ of the PHY package.
+
+ Each reg of the PHYs defined in the PHY package node is
+ absolute and describes the real address of the Ethernet PHY on
+ the MDIO bus.
+
+properties:
+ $nodename:
+ pattern: "^ethernet-phy-package@[a-f0-9]+$"
+
+ reg:
+ minimum: 0
+ maximum: 31
+ description:
+ The base ID number for the PHY package.
+ Commonly the ID of the first PHY in the PHY package.
+
+ Some PHYs in the PHY package might not be defined but
+ still occupy an ID on the device (just not attached to
+ anything); hence the PHY package reg might correspond
+ to an unattached PHY (offset 0).
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ ^ethernet-phy@[a-f0-9]+$:
+ $ref: ethernet-phy.yaml#
+
+required:
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index 8948a11c994e..5536c06139ca 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -224,6 +224,9 @@ properties:
Can be omitted thus no delay is observed. Delay is in range of 1ms to 1000ms.
Other delays are invalid.
+ iommus:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
index 9cc236ec42f2..d0332eb76ad2 100644
--- a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
@@ -73,7 +73,7 @@ examples:
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
- i2c {
+ spi {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/qca,qca808x.yaml b/Documentation/devicetree/bindings/net/qca,qca808x.yaml
new file mode 100644
index 000000000000..e2552655902a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,qca808x.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qca,qca808x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros QCA808X PHY
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description:
+ QCA808X PHYs can have up to 3 LEDs attached.
+ All 3 LEDs are disabled by default.
+ 2 LEDs have dedicated pins, while the 3rd pin doubles as an
+ interrupt/GPIO line or as an additional LED.
+
+ By default this special pin is set to the LED function.
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id004d.d101
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ compatible = "ethernet-phy-id004d.d101";
+ reg = <0>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN;
+ default-state = "keep";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
index 7bdb412a0185..69a337c7e345 100644
--- a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
@@ -37,12 +37,14 @@ properties:
items:
- description: Combined signal for various interrupt events
- description: The interrupt that occurs when Rx exits the LPI state
+ - description: The interrupt that occurs when HW safety error triggered
interrupt-names:
minItems: 1
items:
- const: macirq
- - const: eth_lpi
+ - enum: [eth_lpi, sfty]
+ - const: sfty
clocks:
maxItems: 4
@@ -89,8 +91,9 @@ examples:
<&gcc GCC_ETH_PTP_CLK>,
<&gcc GCC_ETH_RGMII_CLK>;
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq", "eth_lpi";
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 782 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi", "sfty";
rx-fifo-depth = <4096>;
tx-fifo-depth = <4096>;
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index c30218684cfe..53cae71d9957 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -159,7 +159,7 @@ properties:
when the AP (not the modem) performs early initialization.
firmware-name:
- $ref: /schemas/types.yaml#/definitions/string
+ maxItems: 1
description:
If present, name (or relative path) of the file within the
firmware search path containing the firmware image used when
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
index 3407e909e8a7..0029e197a825 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
@@ -44,6 +44,21 @@ properties:
items:
- const: gcc_mdio_ahb_clk
+ clock-frequency:
+ description:
+ The MDIO bus clock that must be output by the MDIO bus hardware. If
+ absent, the default hardware values are used.
+
+ The MDC rate is fed by an external clock (fixed 100MHz) and divided
+ internally. The default divider is /256, resulting in a default rate
+ of 390kHz.
+
+ To follow the 802.3 standard, which allows up to 2.5MHz by default,
+ if this property is not declared while the divider is set to /256,
+ 1.5625MHz is selected by default.
+ enum: [ 390625, 781250, 1562500, 3125000, 6250000, 12500000 ]
+ default: 1562500
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/qcom,qca807x.yaml b/Documentation/devicetree/bindings/net/qcom,qca807x.yaml
new file mode 100644
index 000000000000..7290024024f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,qca807x.yaml
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qcom,qca807x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCA807x Ethernet PHY
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+ - Robert Marko <robert.marko@sartura.hr>
+
+description: |
+ The Qualcomm QCA8072/5 Ethernet PHY is a package of 2 or 5
+ IEEE 802.3 clause 22 compliant 10BASE-Te, 100BASE-TX and
+ 1000BASE-T PHYs.
+
+ They feature 2 SerDes, one for a PSGMII or QSGMII connection with
+ the MAC, while the second one is SGMII for connection to a MAC or
+ fiber.
+
+ Both models have a combo port that supports 1000BASE-X and
+ 100BASE-FX fiber.
+
+ Each PHY inside the QCA807x series has 4 digitally controlled
+ output-only pins that natively drive LEDs for up to 2 attached
+ LEDs. Some vendors also use these 4 outputs as GPIOs without
+ attaching LEDs.
+
+ Note that output pins can be set to drive LEDs OR GPIOs; mixed
+ definitions are not accepted.
+
+$ref: ethernet-phy-package.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,qca8072-package
+ - qcom,qca8075-package
+
+ qcom,package-mode:
+ description: |
+ PHY package can be configured in 3 mode following this table:
+
+ First Serdes mode Second Serdes mode
+ Option 1 PSGMII for copper Disabled
+ ports 0-4
+ Option 2 PSGMII for copper 1000BASE-X / 100BASE-FX
+ ports 0-4
+ Option 3 QSGMII for copper SGMII for
+ ports 0-3 copper port 4
+
+ PSGMII mode (option 1 or 2) is configured dynamically based on
+ the presence of a connected SFP device.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - qsgmii
+ - psgmii
+ default: psgmii
+
+ qcom,tx-drive-strength-milliwatt:
+ description: Set the TX amplifier value in mV.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [140, 160, 180, 200, 220,
+ 240, 260, 280, 300, 320,
+ 400, 500, 600]
+ default: 600
+
+patternProperties:
+ ^ethernet-phy@[a-f0-9]+$:
+ $ref: ethernet-phy.yaml#
+
+ properties:
+ qcom,dac-full-amplitude:
+ description:
+ Set Analog MDI driver amplitude to FULL.
+
+ With this not defined, amplitude is set to DSP.
+ (amplitude is adjusted based on cable length)
+
+ With this enabled and qcom,dac-full-bias-current
+ and qcom,dac-disable-bias-current-tweak disabled,
+ bias current is half.
+ type: boolean
+
+ qcom,dac-full-bias-current:
+ description:
+ Set Analog MDI driver bias current to FULL.
+
+ With this not defined, bias current is set to DSP.
+ (bias current is adjusted based on cable length)
+
+ Actual bias current might be different with
+ qcom,dac-disable-bias-current-tweak disabled.
+ type: boolean
+
+ qcom,dac-disable-bias-current-tweak:
+ description: |
+ Disable the tweak to the Analog MDI driver bias
+ current.
+
+ With this not defined, bias current tweaks are enabled
+ by default.
+
+ With this enabled, the following tweaks are NOT applied:
+ - With both FULL amplitude and FULL bias current: bias current
+ is set to half.
+ - With only DSP amplitude: bias current is set to half and
+ is set to 1/4 with cable < 10m.
+ - With DSP bias current (including both DSP amplitude and
+ DSP bias current): bias current is half the detected current
+ with cable < 10m.
+ type: boolean
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ if:
+ required:
+ - gpio-controller
+ then:
+ properties:
+ leds: false
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy-package@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "qcom,qca8075-package";
+ reg = <0>;
+
+ qcom,package-mode = "qsgmii";
+
+ ethernet-phy@0 {
+ reg = <0>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethernet-phy@2 {
+ reg = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ ethernet-phy@3 {
+ reg = <3>;
+ };
+
+ ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index 890f7858d0dc..de7ba7f345a9 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -46,6 +46,7 @@ properties:
- enum:
- renesas,etheravb-r8a779a0 # R-Car V3U
- renesas,etheravb-r8a779g0 # R-Car V4H
+ - renesas,etheravb-r8a779h0 # R-Car V4M
- const: renesas,etheravb-rcar-gen4 # R-Car Gen4
- items:
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 5c2769dc689a..6b0341a8e0ea 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -95,6 +95,7 @@ properties:
- snps,dwmac-5.20
- snps,dwxgmac
- snps,dwxgmac-2.10
+ - starfive,jh7100-dwmac
- starfive,jh7110-dwmac
reg:
@@ -107,13 +108,15 @@ properties:
- description: Combined signal for various interrupt events
- description: The interrupt to manage the remote wake-up packet detection
- description: The interrupt that occurs when Rx exits the LPI state
+ - description: The interrupt that occurs when a HW safety error is triggered
interrupt-names:
minItems: 1
items:
- const: macirq
- - enum: [eth_wake_irq, eth_lpi]
- - const: eth_lpi
+ - enum: [eth_wake_irq, eth_lpi, sfty]
+ - enum: [eth_wake_irq, eth_lpi, sfty]
+ - enum: [eth_wake_irq, eth_lpi, sfty]
clocks:
minItems: 1
@@ -144,10 +147,12 @@ properties:
- description: AHB reset
reset-names:
- minItems: 1
- items:
- - const: stmmaceth
- - const: ahb
+ oneOf:
+ - items:
+ - enum: [stmmaceth, ahb]
+ - items:
+ - const: stmmaceth
+ - const: ahb
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml b/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
index 5e7cfbbebce6..0d1962980f57 100644
--- a/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
@@ -16,16 +16,20 @@ select:
compatible:
contains:
enum:
+ - starfive,jh7100-dwmac
- starfive,jh7110-dwmac
required:
- compatible
properties:
compatible:
- items:
- - enum:
- - starfive,jh7110-dwmac
- - const: snps,dwmac-5.20
+ oneOf:
+ - items:
+ - const: starfive,jh7100-dwmac
+ - const: snps,dwmac
+ - items:
+ - const: starfive,jh7110-dwmac
+ - const: snps,dwmac-5.20
reg:
maxItems: 1
@@ -46,24 +50,6 @@ properties:
- const: tx
- const: gtx
- interrupts:
- minItems: 3
- maxItems: 3
-
- interrupt-names:
- minItems: 3
- maxItems: 3
-
- resets:
- items:
- - description: MAC Reset signal.
- - description: AHB Reset signal.
-
- reset-names:
- items:
- - const: stmmaceth
- - const: ahb
-
starfive,tx-use-rgmii-clk:
description:
Tx clock is provided by external rgmii clock.
@@ -94,6 +80,48 @@ required:
allOf:
- $ref: snps,dwmac.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: starfive,jh7100-dwmac
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 2
+ maxItems: 2
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: ahb
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: starfive,jh7110-dwmac
+ then:
+ properties:
+ interrupts:
+ minItems: 3
+ maxItems: 3
+
+ interrupt-names:
+ minItems: 3
+ maxItems: 3
+
+ resets:
+ minItems: 2
+
+ reset-names:
+ minItems: 2
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index f07ae3173b03..d5bd93ee4dbb 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -7,8 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI SoC Ethernet Switch Controller (CPSW)
maintainers:
- - Grygorii Strashko <grygorii.strashko@ti.com>
- - Sekhar Nori <nsekhar@ti.com>
+ - Siddharth Vadapalli <s-vadapalli@ti.com>
+ - Ravi Gunasekaran <r-gunasekaran@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
description:
The 3-port switch gigabit ethernet subsystem provides ethernet packet
diff --git a/Documentation/devicetree/bindings/net/ti,dp83822.yaml b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
index db74474207ed..784866ea392b 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83822.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
@@ -62,6 +62,40 @@ properties:
for the PHY. The internal delay for the PHY is fixed to 3.5ns relative
to transmit data.
+ ti,cfg-dac-minus-one-bp:
+ description: |
+ DP83826 PHY only.
+ Sets the voltage ratio, in basis points with respect to the nominal
+ value, of the logical level -1 for the MLT-3 encoded TX data.
+ enum: [5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, 10000,
+ 10625, 11250, 11875, 12500, 13125, 13750, 14375, 15000]
+ default: 10000
+
+ ti,cfg-dac-plus-one-bp:
+ description: |
+ DP83826 PHY only.
+ Sets the voltage ratio, in basis points with respect to the nominal
+ value, of the logical level +1 for the MLT-3 encoded TX data.
+ enum: [5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, 10000,
+ 10625, 11250, 11875, 12500, 13125, 13750, 14375, 15000]
+ default: 10000
+
+ ti,rmii-mode:
+ description: |
+ If present, selects the RMII operation mode. Two modes are
+ available:
+ - RMII master, where the PHY outputs a 50MHz reference clock which can
+ be connected to the MAC.
+ - RMII slave, where the PHY expects a 50MHz reference clock input
+ shared with the MAC.
+ The RMII operation mode can also be configured via strap pins.
+ If the strap is not set correctly, or not set at all, this property
+ can be used to configure the mode.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - master
+ - slave
+
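A sketch of how a driver could consume ti,rmii-mode (the OF calls are the standard API; the helper and the master flag are illustrative, not the dp83822 driver's actual code):

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/string.h>

    /* Sketch: an absent property keeps the strap-selected mode. */
    static int dp83822_get_rmii_mode(const struct device_node *np, bool *master)
    {
            const char *mode;

            if (of_property_read_string(np, "ti,rmii-mode", &mode))
                    return -ENODEV; /* not set: keep strap configuration */

            /* "master": the PHY outputs the 50MHz reference clock. */
            *master = !strcmp(mode, "master");
            return 0;
    }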
required:
- reg
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index c9c25132d154..73ed5951d296 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -7,8 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: The TI AM654x/J721E/AM642x SoC Gigabit Ethernet MAC (Media Access Controller)
maintainers:
- - Grygorii Strashko <grygorii.strashko@ti.com>
- - Sekhar Nori <nsekhar@ti.com>
+ - Siddharth Vadapalli <s-vadapalli@ti.com>
+ - Ravi Gunasekaran <r-gunasekaran@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
description:
The TI AM654x/J721E SoC Gigabit Ethernet MAC (CPSW2G NUSS) has two ports
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
index 3e910d3b24a0..b1c875325776 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
@@ -7,8 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: The TI AM654x/J721E Common Platform Time Sync (CPTS) module
maintainers:
- - Grygorii Strashko <grygorii.strashko@ti.com>
- - Sekhar Nori <nsekhar@ti.com>
+ - Siddharth Vadapalli <s-vadapalli@ti.com>
+ - Ravi Gunasekaran <r-gunasekaran@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
description: |+
The TI AM654x/J721E CPTS module is used to facilitate host control of time
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
index 252207adbc54..eabceb849537 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
@@ -19,9 +19,6 @@ description: |
Alternatively, it can specify the wireless part of the MT7628/MT7688
or MT7622/MT7986 SoC.
-allOf:
- - $ref: ieee80211.yaml#
-
properties:
compatible:
enum:
@@ -38,7 +35,12 @@ properties:
MT7986 should contain 3 regions consys, dcm, and sku, in this order.
interrupts:
- maxItems: 1
+ minItems: 1
+ items:
+ - description: major interrupt for rings
+ - description: additional interrupt for ring 19
+ - description: additional interrupt for ring 4
+ - description: additional interrupt for ring 5
power-domains:
maxItems: 1
@@ -217,6 +219,24 @@ required:
- compatible
- reg
+allOf:
+ - $ref: ieee80211.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt7981-wmac
+ - mediatek,mt7986-wmac
+ then:
+ properties:
+ interrupts:
+ minItems: 4
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
@@ -293,7 +313,10 @@ examples:
reg = <0x18000000 0x1000000>,
<0x10003000 0x1000>,
<0x11d10000 0x1000>;
- interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen 50>,
<&topckgen 62>;
clock-names = "mcu", "ap2conn";
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
index 7758a55dd328..9b3ef4bc3732 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
@@ -8,6 +8,7 @@ title: Qualcomm Technologies ath10k wireless devices
maintainers:
- Kalle Valo <kvalo@kernel.org>
+ - Jeff Johnson <jjohnson@kernel.org>
description:
Qualcomm Technologies, Inc. IEEE 802.11ac devices.
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
index 817f02a8b481..41d023797d7d 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
@@ -9,6 +9,7 @@ title: Qualcomm Technologies ath11k wireless devices (PCIe)
maintainers:
- Kalle Valo <kvalo@kernel.org>
+ - Jeff Johnson <jjohnson@kernel.org>
description: |
Qualcomm Technologies IEEE 802.11ax PCIe devices
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index 7d5f982a3d09..672282cdfc2f 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -9,6 +9,7 @@ title: Qualcomm Technologies ath11k wireless devices
maintainers:
- Kalle Valo <kvalo@kernel.org>
+ - Jeff Johnson <jjohnson@kernel.org>
description: |
These are dt entries for Qualcomm Technologies, Inc. IEEE 802.11ax
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index c58f7153fcf8..4dfd899a1661 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -11,7 +11,7 @@ $defs:
minimum: 0
len-or-define:
type: [ string, integer ]
- pattern: ^[0-9A-Za-z_]+( - 1)?$
+ pattern: ^[0-9A-Za-z_-]+( - 1)?$
minimum: 0
len-or-limit:
# literal int or limit based on fixed-width type e.g. u8-min, u16-max, etc.
@@ -126,8 +126,9 @@ properties:
Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
type: string
enum-name:
- description: Name for the enum type of the attribute.
- type: string
+ description: |
+ Name for the enum type of the attribute; if empty, no name will be used.
+ type: [ string, "null" ]
doc:
description: Documentation of the space.
type: string
@@ -208,6 +209,11 @@ properties:
exact-len:
description: Exact length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ unterminated-ok:
+ description: |
+ For string attributes, do not check whether the attribute
+ contains the terminating null character.
+ type: boolean
sub-type: *attr-type
display-hint: &display-hint
description: |
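One consequence of unterminated-ok worth spelling out: consumers must bound copies by the attribute length instead of trusting a NUL terminator. A hedged kernel-side sketch using the standard nla_* accessors:

    #include <linux/minmax.h>
    #include <linux/string.h>
    #include <net/netlink.h>

    /* Sketch: copy a possibly-unterminated string attribute into a
     * bounded, always NUL-terminated buffer. */
    static void copy_unterminated(const struct nlattr *attr,
                                  char *buf, size_t bufsz)
    {
            size_t len = min_t(size_t, nla_len(attr), bufsz - 1);

            memcpy(buf, nla_data(attr), len);
            buf[len] = '\0';
    }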
@@ -261,14 +267,16 @@ properties:
the prefix with the upper case name of the command, with dashes replaced by underscores.
type: string
enum-name:
- description: Name for the enum type with commands.
- type: string
+ description: |
+ Name for the enum type with commands; if empty, no name will be used.
+ type: [ string, "null" ]
async-prefix:
description: Same as name-prefix but used to render notifications and events to separate enum.
type: string
async-enum:
- description: Name for the enum type with notifications/events.
- type: string
+ description: |
+ Name for the enum type with notifications/events; if empty, no name will be used.
+ type: [ string, "null" ]
list:
description: List of commands
type: array
@@ -370,3 +378,22 @@ properties:
type: string
# End genetlink-c
flags: *cmd_flags
+
+ kernel-family:
+ description: Additional global attributes used for kernel C code generation.
+ type: object
+ additionalProperties: False
+ properties:
+ headers:
+ description: |
+ List of extra headers which should be included in the source
+ of the generated code.
+ type: array
+ items:
+ type: string
+ sock-priv:
+ description: |
+ Literal name of the type which is used within the kernel
+ to store the socket state. The type / structure is internal
+ to the kernel, and is not defined in the spec.
+ type: string
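For orientation, sock-priv only carries a literal type name into the generated kernel code; the definition lives in the family's kernel sources. A purely hypothetical pairing (type and field names invented for illustration):

    #include <linux/list.h>
    #include <linux/types.h>

    /* Hypothetical type a spec could name via
     *   kernel-family:
     *     sock-priv: my_family_sock
     * The generated code only stores and retrieves it per socket;
     * neither the spec nor uapi define the layout. */
    struct my_family_sock {
            struct list_head bound_objects;
            u32 flags;
    };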
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index 938703088306..b48ad3b1cc32 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -11,7 +11,7 @@ $defs:
minimum: 0
len-or-define:
type: [ string, integer ]
- pattern: ^[0-9A-Za-z_]+( - 1)?$
+ pattern: ^[0-9A-Za-z_-]+( - 1)?$
minimum: 0
len-or-limit:
# literal int or limit based on fixed-width type e.g. u8-min, u16-max, etc.
@@ -168,8 +168,9 @@ properties:
Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
type: string
enum-name:
- description: Name for the enum type of the attribute.
- type: string
+ description: |
+ Name for the enum type of the attribute; if empty, no name will be used.
+ type: [ string, "null" ]
doc:
description: Documentation of the space.
type: string
@@ -251,6 +252,11 @@ properties:
exact-len:
description: Exact length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ unterminated-ok:
+ description: |
+ For string attributes, do not check whether the attribute
+ contains the terminating null character.
+ type: boolean
sub-type: *attr-type
display-hint: *display-hint
# Start genetlink-c
@@ -304,14 +310,16 @@ properties:
the prefix with the upper case name of the command, with dashes replaced by underscores.
type: string
enum-name:
- description: Name for the enum type with commands.
- type: string
+ description: |
+ Name for the enum type with commands; if empty, no name will be used.
+ type: [ string, "null" ]
async-prefix:
description: Same as name-prefix but used to render notifications and events to separate enum.
type: string
async-enum:
- description: Name for the enum type with notifications/events.
- type: string
+ description: |
+ Name for the enum type with notifications/events; if empty, no name will be used.
+ type: [ string, "null" ]
# Start genetlink-legacy
fixed-header: &fixed-header
description: |
@@ -431,3 +439,22 @@ properties:
type: string
# End genetlink-c
flags: *cmd_flags
+
+ kernel-family:
+ description: Additional global attributes used for kernel C code generation.
+ type: object
+ additionalProperties: False
+ properties:
+ headers:
+ description: |
+ List of extra headers which should be included in the source
+ of the generated code.
+ type: array
+ items:
+ type: string
+ sock-priv:
+ description: |
+ Literal name of the type which is used within the kernel
+ to store the socket state. The type / structure is internal
+ to the kernel, and is not defined in the spec.
+ type: string
diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml
index 3283bf458ff1..ebd6ee743fcc 100644
--- a/Documentation/netlink/genetlink.yaml
+++ b/Documentation/netlink/genetlink.yaml
@@ -11,7 +11,7 @@ $defs:
minimum: 0
len-or-define:
type: [ string, integer ]
- pattern: ^[0-9A-Za-z_]+( - 1)?$
+ pattern: ^[0-9A-Za-z_-]+( - 1)?$
minimum: 0
len-or-limit:
# literal int or limit based on fixed-width type e.g. u8-min, u16-max, etc.
@@ -328,3 +328,22 @@ properties:
The name for the group, used to form the define and the value of the define.
type: string
flags: *cmd_flags
+
+ kernel-family:
+ description: Additional global attributes used for kernel C code generation.
+ type: object
+ additionalProperties: False
+ properties:
+ headers:
+ description: |
+ List of extra headers which should be included in the source
+ of the generated code.
+ type: array
+ items:
+ type: string
+ sock-priv:
+ description: |
+ Literal name of the type which is used within the kernel
+ to store the socket state. The type / structure is internal
+ to the kernel, and is not defined in the spec.
+ type: string
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
index 04b92f1a5cd6..a76e54cbadbc 100644
--- a/Documentation/netlink/netlink-raw.yaml
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -11,7 +11,7 @@ $defs:
minimum: 0
len-or-define:
type: [ string, integer ]
- pattern: ^[0-9A-Za-z_]+( - 1)?$
+ pattern: ^[0-9A-Za-z_-]+( - 1)?$
minimum: 0
# Schema for specs
@@ -152,14 +152,23 @@ properties:
the right formatting mechanism when displaying values of this
type.
enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ struct:
+ description: Name of the nested struct type.
+ type: string
if:
properties:
type:
- oneOf:
- - const: binary
- - const: pad
+ const: pad
then:
required: [ len ]
+ if:
+ properties:
+ type:
+ const: binary
+ then:
+ oneOf:
+ - required: [ len ]
+ - required: [ struct ]
# End genetlink-legacy
attribute-sets:
@@ -180,8 +189,9 @@ properties:
Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
type: string
enum-name:
- description: Name for the enum type of the attribute.
- type: string
+ description: |
+ Name for the enum type of the attribute; if empty, no name will be used.
+ type: [ string, "null" ]
doc:
description: Documentation of the space.
type: string
@@ -261,6 +271,11 @@ properties:
exact-len:
description: Exact length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ unterminated-ok:
+ description: |
+ For string attributes, do not check whether the attribute
+ contains the terminating null character.
+ type: boolean
sub-type: *attr-type
display-hint: *display-hint
# Start genetlink-c
@@ -362,14 +377,16 @@ properties:
the prefix with the upper case name of the command, with dashes replaced by underscores.
type: string
enum-name:
- description: Name for the enum type with commands.
- type: string
+ description: |
+ Name for the enum type with commands; if empty, no name will be used.
+ type: [ string, "null" ]
async-prefix:
description: Same as name-prefix but used to render notifications and events to separate enum.
type: string
async-enum:
- description: Name for the enum type with notifications/events.
- type: string
+ description: |
+ Name for the enum type with notifications/events; if empty, no name will be used.
+ type: [ string, "null" ]
# Start genetlink-legacy
fixed-header: &fixed-header
description: |
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index cf6eaa0da821..09fbb4c03fc8 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -290,7 +290,7 @@ attribute-sets:
enum: eswitch-mode
-
name: eswitch-inline-mode
- type: u16
+ type: u8
enum: eswitch-inline-mode
-
name: dpipe-tables
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index 3dcc9ece272a..95b0eb1486bf 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -52,6 +52,40 @@ definitions:
dpll's lock-state shall remain DPLL_LOCK_STATUS_UNLOCKED)
render-max: true
-
+ type: enum
+ name: lock-status-error
+ doc: |
+ If the previous status change was caused by a failure, this provides
+ information about the dpll device lock status error.
+ Valid values for the DPLL_A_LOCK_STATUS_ERROR attribute.
+ entries:
+ -
+ name: none
+ doc: |
+ dpll device lock status was changed without any error
+ value: 1
+ -
+ name: undefined
+ doc: |
+ dpll device lock status was changed due to an undefined error.
+ The driver fills in this value when it is unable to obtain
+ the exact error type.
+ -
+ name: media-down
+ doc: |
+ dpll device lock status was changed because the associated
+ media went down.
+ This may happen, for example, if the dpll device was previously
+ locked to an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ -
+ name: fractional-frequency-offset-too-high
+ doc: |
+ the FFO (Fractional Frequency Offset) between the RX and TX
+ symbol rate on the media got too high.
+ This may happen, for example, if the dpll device was previously
+ locked to an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ render-max: true
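Rendered to C, these entries become a uapi enum; a consumer-side sketch (the DPLL_LOCK_STATUS_ERROR_* names assume the spec's usual C rendering of family + enum + entry name):

    #include <linux/dpll.h>

    /* Sketch: pretty-print the lock-status-error values above. */
    static const char *lock_err_str(unsigned int err)
    {
            switch (err) {
            case DPLL_LOCK_STATUS_ERROR_NONE:
                    return "none";
            case DPLL_LOCK_STATUS_ERROR_UNDEFINED:
                    return "undefined";
            case DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN:
                    return "media down";
            case DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH:
                    return "FFO too high";
            }
            return "unknown";
    }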
+ -
type: const
name: temp-divider
value: 1000
@@ -214,6 +248,10 @@ attribute-sets:
name: type
type: u32
enum: type
+ -
+ name: lock-status-error
+ type: u32
+ enum: lock-status-error
-
name: pin
enum-name: dpll_a_pin
@@ -274,6 +312,7 @@ attribute-sets:
-
name: capabilities
type: u32
+ enum: pin-capabilities
-
name: parent-device
type: nest
@@ -379,6 +418,7 @@ operations:
- mode
- mode-supported
- lock-status
+ - lock-status-error
- temp
- clock-id
- type
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index 49f90cfb4698..af525ed29792 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -292,13 +292,14 @@ operations:
-
name: get-addr
doc: Get endpoint information
- attribute-set: endpoint
+ attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
do: &get-addr-attrs
request:
attributes:
- addr
+ - token
reply:
attributes:
- addr
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 3addac970680..76352dbd2be4 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -74,6 +74,10 @@ definitions:
name: queue-type
type: enum
entries: [ rx, tx ]
+ -
+ name: qstats-scope
+ type: flags
+ entries: [ queue ]
attribute-sets:
-
@@ -265,6 +269,73 @@ attribute-sets:
doc: ID of the NAPI instance which services this queue.
type: u32
+ -
+ name: qstats
+ doc: |
+ Get device statistics, scoped to a device or a queue.
+ These statistics extend (and partially duplicate) statistics available
+ in struct rtnl_link_stats64.
+ The value of the `scope` attribute determines how statistics are
+ aggregated. When aggregated for the entire device, the statistics
+ represent the total number of events since the last explicit reset of
+ the device (i.e. not a reconfiguration like changing queue count).
+ When reported per-queue, however, the statistics may not add
+ up to the total number of events, will only be reported for currently
+ active objects, and will likely report the number of events since the
+ last reconfiguration (see the aggregation sketch after this attribute set).
+ attributes:
+ -
+ name: ifindex
+ doc: ifindex of the netdevice to which stats belong.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: queue-type
+ doc: Queue type (rx or tx) for queue-id.
+ type: u32
+ enum: queue-type
+ -
+ name: queue-id
+ doc: Queue ID, if stats are scoped to a single queue instance.
+ type: u32
+ -
+ name: scope
+ doc: |
+ What object type should be used to iterate over the stats.
+ type: uint
+ enum: qstats-scope
+ -
+ name: rx-packets
+ doc: |
+ Number of wire packets successfully received and passed to the stack.
+ For drivers supporting XDP, XDP is considered the first layer
+ of the stack, so packets consumed by XDP are still counted here.
+ type: uint
+ value: 8 # reserve some attr ids in case we need more metadata later
+ -
+ name: rx-bytes
+ doc: Successfully received bytes, see `rx-packets`.
+ type: uint
+ -
+ name: tx-packets
+ doc: |
+ Number of wire packets successfully sent. Packet is considered to be
+ successfully sent once it is in device memory (usually this means
+ the device has issued a DMA completion for the packet).
+ type: uint
+ -
+ name: tx-bytes
+ doc: Successfully sent bytes, see `tx-packets`.
+ type: uint
+ -
+ name: rx-alloc-fail
+ doc: |
+ Number of times skb or buffer allocation failed on the Rx datapath.
+ Allocation failure may or may not result in a packet drop, depending
+ on the driver implementation and whether the system recovers quickly.
+ type: uint
+
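Since per-queue counters need not sum to the device totals, a consumer should aggregate within a single scope only; an illustrative user-space sketch (qstat_sample is not a uapi structure, just a container for parsed attributes):

    #include <stddef.h>

    struct qstat_sample {
            unsigned int ifindex;
            unsigned long long rx_packets;
            unsigned long long rx_bytes;
    };

    /* Sum queue-scoped samples for one netdev; the result counts
     * events since the last reconfiguration, not since device reset. */
    static void sum_rx(const struct qstat_sample *s, size_t n,
                       unsigned int ifindex,
                       unsigned long long *pkts, unsigned long long *bytes)
    {
            *pkts = 0;
            *bytes = 0;
            for (size_t i = 0; i < n; i++) {
                    if (s[i].ifindex != ifindex)
                            continue;
                    *pkts += s[i].rx_packets;
                    *bytes += s[i].rx_bytes;
            }
    }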
operations:
list:
-
@@ -405,6 +476,26 @@ operations:
attributes:
- ifindex
reply: *napi-get-op
+ -
+ name: qstats-get
+ doc: |
+ Get / dump fine-grained statistics. Which statistics are reported
+ depends on the device and the driver, and whether the driver stores
+ software counters per-queue.
+ attribute-set: qstats
+ dump:
+ request:
+ attributes:
+ - scope
+ reply:
+ attributes:
+ - ifindex
+ - queue-type
+ - queue-id
+ - rx-packets
+ - rx-bytes
+ - tx-packets
+ - tx-bytes
mcast-groups:
list:
diff --git a/Documentation/netlink/specs/nlctrl.yaml b/Documentation/netlink/specs/nlctrl.yaml
new file mode 100644
index 000000000000..b1632b95f725
--- /dev/null
+++ b/Documentation/netlink/specs/nlctrl.yaml
@@ -0,0 +1,206 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: nlctrl
+protocol: genetlink-legacy
+uapi-header: linux/genetlink.h
+
+doc: |
+ genetlink meta-family that exposes information about all genetlink
+ families registered in the kernel (including itself).
+
+definitions:
+ -
+ name: op-flags
+ type: flags
+ enum-name:
+ entries:
+ - admin-perm
+ - cmd-cap-do
+ - cmd-cap-dump
+ - cmd-cap-haspol
+ - uns-admin-perm
+ -
+ name: attr-type
+ enum-name: netlink-attribute-type
+ type: enum
+ entries:
+ - invalid
+ - flag
+ - u8
+ - u16
+ - u32
+ - u64
+ - s8
+ - s16
+ - s32
+ - s64
+ - binary
+ - string
+ - nul-string
+ - nested
+ - nested-array
+ - bitfield32
+ - sint
+ - uint
+
+attribute-sets:
+ -
+ name: ctrl-attrs
+ name-prefix: ctrl-attr-
+ attributes:
+ -
+ name: family-id
+ type: u16
+ -
+ name: family-name
+ type: string
+ -
+ name: version
+ type: u32
+ -
+ name: hdrsize
+ type: u32
+ -
+ name: maxattr
+ type: u32
+ -
+ name: ops
+ type: array-nest
+ nested-attributes: op-attrs
+ -
+ name: mcast-groups
+ type: array-nest
+ nested-attributes: mcast-group-attrs
+ -
+ name: policy
+ type: nest-type-value
+ type-value: [ policy-id, attr-id ]
+ nested-attributes: policy-attrs
+ -
+ name: op-policy
+ type: nest-type-value
+ type-value: [ op-id ]
+ nested-attributes: op-policy-attrs
+ -
+ name: op
+ type: u32
+ -
+ name: mcast-group-attrs
+ name-prefix: ctrl-attr-mcast-grp-
+ enum-name:
+ attributes:
+ -
+ name: name
+ type: string
+ -
+ name: id
+ type: u32
+ -
+ name: op-attrs
+ name-prefix: ctrl-attr-op-
+ enum-name:
+ attributes:
+ -
+ name: id
+ type: u32
+ -
+ name: flags
+ type: u32
+ enum: op-flags
+ enum-as-flags: true
+ -
+ name: policy-attrs
+ name-prefix: nl-policy-type-attr-
+ enum-name:
+ attributes:
+ -
+ name: type
+ type: u32
+ enum: attr-type
+ -
+ name: min-value-s
+ type: s64
+ -
+ name: max-value-s
+ type: s64
+ -
+ name: min-value-u
+ type: u64
+ -
+ name: max-value-u
+ type: u64
+ -
+ name: min-length
+ type: u32
+ -
+ name: max-length
+ type: u32
+ -
+ name: policy-idx
+ type: u32
+ -
+ name: policy-maxtype
+ type: u32
+ -
+ name: bitfield32-mask
+ type: u32
+ -
+ name: mask
+ type: u64
+ -
+ name: pad
+ type: pad
+ -
+ name: op-policy-attrs
+ name-prefix: ctrl-attr-policy-
+ enum-name:
+ attributes:
+ -
+ name: do
+ type: u32
+ -
+ name: dump
+ type: u32
+
+operations:
+ enum-model: directional
+ name-prefix: ctrl-cmd-
+ list:
+ -
+ name: getfamily
+ doc: Get / dump genetlink families
+ attribute-set: ctrl-attrs
+ do:
+ request:
+ value: 3
+ attributes:
+ - family-name
+ reply: &all-attrs
+ value: 1
+ attributes:
+ - family-id
+ - family-name
+ - hdrsize
+ - maxattr
+ - mcast-groups
+ - ops
+ - version
+ dump:
+ reply: *all-attrs
+ -
+ name: getpolicy
+ doc: Get / dump genetlink policies
+ attribute-set: ctrl-attrs
+ dump:
+ request:
+ value: 10
+ attributes:
+ - family-name
+ - family-id
+ - op
+ reply:
+ value: 10
+ attributes:
+ - family-id
+ - op-policy
+ - policy
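The getfamily op above is what libnl's family resolution speaks under the hood; a minimal user-space sketch against the real libnl-genl-3 API:

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    /* Resolve a genetlink family name to its numeric id via nlctrl's
     * getfamily (CTRL_CMD_GETFAMILY); returns a negative value on error. */
    static int resolve_family(const char *name)
    {
            struct nl_sock *sk = nl_socket_alloc();
            int id = -1;

            if (!sk)
                    return -1;
            if (!genl_connect(sk))
                    id = genl_ctrl_resolve(sk, name);
            nl_socket_free(sk);
            return id;
    }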
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index 4346fa402fc9..324fa182cd14 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -48,21 +48,28 @@ definitions:
-
name: bytes
type: u64
+ doc: Number of enqueued bytes
-
name: packets
type: u32
+ doc: Number of enqueued packets
-
name: drops
type: u32
+ doc: Packets dropped because of lack of resources
-
name: overlimits
type: u32
+ doc: |
+ Number of throttle events when this flow goes out of allocated bandwidth
-
name: bps
type: u32
+ doc: Current flow byte rate
-
name: pps
type: u32
+ doc: Current flow packet rate
-
name: qlen
type: u32
@@ -112,6 +119,7 @@ definitions:
-
name: limit
type: u32
+ doc: Queue length; bytes for bfifo, packets for pfifo
-
name: tc-htb-opt
type: struct
@@ -119,11 +127,11 @@ definitions:
-
name: rate
type: binary
- len: 12
+ struct: tc-ratespec
-
name: ceil
type: binary
- len: 12
+ struct: tc-ratespec
-
name: buffer
type: u32
@@ -149,15 +157,19 @@ definitions:
-
name: rate2quantum
type: u32
+ doc: bps->quantum divisor
-
name: defcls
type: u32
+ doc: Default class number
-
name: debug
type: u32
+ doc: Debug flags
-
name: direct-pkts
type: u32
+ doc: Count of non-shaped packets
-
name: tc-gred-qopt
type: struct
@@ -165,15 +177,19 @@ definitions:
-
name: limit
type: u32
+ doc: HARD maximal queue length in bytes
-
name: qth-min
type: u32
+ doc: Min average length threshold in bytes
-
name: qth-max
type: u32
+ doc: Max average length threshold in bytes
-
name: DP
type: u32
+ doc: Up to 2^32 DPs
-
name: backlog
type: u32
@@ -195,15 +211,19 @@ definitions:
-
name: Wlog
type: u8
+ doc: log(W)
-
name: Plog
type: u8
+ doc: log(P_max / (qth-max - qth-min))
-
name: Scell_log
type: u8
+ doc: cell size for idle damping
-
name: prio
type: u8
+ doc: Priority of this VQ
-
name: packets
type: u32
@@ -266,9 +286,11 @@ definitions:
-
name: bands
type: u16
+ doc: Number of bands
-
name: max-bands
type: u16
+ doc: Maximum number of queues
-
name: tc-netem-qopt
type: struct
@@ -276,21 +298,138 @@ definitions:
-
name: latency
type: u32
+ doc: Added delay in microseconds
-
name: limit
type: u32
+ doc: Fifo limit in packets
-
name: loss
type: u32
+ doc: Random packet loss (0=none, ~0=100%)
-
name: gap
type: u32
+ doc: Re-ordering gap (0 for none)
-
name: duplicate
type: u32
+ doc: Random packet duplication (0=none, ~0=100%)
-
name: jitter
type: u32
+ doc: Random jitter latency in microseconds
+ -
+ name: tc-netem-gimodel
+ doc: State transition probabilities for 4 state model
+ type: struct
+ members:
+ -
+ name: p13
+ type: u32
+ -
+ name: p31
+ type: u32
+ -
+ name: p32
+ type: u32
+ -
+ name: p14
+ type: u32
+ -
+ name: p23
+ type: u32
+ -
+ name: tc-netem-gemodel
+ doc: Gilbert-Elliot models
+ type: struct
+ members:
+ -
+ name: p
+ type: u32
+ -
+ name: r
+ type: u32
+ -
+ name: h
+ type: u32
+ -
+ name: k1
+ type: u32
+ -
+ name: tc-netem-corr
+ type: struct
+ members:
+ -
+ name: delay-corr
+ type: u32
+ doc: Delay correlation
+ -
+ name: loss-corr
+ type: u32
+ doc: Packet loss correlation
+ -
+ name: dup-corr
+ type: u32
+ doc: Duplicate correlation
+ -
+ name: tc-netem-reorder
+ type: struct
+ members:
+ -
+ name: probability
+ type: u32
+ -
+ name: correlation
+ type: u32
+ -
+ name: tc-netem-corrupt
+ type: struct
+ members:
+ -
+ name: probability
+ type: u32
+ -
+ name: correlation
+ type: u32
+ -
+ name: tc-netem-rate
+ type: struct
+ members:
+ -
+ name: rate
+ type: u32
+ -
+ name: packet-overhead
+ type: s32
+ -
+ name: cell-size
+ type: u32
+ -
+ name: cell-overhead
+ type: s32
+ -
+ name: tc-netem-slot
+ type: struct
+ members:
+ -
+ name: min-delay
+ type: s64
+ -
+ name: max-delay
+ type: s64
+ -
+ name: max-packets
+ type: s32
+ -
+ name: max-bytes
+ type: s32
+ -
+ name: dist-delay
+ type: s64
+ -
+ name: dist-jitter
+ type: s64
-
name: tc-plug-qopt
type: struct
@@ -307,11 +446,13 @@ definitions:
members:
-
name: bands
- type: u16
+ type: u32
+ doc: Number of bands
-
name: priomap
type: binary
len: 16
+ doc: Map of logical priority -> PRIO band
-
name: tc-red-qopt
type: struct
@@ -319,21 +460,27 @@ definitions:
-
name: limit
type: u32
+ doc: Hard queue length in packets
-
name: qth-min
type: u32
+ doc: Min average threshold in packets
-
name: qth-max
type: u32
+ doc: Max average threshold in packets
-
name: Wlog
type: u8
+ doc: log(W)
-
name: Plog
type: u8
+ doc: log(P_max / (qth-max - qth-min))
-
name: Scell-log
type: u8
+ doc: Cell size for idle damping
-
name: flags
type: u8
@@ -369,71 +516,128 @@ definitions:
name: penalty-burst
type: u32
-
- name: tc-sfq-qopt-v1 # TODO nested structs
+ name: tc-sfq-qopt
type: struct
members:
-
name: quantum
type: u32
+ doc: Bytes per round allocated to flow
-
name: perturb-period
type: s32
+ doc: Period of hash perturbation
-
name: limit
type: u32
+ doc: Maximal packets in queue
-
name: divisor
type: u32
+ doc: Hash divisor
-
name: flows
type: u32
+ doc: Maximal number of flows
+ -
+ name: tc-sfqred-stats
+ type: struct
+ members:
+ -
+ name: prob-drop
+ type: u32
+ doc: Early drops, below max threshold
+ -
+ name: forced-drop
+ type: u32
+ doc: Early drops, after max threshold
+ -
+ name: prob-mark
+ type: u32
+ doc: Marked packets, below max threshold
+ -
+ name: forced-mark
+ type: u32
+ doc: Marked packets, after max threshold
+ -
+ name: prob-mark-head
+ type: u32
+ doc: Marked packets, below max threshold
+ -
+ name: forced-mark-head
+ type: u32
+ doc: Marked packets, after max threshold
+ -
+ name: tc-sfq-qopt-v1
+ type: struct
+ members:
+ -
+ name: v0
+ type: binary
+ struct: tc-sfq-qopt
-
name: depth
type: u32
+ doc: Maximum number of packets per flow
-
name: headdrop
type: u32
-
name: limit
type: u32
+ doc: HARD maximal flow queue length in bytes
-
name: qth-min
type: u32
+ doc: Min average length threshold in bytes
-
- name: qth-mac
+ name: qth-max
type: u32
+ doc: Max average length threshold in bytes
-
name: Wlog
type: u8
+ doc: log(W)
-
name: Plog
type: u8
+ doc: log(P_max / (qth-max - qth-min))
-
name: Scell-log
type: u8
+ doc: Cell size for idle damping
-
name: flags
type: u8
-
name: max-P
type: u32
+ doc: probability, high resolution
-
- name: prob-drop
- type: u32
+ name: stats
+ type: binary
+ struct: tc-sfqred-stats
+ -
+ name: tc-ratespec
+ type: struct
+ members:
-
- name: forced-drop
- type: u32
+ name: cell-log
+ type: u8
-
- name: prob-mark
- type: u32
+ name: linklayer
+ type: u8
-
- name: forced-mark
- type: u32
+ name: overhead
+ type: u16
-
- name: prob-mark-head
- type: u32
+ name: cell-align
+ type: s16
-
- name: forced-mark-head
+ name: mpu
+ type: u16
+ -
+ name: rate
type: u32
-
name: tc-tbf-qopt
@@ -441,12 +645,12 @@ definitions:
members:
-
name: rate
- type: binary # TODO nested struct tc_ratespec
- len: 12
+ type: binary
+ struct: tc-ratespec
-
name: peakrate
- type: binary # TODO nested struct tc_ratespec
- len: 12
+ type: binary
+ struct: tc-ratespec
-
name: limit
type: u32
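The tc-ratespec references above and here replace fixed-length blobs; the layout mirrors struct tc_ratespec from include/uapi/linux/pkt_sched.h, whose 12-byte size is exactly the `len: 12` being dropped (and fixes the member widths accordingly):

    /* include/uapi/linux/pkt_sched.h: 1 + 1 + 2 + 2 + 2 + 4 = 12 bytes. */
    struct tc_ratespec {
            unsigned char   cell_log;
            __u8            linklayer;      /* lower 4 bits */
            unsigned short  overhead;
            short           cell_align;
            unsigned short  mpu;
            __u32           rate;
    };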
@@ -491,9 +695,663 @@ definitions:
-
name: interval
type: s8
+ doc: Sampling period
-
name: ewma-log
type: u8
+ doc: The log() of measurement window weight
+ -
+ name: tc-choke-xstats
+ type: struct
+ members:
+ -
+ name: early
+ type: u32
+ doc: Early drops
+ -
+ name: pdrop
+ type: u32
+ doc: Drops due to queue limits
+ -
+ name: other
+ type: u32
+ doc: Drops due to drop() calls
+ -
+ name: marked
+ type: u32
+ doc: Marked packets
+ -
+ name: matched
+ type: u32
+ doc: Drops due to flow match
+ -
+ name: tc-codel-xstats
+ type: struct
+ members:
+ -
+ name: maxpacket
+ type: u32
+ doc: Largest packet we've seen so far
+ -
+ name: count
+ type: u32
+ doc: How many drops we've done since the last time we entered dropping state
+ -
+ name: lastcount
+ type: u32
+ doc: Count at entry to dropping state
+ -
+ name: ldelay
+ type: u32
+ doc: In-queue delay seen by the most recently dequeued packet
+ -
+ name: drop-next
+ type: s32
+ doc: Time to drop next packet
+ -
+ name: drop-overlimit
+ type: u32
+ doc: Number of times max qdisc packet limit was hit
+ -
+ name: ecn-mark
+ type: u32
+ doc: Number of packets we've ECN marked instead of dropped
+ -
+ name: dropping
+ type: u32
+ doc: Are we in a dropping state?
+ -
+ name: ce-mark
+ type: u32
+ doc: Number of CE marked packets because of ce-threshold
+ -
+ name: tc-fq-codel-xstats
+ type: struct
+ members:
+ -
+ name: type
+ type: u32
+ -
+ name: maxpacket
+ type: u32
+ doc: Largest packet we've seen so far
+ -
+ name: drop-overlimit
+ type: u32
+ doc: Number of times max qdisc packet limit was hit
+ -
+ name: ecn-mark
+ type: u32
+ doc: Number of packets we ECN marked instead of being dropped
+ -
+ name: new-flow-count
+ type: u32
+ doc: Number of times packets created a new flow
+ -
+ name: new-flows-len
+ type: u32
+ doc: Count of flows in new list
+ -
+ name: old-flows-len
+ type: u32
+ doc: Count of flows in old list
+ -
+ name: ce-mark
+ type: u32
+ doc: Packets above ce-threshold
+ -
+ name: memory-usage
+ type: u32
+ doc: Memory usage in bytes
+ -
+ name: drop-overmemory
+ type: u32
+ -
+ name: tc-fq-pie-xstats
+ type: struct
+ members:
+ -
+ name: packets-in
+ type: u32
+ doc: Total number of packets enqueued
+ -
+ name: dropped
+ type: u32
+ doc: Packets dropped due to fq_pie_action
+ -
+ name: overlimit
+ type: u32
+ doc: Dropped due to lack of space in queue
+ -
+ name: overmemory
+ type: u32
+ doc: Dropped due to lack of memory in queue
+ -
+ name: ecn-mark
+ type: u32
+ doc: Packets marked with ecn
+ -
+ name: new-flow-count
+ type: u32
+ doc: Count of new flows created by packets
+ -
+ name: new-flows-len
+ type: u32
+ doc: Count of flows in new list
+ -
+ name: old-flows-len
+ type: u32
+ doc: Count of flows in old list
+ -
+ name: memory-usage
+ type: u32
+ doc: Total memory across all queues
+ -
+ name: tc-fq-qd-stats
+ type: struct
+ members:
+ -
+ name: gc-flows
+ type: u64
+ -
+ name: highprio-packets
+ type: u64
+ doc: obsolete
+ -
+ name: tcp-retrans
+ type: u64
+ doc: obsolete
+ -
+ name: throttled
+ type: u64
+ -
+ name: flows-plimit
+ type: u64
+ -
+ name: pkts-too-long
+ type: u64
+ -
+ name: allocation-errors
+ type: u64
+ -
+ name: time-next-delayed-flow
+ type: s64
+ -
+ name: flows
+ type: u32
+ -
+ name: inactive-flows
+ type: u32
+ -
+ name: throttled-flows
+ type: u32
+ -
+ name: unthrottle-latency-ns
+ type: u32
+ -
+ name: ce-mark
+ type: u64
+ doc: Packets above ce-threshold
+ -
+ name: horizon-drops
+ type: u64
+ -
+ name: horizon-caps
+ type: u64
+ -
+ name: fastpath-packets
+ type: u64
+ -
+ name: band-drops
+ type: binary
+ len: 24
+ -
+ name: band-pkt-count
+ type: binary
+ len: 12
+ -
+ name: pad
+ type: pad
+ len: 4
+ -
+ name: tc-hhf-xstats
+ type: struct
+ members:
+ -
+ name: drop-overlimit
+ type: u32
+ doc: Number of times max qdisc packet limit was hit
+ -
+ name: hh-overlimit
+ type: u32
+ doc: Number of times max heavy-hitters was hit
+ -
+ name: hh-tot-count
+ type: u32
+ doc: Number of captured heavy-hitters so far
+ -
+ name: hh-cur-count
+ type: u32
+ doc: Number of current heavy-hitters
+ -
+ name: tc-pie-xstats
+ type: struct
+ members:
+ -
+ name: prob
+ type: u64
+ doc: Current probability
+ -
+ name: delay
+ type: u32
+ doc: Current delay in ms
+ -
+ name: avg-dq-rate
+ type: u32
+ doc: Current average dq rate in bits/pie-time
+ -
+ name: dq-rate-estimating
+ type: u32
+ doc: Is avg-dq-rate being calculated?
+ -
+ name: packets-in
+ type: u32
+ doc: Total number of packets enqueued
+ -
+ name: dropped
+ type: u32
+ doc: Packets dropped due to pie action
+ -
+ name: overlimit
+ type: u32
+ doc: Dropped due to lack of space in queue
+ -
+ name: maxq
+ type: u32
+ doc: Maximum queue size
+ -
+ name: ecn-mark
+ type: u32
+ doc: Packets marked with ecn
+ -
+ name: tc-red-xstats
+ type: struct
+ members:
+ -
+ name: early
+ type: u32
+ doc: Early drops
+ -
+ name: pdrop
+ type: u32
+ doc: Drops due to queue limits
+ -
+ name: other
+ type: u32
+ doc: Drops due to drop() calls
+ -
+ name: marked
+ type: u32
+ doc: Marked packets
+ -
+ name: tc-sfb-xstats
+ type: struct
+ members:
+ -
+ name: earlydrop
+ type: u32
+ -
+ name: penaltydrop
+ type: u32
+ -
+ name: bucketdrop
+ type: u32
+ -
+ name: queuedrop
+ type: u32
+ -
+ name: childdrop
+ type: u32
+ doc: drops in child qdisc
+ -
+ name: marked
+ type: u32
+ -
+ name: maxqlen
+ type: u32
+ -
+ name: maxprob
+ type: u32
+ -
+ name: avgprob
+ type: u32
+ -
+ name: tc-sfq-xstats
+ type: struct
+ members:
+ -
+ name: allot
+ type: s32
+ -
+ name: gnet-stats-basic
+ type: struct
+ members:
+ -
+ name: bytes
+ type: u64
+ -
+ name: packets
+ type: u32
+ -
+ name: gnet-stats-rate-est
+ type: struct
+ members:
+ -
+ name: bps
+ type: u32
+ -
+ name: pps
+ type: u32
+ -
+ name: gnet-stats-rate-est64
+ type: struct
+ members:
+ -
+ name: bps
+ type: u64
+ -
+ name: pps
+ type: u64
+ -
+ name: gnet-stats-queue
+ type: struct
+ members:
+ -
+ name: qlen
+ type: u32
+ -
+ name: backlog
+ type: u32
+ -
+ name: drops
+ type: u32
+ -
+ name: requeues
+ type: u32
+ -
+ name: overlimits
+ type: u32
+ -
+ name: tc-u32-key
+ type: struct
+ members:
+ -
+ name: mask
+ type: u32
+ byte-order: big-endian
+ -
+ name: val
+ type: u32
+ byte-order: big-endian
+ -
+ name: "off"
+ type: s32
+ -
+ name: offmask
+ type: s32
+ -
+ name: tc-u32-sel
+ type: struct
+ members:
+ -
+ name: flags
+ type: u8
+ -
+ name: offshift
+ type: u8
+ -
+ name: nkeys
+ type: u8
+ -
+ name: offmask
+ type: u16
+ byte-order: big-endian
+ -
+ name: "off"
+ type: u16
+ -
+ name: offoff
+ type: s16
+ -
+ name: hoff
+ type: s16
+ -
+ name: hmask
+ type: u32
+ byte-order: big-endian
+ -
+ name: keys
+ type: binary
+ struct: tc-u32-key # TODO: array
+ -
+ name: tc-u32-pcnt
+ type: struct
+ members:
+ -
+ name: rcnt
+ type: u64
+ -
+ name: rhit
+ type: u64
+ -
+ name: kcnts
+ type: u64 # TODO: array
+ -
+ name: tcf-t
+ type: struct
+ members:
+ -
+ name: install
+ type: u64
+ -
+ name: lastuse
+ type: u64
+ -
+ name: expires
+ type: u64
+ -
+ name: firstuse
+ type: u64
+ -
+ name: tc-gen
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: tc-gact-p
+ type: struct
+ members:
+ -
+ name: ptype
+ type: u16
+ -
+ name: pval
+ type: u16
+ -
+ name: paction
+ type: s32
+ -
+ name: tcf-ematch-tree-hdr
+ type: struct
+ members:
+ -
+ name: nmatches
+ type: u16
+ -
+ name: progid
+ type: u16
+ -
+ name: tc-basic-pcnt
+ type: struct
+ members:
+ -
+ name: rcnt
+ type: u64
+ -
+ name: rhit
+ type: u64
+ -
+ name: tc-matchall-pcnt
+ type: struct
+ members:
+ -
+ name: rhit
+ type: u64
+ -
+ name: tc-mpls
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: m-action
+ type: s32
+ -
+ name: tc-police
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: limit
+ type: u32
+ -
+ name: burst
+ type: u32
+ -
+ name: mtu
+ type: u32
+ -
+ name: rate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: peakrate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: capab
+ type: u32
+ -
+ name: tc-pedit-sel
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: nkeys
+ type: u8
+ -
+ name: flags
+ type: u8
+ -
+ name: keys
+ type: binary
+ struct: tc-pedit-key # TODO: array
+ -
+ name: tc-pedit-key
+ type: struct
+ members:
+ -
+ name: mask
+ type: u32
+ -
+ name: val
+ type: u32
+ -
+ name: "off"
+ type: u32
+ -
+ name: at
+ type: u32
+ -
+ name: offmask
+ type: u32
+ -
+ name: shift
+ type: u32
+ -
+ name: tc-vlan
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: v-action
+ type: s32
attribute-sets:
-
name: tc-attrs
@@ -512,7 +1370,9 @@ attribute-sets:
struct: tc-stats
-
name: xstats
- type: binary
+ type: sub-message
+ sub-message: tca-stats-app-msg
+ selector: kind
-
name: rate
type: binary
@@ -553,6 +1413,582 @@ attribute-sets:
name: ext-warn-msg
type: string
-
+ name: tc-act-attrs
+ attributes:
+ -
+ name: kind
+ type: string
+ -
+ name: options
+ type: sub-message
+ sub-message: tc-act-options-msg
+ selector: kind
+ -
+ name: index
+ type: u32
+ -
+ name: stats
+ type: nest
+ nested-attributes: tc-act-stats-attrs
+ -
+ name: pad
+ type: pad
+ -
+ name: cookie
+ type: binary
+ -
+ name: flags
+ type: bitfield32
+ -
+ name: hw-stats
+ type: bitfield32
+ -
+ name: used-hw-stats
+ type: bitfield32
+ -
+ name: in-hw-count
+ type: u32
+ -
+ name: tc-act-stats-attrs
+ attributes:
+ -
+ name: basic
+ type: binary
+ struct: gnet-stats-basic
+ -
+ name: rate-est
+ type: binary
+ struct: gnet-stats-rate-est
+ -
+ name: queue
+ type: binary
+ struct: gnet-stats-queue
+ -
+ name: app
+ type: binary
+ -
+ name: rate-est64
+ type: binary
+ struct: gnet-stats-rate-est64
+ -
+ name: pad
+ type: pad
+ -
+ name: basic-hw
+ type: binary
+ struct: gnet-stats-basic
+ -
+ name: pkt64
+ type: u64
+ -
+ name: tc-act-bpf-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: ops-len
+ type: u16
+ -
+ name: ops
+ type: binary
+ -
+ name: fd
+ type: u32
+ -
+ name: name
+ type: string
+ -
+ name: pad
+ type: pad
+ -
+ name: tag
+ type: binary
+ -
+ name: id
+ type: binary
+ -
+ name: tc-act-connmark-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-csum-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-ct-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: action
+ type: u16
+ -
+ name: zone
+ type: u16
+ -
+ name: mark
+ type: u32
+ -
+ name: mark-mask
+ type: u32
+ -
+ name: labels
+ type: binary
+ -
+ name: labels-mask
+ type: binary
+ -
+ name: nat-ipv4-min
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-ipv4-max
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-ipv6-min
+ type: binary
+ -
+ name: nat-ipv6-max
+ type: binary
+ -
+ name: nat-port-min
+ type: u16
+ byte-order: big-endian
+ -
+ name: nat-port-max
+ type: u16
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: helper-name
+ type: string
+ -
+ name: helper-family
+ type: u8
+ -
+ name: helper-proto
+ type: u8
+ -
+ name: tc-act-ctinfo-attrs
+ attributes:
+ -
+ name: pad
+ type: pad
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: act
+ type: binary
+ -
+ name: zone
+ type: u16
+ -
+ name: parms-dscp-mask
+ type: u32
+ -
+ name: parms-dscp-statemask
+ type: u32
+ -
+ name: parms-cpmark-mask
+ type: u32
+ -
+ name: stats-dscp-set
+ type: u64
+ -
+ name: stats-dscp-error
+ type: u64
+ -
+ name: stats-cpmark-set
+ type: u64
+ -
+ name: tc-act-gate-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: priority
+ type: s32
+ -
+ name: entry-list
+ type: binary
+ -
+ name: base-time
+ type: u64
+ -
+ name: cycle-time
+ type: u64
+ -
+ name: cycle-time-ext
+ type: u64
+ -
+ name: flags
+ type: u32
+ -
+ name: clockid
+ type: s32
+ -
+ name: tc-act-ife-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: dmac
+ type: binary
+ -
+ name: smac
+ type: binary
+ -
+ name: type
+ type: u16
+ -
+ name: metalst
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-mirred-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: blockid
+ type: binary
+ -
+ name: tc-act-mpls-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-mpls
+ -
+ name: pad
+ type: pad
+ -
+ name: proto
+ type: u16
+ byte-order: big-endian
+ -
+ name: label
+ type: u32
+ -
+ name: tc
+ type: u8
+ -
+ name: ttl
+ type: u8
+ -
+ name: bos
+ type: u8
+ -
+ name: tc-act-nat-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-pedit-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-pedit-sel
+ -
+ name: pad
+ type: pad
+ -
+ name: parms-ex
+ type: binary
+ -
+ name: keys-ex
+ type: binary
+ -
+ name: key-ex
+ type: binary
+ -
+ name: tc-act-simple-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: data
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-skbedit-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: priority
+ type: u32
+ -
+ name: queue-mapping
+ type: u16
+ -
+ name: mark
+ type: u32
+ -
+ name: pad
+ type: pad
+ -
+ name: ptype
+ type: u16
+ -
+ name: mask
+ type: u32
+ -
+ name: flags
+ type: u64
+ -
+ name: queue-mapping-max
+ type: u16
+ -
+ name: tc-act-skbmod-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: dmac
+ type: binary
+ -
+ name: smac
+ type: binary
+ -
+ name: etype
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-tunnel-key-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: enc-ipv4-src
+ type: u32
+ byte-order: big-endian
+ -
+ name: enc-ipv4-dst
+ type: u32
+ byte-order: big-endian
+ -
+ name: enc-ipv6-src
+ type: binary
+ -
+ name: enc-ipv6-dst
+ type: binary
+ -
+ name: enc-key-id
+ type: u64
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: enc-dst-port
+ type: u16
+ byte-order: big-endian
+ -
+ name: no-csum
+ type: u8
+ -
+ name: enc-opts
+ type: binary
+ -
+ name: enc-tos
+ type: u8
+ -
+ name: enc-ttl
+ type: u8
+ -
+ name: no-frag
+ type: flag
+ -
+ name: tc-act-vlan-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-vlan
+ -
+ name: push-vlan-id
+ type: u16
+ -
+ name: push-vlan-protocol
+ type: u16
+ -
+ name: pad
+ type: pad
+ -
+ name: push-vlan-priority
+ type: u8
+ -
+ name: push-eth-dst
+ type: binary
+ -
+ name: push-eth-src
+ type: binary
+ -
+ name: tc-basic-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: ematches
+ type: nest
+ nested-attributes: tc-ematch-attrs
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: pcnt
+ type: binary
+ struct: tc-basic-pcnt
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-bpf-attrs
+ attributes:
+ -
+ name: act
+ type: nest
+ nested-attributes: tc-act-attrs
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: classid
+ type: u32
+ -
+ name: ops-len
+ type: u16
+ -
+ name: ops
+ type: binary
+ -
+ name: fd
+ type: u32
+ -
+ name: name
+ type: string
+ -
+ name: flags
+ type: u32
+ -
+ name: flags-gen
+ type: u32
+ -
+ name: tag
+ type: binary
+ -
+ name: id
+ type: u32
+ -
name: tc-cake-attrs
attributes:
-
@@ -641,7 +2077,8 @@ attribute-sets:
type: u32
-
name: tin-stats
- type: binary
+ type: array-nest
+ nested-attributes: tc-cake-tin-stats-attrs
-
name: deficit
type: s32
@@ -661,6 +2098,84 @@ attribute-sets:
name: blue-timer-us
type: s32
-
+ name: tc-cake-tin-stats-attrs
+ attributes:
+ -
+ name: pad
+ type: pad
+ -
+ name: sent-packets
+ type: u32
+ -
+ name: sent-bytes64
+ type: u64
+ -
+ name: dropped-packets
+ type: u32
+ -
+ name: dropped-bytes64
+ type: u64
+ -
+ name: acks-dropped-packets
+ type: u32
+ -
+ name: acks-dropped-bytes64
+ type: u64
+ -
+ name: ecn-marked-packets
+ type: u32
+ -
+ name: ecn-marked-bytes64
+ type: u64
+ -
+ name: backlog-packets
+ type: u32
+ -
+ name: backlog-bytes
+ type: u32
+ -
+ name: threshold-rate64
+ type: u64
+ -
+ name: target-us
+ type: u32
+ -
+ name: interval-us
+ type: u32
+ -
+ name: way-indirect-hits
+ type: u32
+ -
+ name: way-misses
+ type: u32
+ -
+ name: way-collisions
+ type: u32
+ -
+ name: peak-delay-us
+ type: u32
+ -
+ name: avg-delay-us
+ type: u32
+ -
+ name: base-delay-us
+ type: u32
+ -
+ name: sparse-flows
+ type: u32
+ -
+ name: bulk-flows
+ type: u32
+ -
+ name: unresponsive-flows
+ type: u32
+ -
+ name: max-skblen
+ type: u32
+ -
+ name: flow-quantum
+ type: u32
+ -
name: tc-cbs-attrs
attributes:
-
@@ -668,6 +2183,20 @@ attribute-sets:
type: binary
struct: tc-cbs-qopt
-
+ name: tc-cgroup-attrs
+ attributes:
+ -
+ name: act
+ type: nest
+ nested-attributes: tc-act-attrs
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: ematches
+ type: binary
+ -
name: tc-choke-attrs
attributes:
-
@@ -677,6 +2206,9 @@ attribute-sets:
-
name: stab
type: binary
+ checks:
+ min-len: 256
+ max-len: 256
-
name: max-p
type: u32
@@ -705,6 +2237,56 @@ attribute-sets:
name: quantum
type: u32
-
+ name: tc-ematch-attrs
+ attributes:
+ -
+ name: tree-hdr
+ type: binary
+ struct: tcf-ematch-tree-hdr
+ -
+ name: tree-list
+ type: binary
+ -
+ name: tc-flow-attrs
+ attributes:
+ -
+ name: keys
+ type: u32
+ -
+ name: mode
+ type: u32
+ -
+ name: baseclass
+ type: u32
+ -
+ name: rshift
+ type: u32
+ -
+ name: addend
+ type: u32
+ -
+ name: mask
+ type: u32
+ -
+ name: xor
+ type: u32
+ -
+ name: divisor
+ type: u32
+ -
+ name: act
+ type: binary
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: ematches
+ type: binary
+ -
+ name: perturb
+ type: u32
+ -
name: tc-flower-attrs
attributes:
-
@@ -953,15 +2535,19 @@ attribute-sets:
-
name: key-arp-sha
type: binary
+ display-hint: mac
-
name: key-arp-sha-mask
type: binary
+ display-hint: mac
-
name: key-arp-tha
type: binary
+ display-hint: mac
-
name: key-arp-tha-mask
type: binary
+ display-hint: mac
-
name: key-mpls-ttl
type: u8
@@ -1020,10 +2606,12 @@ attribute-sets:
type: u8
-
name: key-enc-opts
- type: binary
+ type: nest
+ nested-attributes: tc-flower-key-enc-opts-attrs
-
name: key-enc-opts-mask
- type: binary
+ type: nest
+ nested-attributes: tc-flower-key-enc-opts-attrs
-
name: in-hw-count
type: u32
@@ -1069,7 +2657,8 @@ attribute-sets:
type: binary
-
name: key-mpls-opts
- type: binary
+ type: nest
+ nested-attributes: tc-flower-key-mpls-opt-attrs
-
name: key-hash
type: u32
@@ -1091,6 +2680,129 @@ attribute-sets:
name: key-l2-tpv3-sid
type: u32
byte-order: big-endian
+ -
+ name: l2-miss
+ type: u8
+ -
+ name: key-cfm
+ type: nest
+ nested-attributes: tc-flower-key-cfm-attrs
+ -
+ name: key-spi
+ type: u32
+ byte-order: big-endian
+ -
+ name: key-spi-mask
+ type: u32
+ byte-order: big-endian
+ -
+ name: tc-flower-key-enc-opts-attrs
+ attributes:
+ -
+ name: geneve
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-geneve-attrs
+ -
+ name: vxlan
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-vxlan-attrs
+ -
+ name: erspan
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-erspan-attrs
+ -
+ name: gtp
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-gtp-attrs
+ -
+ name: tc-flower-key-enc-opt-geneve-attrs
+ attributes:
+ -
+ name: class
+ type: u16
+ -
+ name: type
+ type: u8
+ -
+ name: data
+ type: binary
+ -
+ name: tc-flower-key-enc-opt-vxlan-attrs
+ attributes:
+ -
+ name: gbp
+ type: u32
+ -
+ name: tc-flower-key-enc-opt-erspan-attrs
+ attributes:
+ -
+ name: ver
+ type: u8
+ -
+ name: index
+ type: u32
+ -
+ name: dir
+ type: u8
+ -
+ name: hwid
+ type: u8
+ -
+ name: tc-flower-key-enc-opt-gtp-attrs
+ attributes:
+ -
+ name: pdu-type
+ type: u8
+ -
+ name: qfi
+ type: u8
+ -
+ name: tc-flower-key-mpls-opt-attrs
+ attributes:
+ -
+ name: lse-depth
+ type: u8
+ -
+ name: lse-ttl
+ type: u8
+ -
+ name: lse-bos
+ type: u8
+ -
+ name: lse-tc
+ type: u8
+ -
+ name: lse-label
+ type: u32
+ -
+ name: tc-flower-key-cfm-attrs
+ attributes:
+ -
+ name: md-level
+ type: u8
+ -
+ name: opcode
+ type: u8
+ -
+ name: tc-fw-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: indev
+ type: string
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
+ name: mask
+ type: u32
-
name: tc-gred-attrs
attributes:
@@ -1135,7 +2847,7 @@ attribute-sets:
type: u32
-
name: stat-bytes
- type: u32
+ type: u64
-
name: stat-packets
type: u32
@@ -1232,40 +2944,25 @@ attribute-sets:
name: offload
type: flag
-
- name: tc-act-attrs
+ name: tc-matchall-attrs
attributes:
-
- name: kind
- type: string
+ name: classid
+ type: u32
-
- name: options
- type: sub-message
- sub-message: tc-act-options-msg
- selector: kind
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
-
- name: index
+ name: flags
type: u32
-
- name: stats
+ name: pcnt
type: binary
+ struct: tc-matchall-pcnt
-
name: pad
type: pad
- -
- name: cookie
- type: binary
- -
- name: flags
- type: bitfield32
- -
- name: hw-stats
- type: bitfield32
- -
- name: used-hw-stats
- type: bitfield32
- -
- name: in-hw-count
- type: u32
-
name: tc-etf-attrs
attributes:
@@ -1304,48 +3001,71 @@ attribute-sets:
-
name: plimit
type: u32
+ doc: Limit of total number of packets in queue
-
name: flow-plimit
type: u32
+ doc: Limit of packets per flow
-
name: quantum
type: u32
+ doc: RR quantum
-
name: initial-quantum
type: u32
+ doc: RR quantum for new flow
-
name: rate-enable
type: u32
+ doc: Enable / disable rate limiting
-
name: flow-default-rate
type: u32
+ doc: Obsolete, do not use
-
name: flow-max-rate
type: u32
+ doc: Per flow max rate
-
name: buckets-log
type: u32
+ doc: log2(number of buckets)
-
name: flow-refill-delay
type: u32
+ doc: Flow credit refill delay in usec
-
name: orphan-mask
type: u32
+ doc: Mask applied to orphaned skb hashes
-
name: low-rate-threshold
type: u32
+ doc: Per packet delay under this rate
-
name: ce-threshold
type: u32
+ doc: DCTCP-like CE marking threshold
-
name: timer-slack
type: u32
-
name: horizon
type: u32
+ doc: Time horizon in usec
-
name: horizon-drop
type: u8
+ doc: Drop packets beyond horizon, or cap their EDT
+ -
+ name: priomap
+ type: binary
+ struct: tc-prio-qopt
+ -
+ name: weights
+ type: binary
+ sub-type: s32
+ doc: Weights for each band
-
name: tc-fq-codel-attrs
attributes:
@@ -1427,6 +3147,7 @@ attribute-sets:
-
name: corr
type: binary
+ struct: tc-netem-corr
-
name: delay-dist
type: binary
@@ -1434,15 +3155,19 @@ attribute-sets:
-
name: reorder
type: binary
+ struct: tc-netem-reorder
-
name: corrupt
type: binary
+ struct: tc-netem-corrupt
-
name: loss
- type: binary
+ type: nest
+ nested-attributes: tc-netem-loss-attrs
-
name: rate
type: binary
+ struct: tc-netem-rate
-
name: ecn
type: u32
@@ -1461,10 +3186,27 @@ attribute-sets:
-
name: slot
type: binary
+ struct: tc-netem-slot
-
name: slot-dist
type: binary
sub-type: s16
+ -
+ name: prng-seed
+ type: u64
+ -
+ name: tc-netem-loss-attrs
+ attributes:
+ -
+ name: gi
+ type: binary
+ doc: General Intuitive - 4 state model
+ struct: tc-netem-gimodel
+ -
+ name: ge
+ type: binary
+ doc: Gilbert Elliot models
+ struct: tc-netem-gemodel
-
name: tc-pie-attrs
attributes:
@@ -1493,6 +3235,44 @@ attribute-sets:
name: dq-rate-estimator
type: u32
-
+ name: tc-police-attrs
+ attributes:
+ -
+ name: tbf
+ type: binary
+ struct: tc-police
+ -
+ name: rate
+ type: binary
+ -
+ name: peakrate
+ type: binary
+ -
+ name: avrate
+ type: u32
+ -
+ name: result
+ type: u32
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: rate64
+ type: u64
+ -
+ name: peakrate64
+ type: u64
+ -
+ name: pktrate64
+ type: u64
+ -
+ name: pktburst64
+ type: u64
+ -
name: tc-qfq-attrs
attributes:
-
@@ -1516,7 +3296,7 @@ attribute-sets:
type: u32
-
name: flags
- type: binary
+ type: bitfield32
-
name: early-drop-block
type: u32
@@ -1524,6 +3304,29 @@ attribute-sets:
name: mark-block
type: u32
-
+ name: tc-route-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: to
+ type: u32
+ -
+ name: from
+ type: u32
+ -
+ name: iif
+ type: u32
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
name: tc-taprio-attrs
attributes:
-
@@ -1573,6 +3376,7 @@ attribute-sets:
name: entry
type: nest
nested-attributes: tc-taprio-sched-entry
+ multi-attr: true
-
name: tc-taprio-sched-entry
attributes:
@@ -1629,17 +3433,43 @@ attribute-sets:
name: pad
type: pad
-
- name: tca-gact-attrs
+ name: tc-act-sample-attrs
attributes:
-
name: tm
type: binary
+ struct: tcf-t
-
name: parms
type: binary
+ struct: tc-gen
+ -
+ name: rate
+ type: u32
+ -
+ name: trunc-size
+ type: u32
+ -
+ name: psample-group
+ type: u32
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-gact-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-gen
-
name: prob
type: binary
+ struct: tc-gact-p
-
name: pad
type: pad
@@ -1659,35 +3489,90 @@ attribute-sets:
-
name: basic
type: binary
+ struct: gnet-stats-basic
-
name: rate-est
type: binary
+ struct: gnet-stats-rate-est
-
name: queue
type: binary
+ struct: gnet-stats-queue
-
name: app
- type: binary # TODO sub-message needs 2+ level deep lookup
+ type: sub-message
sub-message: tca-stats-app-msg
selector: kind
-
name: rate-est64
type: binary
+ struct: gnet-stats-rate-est64
-
name: pad
type: pad
-
name: basic-hw
type: binary
+ struct: gnet-stats-basic
-
name: pkt64
+ type: u64
+ -
+ name: tc-u32-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: hash
+ type: u32
+ -
+ name: link
+ type: u32
+ -
+ name: divisor
+ type: u32
+ -
+ name: sel
type: binary
+ struct: tc-u32-sel
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
+ name: indev
+ type: string
+ -
+ name: pcnt
+ type: binary
+ struct: tc-u32-pcnt
+ -
+ name: mark
+ type: binary
+ struct: tc-u32-mark
+ -
+ name: flags
+ type: u32
+ -
+ name: pad
+ type: pad
sub-messages:
-
name: tc-options-msg
formats:
-
+ value: basic
+ attribute-set: tc-basic-attrs
+ -
+ value: bpf
+ attribute-set: tc-bpf-attrs
+ -
value: bfifo
fixed-header: tc-fifo-qopt
-
@@ -1697,6 +3582,9 @@ sub-messages:
value: cbs
attribute-set: tc-cbs-attrs
-
+ value: cgroup
+ attribute-set: tc-cgroup-attrs
+ -
value: choke
attribute-set: tc-choke-attrs
-
@@ -1714,6 +3602,12 @@ sub-messages:
value: ets
attribute-set: tc-ets-attrs
-
+ value: flow
+ attribute-set: tc-flow-attrs
+ -
+ value: flower
+ attribute-set: tc-flower-attrs
+ -
value: fq
attribute-set: tc-fq-attrs
-
@@ -1723,8 +3617,8 @@ sub-messages:
value: fq_pie
attribute-set: tc-fq-pie-attrs
-
- value: flower
- attribute-set: tc-flower-attrs
+ value: fw
+ attribute-set: tc-fw-attrs
-
value: gred
attribute-set: tc-gred-attrs
@@ -1740,6 +3634,9 @@ sub-messages:
-
value: ingress # no content
-
+ value: matchall
+ attribute-set: tc-matchall-attrs
+ -
value: mq # no content
-
value: mqprio
@@ -1776,6 +3673,9 @@ sub-messages:
value: red
attribute-set: tc-red-attrs
-
+ value: route
+ attribute-set: tc-route-attrs
+ -
value: sfb
fixed-header: tc-sfb-qopt
-
@@ -1787,88 +3687,105 @@ sub-messages:
-
value: tbf
attribute-set: tc-tbf-attrs
+ -
+ value: u32
+ attribute-set: tc-u32-attrs
-
name: tc-act-options-msg
formats:
-
- value: gact
- attribute-set: tca-gact-attrs
- -
- name: tca-stats-app-msg
- formats:
+ value: bpf
+ attribute-set: tc-act-bpf-attrs
-
- value: bfifo
+ value: connmark
+ attribute-set: tc-act-connmark-attrs
-
- value: blackhole
+ value: csum
+ attribute-set: tc-act-csum-attrs
-
- value: cake
- attribute-set: tc-cake-stats-attrs
+ value: ct
+ attribute-set: tc-act-ct-attrs
-
- value: cbs
+ value: ctinfo
+ attribute-set: tc-act-ctinfo-attrs
-
- value: choke
+ value: gact
+ attribute-set: tc-act-gact-attrs
-
- value: clsact
+ value: gate
+ attribute-set: tc-act-gate-attrs
-
- value: codel
+ value: ife
+ attribute-set: tc-act-ife-attrs
-
- value: drr
+ value: mirred
+ attribute-set: tc-act-mirred-attrs
-
- value: etf
+ value: mpls
+ attribute-set: tc-act-mpls-attrs
-
- value: ets
- -
- value: fq
- -
- value: fq_codel
+ value: nat
+ attribute-set: tc-act-nat-attrs
-
- value: fq_pie
+ value: pedit
+ attribute-set: tc-act-pedit-attrs
-
- value: flower
+ value: police
+ attribute-set: tc-act-police-attrs
-
- value: gred
+ value: sample
+ attribute-set: tc-act-sample-attrs
-
- value: hfsc
+ value: simple
+ attribute-set: tc-act-simple-attrs
-
- value: hhf
+ value: skbedit
+ attribute-set: tc-act-skbedit-attrs
-
- value: htb
+ value: skbmod
+ attribute-set: tc-act-skbmod-attrs
-
- value: ingress
+ value: tunnel_key
+ attribute-set: tc-act-tunnel-key-attrs
-
- value: mq
+ value: vlan
+ attribute-set: tc-act-vlan-attrs
+ -
+ name: tca-stats-app-msg
+ formats:
-
- value: mqprio
+ value: cake
+ attribute-set: tc-cake-stats-attrs
-
- value: multiq
+ value: choke
+ fixed-header: tc-choke-xstats
-
- value: netem
+ value: codel
+ fixed-header: tc-codel-xstats
-
- value: noqueue
+ value: fq
+ fixed-header: tc-fq-qd-stats
-
- value: pfifo
+ value: fq_codel
+ fixed-header: tc-fq-codel-xstats
-
- value: pfifo_fast
+ value: fq_pie
+ fixed-header: tc-fq-pie-xstats
-
- value: pfifo_head_drop
+ value: hhf
+ fixed-header: tc-hhf-xstats
-
value: pie
- -
- value: plug
- -
- value: prio
- -
- value: qfq
+ fixed-header: tc-pie-xstats
-
value: red
+ fixed-header: tc-red-xstats
-
value: sfb
+ fixed-header: tc-sfb-xstats
-
value: sfq
- -
- value: taprio
- -
- value: tbf
+ fixed-header: tc-sfq-xstats
operations:
enum-model: directional
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index dceeb0d763aa..72da7057e4cf 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -329,23 +329,24 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.
-There is no need to supply an XDP program like the one in the previous
-case where sockets were bound to the same queue id and
-device. Instead, use the NIC's packet steering capabilities to steer
-the packets to the right queue. In the previous example, there is only
-one queue shared among sockets, so the NIC cannot do this steering. It
-can only steer between queues.
-
-In libbpf, you need to use the xsk_socket__create_shared() API as it
-takes a reference to a FILL ring and a COMPLETION ring that will be
-created for you and bound to the shared UMEM. You can use this
-function for all the sockets you create, or you can use it for the
-second and following ones and use xsk_socket__create() for the first
-one. Both methods yield the same result.
+In this case, it is possible to use the NIC's packet steering
+capabilities to steer the packets to the right queue. This is not
+possible in the previous example, as there is only one queue shared
+among sockets; the NIC cannot do this steering, as it can only steer
+between queues.
+
+In libxdp (or libbpf prior to version 1.0), you need to use the
+xsk_socket__create_shared() API as it takes a reference to a FILL ring
+and a COMPLETION ring that will be created for you and bound to the
+shared UMEM. You can use this function for all the sockets you create,
+or you can use it for the second and following ones and use
+xsk_socket__create() for the first one. Both methods yield the same
+result.
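+
+For illustration, a minimal sketch of the shared-UMEM setup could look
+as follows (assuming libxdp's declared xsk_socket__create_shared()
+signature; ``umem`` and ``cfg`` are placeholders prepared beforehand)::
+
+    struct xsk_ring_prod tx0, tx1, fill0, fill1;
+    struct xsk_ring_cons rx0, rx1, comp0, comp1;
+    struct xsk_socket *xsk0, *xsk1;
+    int err;
+
+    /* First socket: a FILL and a COMPLETION ring are created for it
+     * and bound to the shared UMEM, serving queue 0. */
+    err = xsk_socket__create_shared(&xsk0, "eth0", 0, umem, &rx0, &tx0,
+                                    &fill0, &comp0, &cfg);
+
+    /* Second socket on queue 1, sharing the same UMEM with its own
+     * FILL and COMPLETION rings. */
+    err = xsk_socket__create_shared(&xsk1, "eth0", 1, umem, &rx1, &tx1,
+                                    &fill1, &comp1, &cfg);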
Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
-devices at the same time.
+devices at the same time. It is also possible to redirect to any
+socket as long as it is bound to the same UMEM with XDP_SHARED_UMEM.
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -822,6 +823,10 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
+ Note that if you are using the XDP_SHARED_UMEM option, it is
+ possible to switch traffic between any socket bound to the same
+   UMEM.
+
Q: My packets are sometimes corrupted. What is wrong?
A: Care has to be taken not to feed the same buffer in the UMEM into
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index f7a73421eb76..e774b48de9f5 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -444,6 +444,18 @@ arp_missed_max
The default value is 2, and the allowable range is 1 - 255.
+coupled_control
+
+ Specifies whether the LACP state machine's MUX in the 802.3ad mode
+ should have separate Collecting and Distributing states.
+
+	This is achieved by implementing the independent control state
+	machine per IEEE 802.1AX-2008 5.4.15, in addition to the existing
+	coupled control state machine.
+
+	The default value is 1. This setting keeps the Collecting and
+	Distributing states coupled, maintaining the bond in coupled control.
+
downdelay
Specifies the time, in milliseconds, to wait before disabling
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index d7e1ada905b2..62519d38c58b 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -444,6 +444,24 @@ definitions are specified for CAN specific MTUs in include/linux/can.h:
#define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame
+Returned Message Flags
+----------------------
+
+When using the system call recvmsg(2) on a RAW or a BCM socket, the
+msg->msg_flags field may contain the following flags:
+
+MSG_DONTROUTE:
+ set when the received frame was created on the local host.
+
+MSG_CONFIRM:
+ set when the frame was sent via the socket it is received on.
+ This flag can be interpreted as a 'transmission confirmation' when the
+ CAN driver supports the echo of frames on driver level, see
+ :ref:`socketcan-local-loopback1` and :ref:`socketcan-local-loopback2`.
+ (Note: In order to receive such messages on a RAW socket,
+ CAN_RAW_RECV_OWN_MSGS must be set.)
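+
+For illustration, a receive path could check these flags as follows
+(a sketch assuming an already bound RAW socket ``s``)::
+
+    struct can_frame frame;
+    struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
+    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
+
+    if (recvmsg(s, &msg, 0) > 0) {
+            if (msg.msg_flags & MSG_CONFIRM)
+                    ; /* frame was sent via 's' and echoed by the driver */
+            if (msg.msg_flags & MSG_DONTROUTE)
+                    ; /* frame was created on the local host */
+    }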
+
+
.. _socketcan-raw-sockets:
RAW Protocol Sockets with can_filters (SOCK_RAW)
@@ -693,22 +711,6 @@ where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
CAN ID ranges from the incoming traffic.
-RAW Socket Returned Message Flags
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When using recvmsg() call, the msg->msg_flags may contain following flags:
-
-MSG_DONTROUTE:
- set when the received frame was created on the local host.
-
-MSG_CONFIRM:
- set when the frame was sent via the socket it is received on.
- This flag can be interpreted as a 'transmission confirmation' when the
- CAN driver supports the echo of frames on driver level, see
- :ref:`socketcan-local-loopback1` and :ref:`socketcan-local-loopback2`.
- In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
-
-
Broadcast Manager Protocol Sockets (SOCK_DGRAM)
-----------------------------------------------
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index b842bcb14255..a4c7d0c65fd7 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -211,10 +211,16 @@ Documentation/networking/net_dim.rst
RX copybreak
============
+
The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
and can be configured by the ETHTOOL_STUNABLE command of the
SIOCETHTOOL ioctl.
+This option controls the maximum packet length for which the RX
+descriptor it was received on is recycled: when a packet smaller than
+rx_copybreak bytes is received, it is copied into a new memory buffer
+and the RX descriptor is returned to HW.
+
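+For illustration, rx_copybreak could be set with a sketch like the
+following (``eth0``, the value 256 and the socket ``fd`` are
+placeholders)::
+
+  char buf[sizeof(struct ethtool_tunable) + sizeof(__u32)] = { 0 };
+  struct ethtool_tunable *tuna = (struct ethtool_tunable *)buf;
+  struct ifreq ifr = { 0 };
+  __u32 val = 256;
+
+  tuna->cmd = ETHTOOL_STUNABLE;
+  tuna->id = ETHTOOL_RX_COPYBREAK;
+  tuna->type_id = ETHTOOL_TUNABLE_U32;
+  tuna->len = sizeof(val);
+  memcpy(tuna->data, &val, sizeof(val));
+
+  strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+  ifr.ifr_data = (void *)tuna;
+  ioctl(fd, SIOCETHTOOL, &ifr); /* fd: any AF_INET datagram socket */
+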
Statistics
==========
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 43de285b8a92..6932d8c043c2 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -42,6 +42,7 @@ Contents:
intel/ice
marvell/octeontx2
marvell/octeon_ep
+ marvell/octeon_ep_vf
mellanox/mlx5/index
microsoft/netvsc
neterion/s2io
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index 5038e54586af..934752f675ba 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -368,15 +368,28 @@ more options for Receive Side Scaling (RSS) hash byte configuration.
# ethtool -N <ethX> rx-flow-hash <type> <option>
Where <type> is:
- tcp4 signifying TCP over IPv4
- udp4 signifying UDP over IPv4
- tcp6 signifying TCP over IPv6
- udp6 signifying UDP over IPv6
+ tcp4 signifying TCP over IPv4
+ udp4 signifying UDP over IPv4
+ gtpc4 signifying GTP-C over IPv4
+ gtpc4t signifying GTP-C (include TEID) over IPv4
+ gtpu4 signifying GTP-U over IPV4
+ gtpu4e signifying GTP-U and Extension Header over IPV4
+ gtpu4u signifying GTP-U PSC Uplink over IPV4
+ gtpu4d signifying GTP-U PSC Downlink over IPV4
+ tcp6 signifying TCP over IPv6
+ udp6 signifying UDP over IPv6
+ gtpc6 signifying GTP-C over IPv6
+ gtpc6t signifying GTP-C (include TEID) over IPv6
+ gtpu6 signifying GTP-U over IPV6
+ gtpu6e signifying GTP-U and Extension Header over IPV6
+ gtpu6u signifying GTP-U PSC Uplink over IPV6
+ gtpu6d signifying GTP-U PSC Downlink over IPV6
And <option> is one or more of:
s Hash on the IP source address of the Rx packet.
d Hash on the IP destination address of the Rx packet.
f Hash on bytes 0 and 1 of the Layer 4 header of the Rx packet.
n Hash on bytes 2 and 3 of the Layer 4 header of the Rx packet.
+	e	Hash on the GTP TEID (4 bytes) of the Rx packet.
Accelerated Receive Flow Steering (aRFS)
diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst
new file mode 100644
index 000000000000..603133d0b92f
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=======================================================================
+Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC VF
+=======================================================================
+
+Network driver for Marvell's Octeon PCI EndPoint NIC VF.
+Copyright (c) 2020 Marvell International Ltd.
+
+Overview
+========
+This driver implements networking functionality of Marvell's Octeon PCI
+EndPoint NIC VF.
+
+Supported Devices
+=================
+Currently, this driver supports the following devices:
+ * Network controller: Cavium, Inc. Device b203
+ * Network controller: Cavium, Inc. Device b403
+ * Network controller: Cavium, Inc. Device b103
+ * Network controller: Cavium, Inc. Device b903
+ * Network controller: Cavium, Inc. Device ba03
+ * Network controller: Cavium, Inc. Device bc03
+ * Network controller: Cavium, Inc. Device bd03
diff --git a/Documentation/networking/device_drivers/wwan/t7xx.rst b/Documentation/networking/device_drivers/wwan/t7xx.rst
index dd5b731957ca..f346f5f85f15 100644
--- a/Documentation/networking/device_drivers/wwan/t7xx.rst
+++ b/Documentation/networking/device_drivers/wwan/t7xx.rst
@@ -39,6 +39,34 @@ command and receive response:
- open the AT control channel using a UART tool or a special user tool
+Sysfs
+=====
+The driver provides sysfs interfaces to userspace.
+
+t7xx_mode
+---------
+The sysfs interface provides userspace with access to the device mode; this
+interface supports read and write operations.
+
+Device mode:
+
+- ``unknown`` represents that the device is in an unknown state
+- ``ready`` represents that the device is in the ready state
+- ``reset`` represents that the device is in the reset state
+- ``fastboot_switching`` represents that the device is in the fastboot switching state
+- ``fastboot_download`` represents that the device is in the fastboot download state
+- ``fastboot_dump`` represents that the device is in the fastboot dump state
+
+Read from userspace to get the current device mode.
+
+::
+
+  $ cat /sys/bus/pci/devices/${bdf}/t7xx_mode
+
+Write from userspace to set the device mode.
+
+::
+
+  $ echo fastboot_switching > /sys/bus/pci/devices/${bdf}/t7xx_mode
+
Management application development
==================================
The driver and userspace interfaces are described below. The MBIM protocol is
@@ -97,6 +125,20 @@ The driver exposes an AT port by implementing AT WWAN Port.
The userspace end of the control port is a /dev/wwan0at0 character
device. Application shall use this interface to issue AT commands.
+fastboot port userspace ABI
+---------------------------
+
+/dev/wwan0fastboot0 character device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The driver exposes a fastboot protocol interface by implementing
+fastboot WWAN Port. The userspace end of the fastboot channel pipe is a
+/dev/wwan0fastboot0 character device. Application shall use this interface for
+fastboot protocol communication.
+
+Please note that the driver needs to be reloaded to export the
+/dev/wwan0fastboot0 port, because the device needs a cold reset after
+entering ``fastboot_switching`` mode.
+
The MediaTek's T700 modem supports the 3GPP TS 27.007 [4] specification.
References
@@ -118,3 +160,7 @@ speak the Mobile Interface Broadband Model (MBIM) protocol"*
[4] *Specification # 27.007 - 3GPP*
- https://www.3gpp.org/DynaReport/27007.htm
+
+[5] *fastboot "a mechanism for communicating with bootloaders"*
+
+- https://android.googlesource.com/platform/system/core/+/refs/heads/main/fastboot/README.md
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 702f204a3dbd..456985407475 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -97,6 +97,10 @@ parameters.
When metadata is disabled, the above use cases will fail to initialize if
users try to enable them.
+
+  Note: Setting this parameter does not take effect immediately. The
+  setting must be applied while in legacy mode; eswitch port metadata then
+  takes effect after enabling switchdev mode.
* - ``hairpin_num_queues``
- u32
- driverinit
@@ -246,7 +250,7 @@ them in realtime.
Description of the vnic counters:
-- total_q_under_processor_handle
+- total_error_queues
number of queues in an error state due to
an async error or errored command.
- send_queue_priority_update_flow
@@ -255,7 +259,8 @@ Description of the vnic counters:
number of times CQ entered an error state due to an overflow.
- async_eq_overrun
number of times an EQ mapped to async events was overrun.
- comp_eq_overrun number of times an EQ mapped to completion events was
+- comp_eq_overrun
+ number of times an EQ mapped to completion events was
overrun.
- quota_exceeded_command
number of commands issued and failed due to quota exceeded.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 69f3d6dcd9fd..473d72c36d61 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -74,6 +74,7 @@ Contents:
mpls-sysctl
mptcp-sysctl
multiqueue
+ multi-pf-netdev
napi
net_cachelines/index
netconsole
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 7afff42612e9..bd50df6a5a42 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2503,7 +2503,7 @@ use_tempaddr - INTEGER
temp_valid_lft - INTEGER
valid lifetime (in seconds) for temporary addresses. If less than the
- minimum required lifetime (typically 5 seconds), temporary addresses
+ minimum required lifetime (typically 5-7 seconds), temporary addresses
will not be created.
Default: 172800 (2 days)
@@ -2511,7 +2511,7 @@ temp_valid_lft - INTEGER
temp_prefered_lft - INTEGER
Preferred lifetime (in seconds) for temporary addresses. If
temp_prefered_lft is less than the minimum required lifetime (typically
- 5 seconds), temporary addresses will not be created. If
+ 5-7 seconds), the preferred lifetime is the minimum required. If
temp_prefered_lft is greater than temp_valid_lft, the preferred lifetime
is temp_valid_lft.
@@ -2535,6 +2535,16 @@ max_desync_factor - INTEGER
Default: 600
+regen_min_advance - INTEGER
+ How far in advance (in seconds), at minimum, to create a new temporary
+ address before the current one is deprecated. This value is added to
+ the amount of time that may be required for duplicate address detection
+ to determine when to create a new address. Linux permits setting this
+ value to less than the default of 2 seconds, but a value less than 2
+ does not conform to RFC 8981.
+
+ Default: 2
+
regen_max_retry - INTEGER
	Number of attempts before giving up on generating valid
	temporary addresses.
diff --git a/Documentation/networking/l2tp.rst b/Documentation/networking/l2tp.rst
index 7f383e99dbad..8496b467dea4 100644
--- a/Documentation/networking/l2tp.rst
+++ b/Documentation/networking/l2tp.rst
@@ -386,12 +386,19 @@ Sample userspace code:
- Create session PPPoX data socket::
+ /* Input: the L2TP tunnel UDP socket `tunnel_fd`, which needs to be
+ * bound already (both sockname and peername), otherwise it will not be
+ * ready.
+ */
+
struct sockaddr_pppol2tp sax;
- int fd;
+ int session_fd;
+ int ret;
+
+ session_fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ if (session_fd < 0)
+ return -errno;
- /* Note, the tunnel socket must be bound already, else it
- * will not be ready
- */
sax.sa_family = AF_PPPOX;
sax.sa_protocol = PX_PROTO_OL2TP;
sax.pppol2tp.fd = tunnel_fd;
@@ -406,12 +413,128 @@ Sample userspace code:
/* session_fd is the fd of the session's PPPoL2TP socket.
* tunnel_fd is the fd of the tunnel UDP / L2TPIP socket.
*/
- fd = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
- if (fd < 0 ) {
+ ret = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
+ if (ret < 0 ) {
+ close(session_fd);
+ return -errno;
+ }
+
+ return session_fd;
+
+L2TP control packets will still be available for read on `tunnel_fd`.
+
+ - Create PPP channel::
+
+ /* Input: the session PPPoX data socket `session_fd` which was created
+ * as described above.
+ */
+
+ int ppp_chan_fd;
+ int chindx;
+ int ret;
+
+ ret = ioctl(session_fd, PPPIOCGCHAN, &chindx);
+ if (ret < 0)
+ return -errno;
+
+ ppp_chan_fd = open("/dev/ppp", O_RDWR);
+ if (ppp_chan_fd < 0)
+ return -errno;
+
+ ret = ioctl(ppp_chan_fd, PPPIOCATTCHAN, &chindx);
+ if (ret < 0) {
+ close(ppp_chan_fd);
return -errno;
}
+
+ return ppp_chan_fd;
+
+LCP PPP frames will be available for read on `ppp_chan_fd`.
+
+ - Create PPP interface::
+
+ /* Input: the PPP channel `ppp_chan_fd` which was created as described
+ * above.
+ */
+
+ int ifunit = -1;
+ int ppp_if_fd;
+ int ret;
+
+ ppp_if_fd = open("/dev/ppp", O_RDWR);
+ if (ppp_if_fd < 0)
+ return -errno;
+
+ ret = ioctl(ppp_if_fd, PPPIOCNEWUNIT, &ifunit);
+ if (ret < 0) {
+ close(ppp_if_fd);
+ return -errno;
+ }
+
+ ret = ioctl(ppp_chan_fd, PPPIOCCONNECT, &ifunit);
+ if (ret < 0) {
+ close(ppp_if_fd);
+ return -errno;
+ }
+
+ return ppp_if_fd;
+
+IPCP/IPv6CP PPP frames will be available for read on `ppp_if_fd`.
+
+The ppp<ifunit> interface can then be configured as usual with netlink's
+RTM_NEWLINK, RTM_NEWADDR, RTM_NEWROUTE, or ioctl's SIOCSIFMTU, SIOCSIFADDR,
+SIOCSIFDSTADDR, SIOCSIFNETMASK, SIOCSIFFLAGS, or with the `ip` command.
+
+ - Bridging L2TP sessions which have PPP pseudowire types (this is also called
+ L2TP tunnel switching or L2TP multihop) is supported by bridging the PPP
+ channels of the two L2TP sessions to be bridged::
+
+ /* Input: the session PPPoX data sockets `session_fd1` and `session_fd2`
+ * which were created as described further above.
+ */
+
+ int ppp_chan_fd;
+ int chindx1;
+ int chindx2;
+ int ret;
+
+ ret = ioctl(session_fd1, PPPIOCGCHAN, &chindx1);
+ if (ret < 0)
+ return -errno;
+
+ ret = ioctl(session_fd2, PPPIOCGCHAN, &chindx2);
+ if (ret < 0)
+ return -errno;
+
+ ppp_chan_fd = open("/dev/ppp", O_RDWR);
+ if (ppp_chan_fd < 0)
+ return -errno;
+
+ ret = ioctl(ppp_chan_fd, PPPIOCATTCHAN, &chindx1);
+ if (ret < 0) {
+ close(ppp_chan_fd);
+ return -errno;
+ }
+
+ ret = ioctl(ppp_chan_fd, PPPIOCBRIDGECHAN, &chindx2);
+ close(ppp_chan_fd);
+ if (ret < 0)
+ return -errno;
+
return 0;
+Note that when bridging PPP channels, the PPP session is not locally
+terminated, and no local PPP interface is created. PPP frames arriving on one
+channel are directly passed to the other channel, and vice versa.
+
+The PPP channel does not need to be kept open. Only the session PPPoX data
+sockets need to be kept open.
+
+More generally, the same mechanism can also be used to bridge a PPPoL2TP
+PPP channel with other types of PPP channels, such as PPPoE.
+
+See more details for the PPP side in ppp_generic.rst.
+
Old L2TPv2-only API
-------------------
diff --git a/Documentation/networking/multi-pf-netdev.rst b/Documentation/networking/multi-pf-netdev.rst
new file mode 100644
index 000000000000..be8e4bcadf11
--- /dev/null
+++ b/Documentation/networking/multi-pf-netdev.rst
@@ -0,0 +1,174 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===============
+Multi-PF Netdev
+===============
+
+Contents
+========
+
+- `Background`_
+- `Overview`_
+- `mlx5 implementation`_
+- `Channels distribution`_
+- `Observability`_
+- `Steering`_
+- `Mutually exclusive features`_
+
+Background
+==========
+
+The Multi-PF NIC technology enables several CPUs within a multi-socket server to connect directly to
+the network, each through its own dedicated PCIe interface, either through a connection harness
+that splits the PCIe lanes between two cards or by bifurcating a PCIe slot for a single card. This
+eliminates the network traffic traversing over the internal bus between the sockets,
+significantly reducing overhead and latency, in addition to reducing CPU utilization and increasing
+network throughput.
+
+Overview
+========
+
+The feature adds support for combining multiple PFs of the same port in a Multi-PF environment under
+one netdev instance. It is implemented in the netdev layer. Lower-layer instances like pci func,
+sysfs entry, and devlink are kept separate.
+Passing traffic through different devices belonging to different NUMA sockets saves cross-NUMA
+traffic and allows apps running on the same netdev from different NUMAs to still feel a sense of
+proximity to the device and achieve improved performance.
+
+mlx5 implementation
+===================
+
+Multi-PF or Socket-direct in mlx5 is achieved by grouping together PFs that belong to the same
+NIC and have the socket-direct property enabled. Once all PFs are probed, we create a single netdev
+to represent all of them; symmetrically, we destroy the netdev whenever any of the PFs is removed.
+
+The netdev network channels are distributed between all devices; a proper configuration utilizes
+the NUMA node closest to the CPU a given app runs on.
+
+We pick one PF to be a primary (leader), and it fills a special role. The other devices
+(secondaries) are disconnected from the network at the chip level (set to silent mode). In silent
+mode, no south <-> north traffic flows directly through a secondary PF. It needs the assistance of
+the leader PF (east <-> west traffic) to function. All Rx/Tx traffic is steered through the primary
+to/from the secondaries.
+
+Currently, we limit the support to PFs only, and up to two PFs (sockets).
+
+Channels distribution
+=====================
+
+We distribute the channels between the different PFs to achieve local NUMA node performance
+on multiple NUMA nodes.
+
+Each combined channel works against one specific PF, creating all its datapath queues against it. We
+distribute channels to PFs in a round-robin policy.
+
+::
+
+ Example for 2 PFs and 5 channels:
+ +--------+--------+
+ | ch idx | PF idx |
+ +--------+--------+
+ | 0 | 0 |
+ | 1 | 1 |
+ | 2 | 0 |
+ | 3 | 1 |
+ | 4 | 0 |
+ +--------+--------+
+
+
+The reason we prefer round-robin is that it is less influenced by changes in the number of channels:
+the mapping between a channel index and a PF is fixed, no matter how many channels the user
+configures. As the channel stats are persistent across a channel's closure, changing the mapping
+every time would make the accumulated stats less representative of the channel's history.
+
+This is achieved by using the correct core device instance (mdev) in each channel, instead of them
+all using the same instance under "priv->mdev".
+
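+A sketch of the resulting policy (illustrative pseudo-code, not the
+actual mlx5 implementation)::
+
+  /* Channel i creates all of its datapath queues on PF (i % num_pfs). */
+  for (i = 0; i < num_channels; i++)
+          channel[i].mdev = mdev_list[i % num_pfs];
+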
+Observability
+=============
+The relation between PF, irq, napi, and queue can be observed via netlink spec:
+
+$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump queue-get --json='{"ifindex": 13}'
+[{'id': 0, 'ifindex': 13, 'napi-id': 539, 'type': 'rx'},
+ {'id': 1, 'ifindex': 13, 'napi-id': 540, 'type': 'rx'},
+ {'id': 2, 'ifindex': 13, 'napi-id': 541, 'type': 'rx'},
+ {'id': 3, 'ifindex': 13, 'napi-id': 542, 'type': 'rx'},
+ {'id': 4, 'ifindex': 13, 'napi-id': 543, 'type': 'rx'},
+ {'id': 0, 'ifindex': 13, 'napi-id': 539, 'type': 'tx'},
+ {'id': 1, 'ifindex': 13, 'napi-id': 540, 'type': 'tx'},
+ {'id': 2, 'ifindex': 13, 'napi-id': 541, 'type': 'tx'},
+ {'id': 3, 'ifindex': 13, 'napi-id': 542, 'type': 'tx'},
+ {'id': 4, 'ifindex': 13, 'napi-id': 543, 'type': 'tx'}]
+
+$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump napi-get --json='{"ifindex": 13}'
+[{'id': 543, 'ifindex': 13, 'irq': 42},
+ {'id': 542, 'ifindex': 13, 'irq': 41},
+ {'id': 541, 'ifindex': 13, 'irq': 40},
+ {'id': 540, 'ifindex': 13, 'irq': 39},
+ {'id': 539, 'ifindex': 13, 'irq': 36}]
+
+Here you can clearly observe our channels distribution policy:
+
+$ ls /proc/irq/{36,39,40,41,42}/mlx5* -d -1
+/proc/irq/36/mlx5_comp1@pci:0000:08:00.0
+/proc/irq/39/mlx5_comp1@pci:0000:09:00.0
+/proc/irq/40/mlx5_comp2@pci:0000:08:00.0
+/proc/irq/41/mlx5_comp2@pci:0000:09:00.0
+/proc/irq/42/mlx5_comp3@pci:0000:08:00.0
+
+Steering
+========
+Secondary PFs are set to "silent" mode, meaning they are disconnected from the network.
+
+In Rx, the steering tables belong to the primary PF only, and it is its role to distribute incoming
+traffic to other PFs, via cross-vhca steering capabilities. We still maintain a single default RSS
+table, which is capable of pointing to the receive queues of a different PF.
+
+In Tx, the primary PF creates a new Tx flow table, which is aliased by the secondaries, so they can
+go out to the network through it.
+
+In addition, we set a default XPS configuration that, based on the CPU, selects an SQ belonging to the
+PF on the same node as the CPU.
+
+XPS default config example:
+
+NUMA node(s): 2
+NUMA node0 CPU(s): 0-11
+NUMA node1 CPU(s): 12-23
+
+PF0 on node0, PF1 on node1.
+
+- /sys/class/net/eth2/queues/tx-0/xps_cpus:000001
+- /sys/class/net/eth2/queues/tx-1/xps_cpus:001000
+- /sys/class/net/eth2/queues/tx-2/xps_cpus:000002
+- /sys/class/net/eth2/queues/tx-3/xps_cpus:002000
+- /sys/class/net/eth2/queues/tx-4/xps_cpus:000004
+- /sys/class/net/eth2/queues/tx-5/xps_cpus:004000
+- /sys/class/net/eth2/queues/tx-6/xps_cpus:000008
+- /sys/class/net/eth2/queues/tx-7/xps_cpus:008000
+- /sys/class/net/eth2/queues/tx-8/xps_cpus:000010
+- /sys/class/net/eth2/queues/tx-9/xps_cpus:010000
+- /sys/class/net/eth2/queues/tx-10/xps_cpus:000020
+- /sys/class/net/eth2/queues/tx-11/xps_cpus:020000
+- /sys/class/net/eth2/queues/tx-12/xps_cpus:000040
+- /sys/class/net/eth2/queues/tx-13/xps_cpus:040000
+- /sys/class/net/eth2/queues/tx-14/xps_cpus:000080
+- /sys/class/net/eth2/queues/tx-15/xps_cpus:080000
+- /sys/class/net/eth2/queues/tx-16/xps_cpus:000100
+- /sys/class/net/eth2/queues/tx-17/xps_cpus:100000
+- /sys/class/net/eth2/queues/tx-18/xps_cpus:000200
+- /sys/class/net/eth2/queues/tx-19/xps_cpus:200000
+- /sys/class/net/eth2/queues/tx-20/xps_cpus:000400
+- /sys/class/net/eth2/queues/tx-21/xps_cpus:400000
+- /sys/class/net/eth2/queues/tx-22/xps_cpus:000800
+- /sys/class/net/eth2/queues/tx-23/xps_cpus:800000
+
+Mutually exclusive features
+===========================
+
+The nature of Multi-PF, where different channels work with different PFs, conflicts with
+stateful features where the state is maintained in one of the PFs.
+For example, in the TLS device-offload feature, special context objects are created per connection
+and maintained in the PF. Transitioning between different RQs/SQs would break the feature. Hence,
+we disable this combination for now.
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index 390730a74332..d55c2a22ec7a 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -15,6 +15,8 @@ Extended console support by Tejun Heo <tj@kernel.org>, May 1 2015
Release prepend support by Breno Leitao <leitao@debian.org>, Jul 7 2023
+Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
+
Please send bug reports to Matt Mackall <mpm@selenic.com>
Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
@@ -171,6 +173,70 @@ You can modify these targets in runtime by creating the following targets::
cat cmdline1/remote_ip
10.0.0.3
+Append User Data
+----------------
+
+Custom user data can be appended to the end of messages with netconsole
+dynamic configuration enabled. User data entries can be modified without
+changing the "enabled" attribute of a target.
+
+Directories (keys) under `userdata` are limited to 53 characters in length,
+and data in `userdata/<key>/value` is limited to 200 bytes::
+
+ cd /sys/kernel/config/netconsole && mkdir cmdline0
+ cd cmdline0
+ mkdir userdata/foo
+ echo bar > userdata/foo/value
+ mkdir userdata/qux
+ echo baz > userdata/qux/value
+
+Messages will now include this additional user data::
+
+ echo "This is a message" > /dev/kmsg
+
+Sends::
+
+ 12,607,22085407756,-;This is a message
+ foo=bar
+ qux=baz
+
+Preview the userdata that will be appended with::
+
+  cd /sys/kernel/config/netconsole/cmdline0
+ for f in `ls userdata`; do echo $f=$(cat userdata/$f/value); done
+
+If a `userdata` entry is created but no data is written to the `value` file,
+the entry will be omitted from netconsole messages::
+
+ cd /sys/kernel/config/netconsole && mkdir cmdline0
+ cd cmdline0
+ mkdir userdata/foo
+ echo bar > userdata/foo/value
+ mkdir userdata/qux
+
+The `qux` key is omitted since it has no value::
+
+ echo "This is a message" > /dev/kmsg
+ 12,607,22085407756,-;This is a message
+ foo=bar
+
+Delete `userdata` entries with `rmdir`::
+
+ rmdir /sys/kernel/config/netconsole/cmdline0/userdata/qux
+
+.. warning::
+ When writing strings to user data values, input is broken up per line in
+ configfs store calls and this can cause confusing behavior::
+
+ mkdir userdata/testing
+ printf "val1\nval2" > userdata/testing/value
+ # userdata store value is called twice, first with "val1\n" then "val2"
+ # so "val2" is stored, being the last value stored
+ cat userdata/testing/value
+ val2
+
+ It is recommended to not write user data values with newlines.
+
Extended console:
=================
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index 9e4cccb90b87..c2476917a6c3 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -252,8 +252,8 @@ ndo_eth_ioctl:
Context: process
ndo_get_stats:
- Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU.
- Context: atomic (can't sleep under rwlock or RCU)
+ Synchronization: rtnl_lock() semaphore, or RCU.
+ Context: atomic (can't sleep under RCU)
ndo_start_xmit:
Synchronization: __netif_tx_lock spinlock.
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
index 8054d33f449f..5bf285d73e8a 100644
--- a/Documentation/networking/sfp-phylink.rst
+++ b/Documentation/networking/sfp-phylink.rst
@@ -231,16 +231,136 @@ this documentation.
For further information on these methods, please see the inline
documentation in :c:type:`struct phylink_mac_ops <phylink_mac_ops>`.
-9. Remove calls to of_parse_phandle() for the PHY,
- of_phy_register_fixed_link() for fixed links etc. from the probe
- function, and replace with:
+9. Fill-in the :c:type:`struct phylink_config <phylink_config>` fields with
+ a reference to the :c:type:`struct device <device>` associated to your
+ :c:type:`struct net_device <net_device>`:
.. code-block:: c
- struct phylink *phylink;
priv->phylink_config.dev = &dev.dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ Fill-in the various speeds, pause and duplex modes your MAC can handle:
+
+ .. code-block:: c
+
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+10. Some Ethernet controllers work together with a PCS (Physical Coding Sublayer)
+    block, which can handle, among other things, the encoding/decoding, link
+    establishment detection and autonegotiation. While some MACs have an internal
+    PCS whose operation is transparent, some others require dedicated PCS
+ configuration for the link to become functional. In that case, phylink
+ provides a PCS abstraction through :c:type:`struct phylink_pcs <phylink_pcs>`.
+
+ Identify if your driver has one or more internal PCS blocks, and/or if
+ your controller can use an external PCS block that might be internally
+ connected to your controller.
+
+ If your controller doesn't have any internal PCS, you can go to step 11.
+
+ If your Ethernet controller contains one or several PCS blocks, create
+ one :c:type:`struct phylink_pcs <phylink_pcs>` instance per PCS block within
+ your driver's private data structure:
+
+ .. code-block:: c
+
+ struct phylink_pcs pcs;
+
+ Populate the relevant :c:type:`struct phylink_pcs_ops <phylink_pcs_ops>` to
+ configure your PCS. Create a :c:func:`pcs_get_state` function that reports
+ the inband link state, a :c:func:`pcs_config` function to configure your
+ PCS according to phylink-provided parameters, and a :c:func:`pcs_validate`
+ function that report to phylink all accepted configuration parameters for
+    function that reports to phylink all accepted configuration parameters for
+
+ .. code-block:: c
+
+ struct phylink_pcs_ops foo_pcs_ops = {
+ .pcs_validate = foo_pcs_validate,
+ .pcs_get_state = foo_pcs_get_state,
+ .pcs_config = foo_pcs_config,
+ };
+
+ Arrange for PCS link state interrupts to be forwarded into
+ phylink, via:
+
+ .. code-block:: c
+
+ phylink_pcs_change(pcs, link_is_up);
+
+ where ``link_is_up`` is true if the link is currently up or false
+ otherwise. If a PCS is unable to provide these interrupts, then
+ it should set ``pcs->pcs_poll = true;`` when creating the PCS.
+
+11. If your controller relies on, or accepts the presence of an external PCS
+ controlled through its own driver, add a pointer to a phylink_pcs instance
+ in your driver private data structure:
+
+ .. code-block:: c
+
+ struct phylink_pcs *pcs;
+
+    The way of getting an instance of the actual PCS depends on the platform;
+ some PCS sit on an MDIO bus and are grabbed by passing a pointer to the
+ corresponding :c:type:`struct mii_bus <mii_bus>` and the PCS's address on
+ that bus. In this example, we assume the controller attaches to a Lynx PCS
+ instance:
+
+ .. code-block:: c
+
+ priv->pcs = lynx_pcs_create_mdiodev(bus, 0);
+
+    Some PCS can be obtained based on firmware information:
+
+ .. code-block:: c
+
+ priv->pcs = lynx_pcs_create_fwnode(of_fwnode_handle(node));
+
+12. Populate the :c:func:`mac_select_pcs` callback and add it to your
+ :c:type:`struct phylink_mac_ops <phylink_mac_ops>` set of ops. This function
+ must return a pointer to the relevant :c:type:`struct phylink_pcs <phylink_pcs>`
+ that will be used for the requested link configuration:
+
+ .. code-block:: c
+
+ static struct phylink_pcs *foo_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+ {
+ struct foo_priv *priv = container_of(config, struct foo_priv,
+ phylink_config);
+
+ if ( /* 'interface' needs a PCS to function */ )
+ return priv->pcs;
+
+ return NULL;
+ }
+
+ See :c:func:`mvpp2_select_pcs` for an example of a driver that has multiple
+ internal PCS.
+
+13. Fill-in all the :c:type:`phy_interface_t <phy_interface_t>` (i.e. all MAC to
+ PHY link modes) that your MAC can output. The following example shows a
+ configuration for a MAC that can handle all RGMII modes, SGMII and 1000BaseX.
+ You must adjust these according to what your MAC and all PCS associated
+ with this MAC are capable of, and not just the interface you wish to use:
+
+ .. code-block:: c
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+14. Remove calls to of_parse_phandle() for the PHY,
+ of_phy_register_fixed_link() for fixed links etc. from the probe
+ function, and replace with:
+
+ .. code-block:: c
+
+ struct phylink *phylink;
+
phylink = phylink_create(&priv->phylink_config, node, phy_mode, &phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
@@ -249,14 +369,14 @@ this documentation.
priv->phylink = phylink;
- and arrange to destroy the phylink in the probe failure path as
- appropriate and the removal path too by calling:
+ and arrange to destroy the phylink in the probe failure path as
+ appropriate and the removal path too by calling:
- .. code-block:: c
+ .. code-block:: c
phylink_destroy(priv->phylink);
-10. Arrange for MAC link state interrupts to be forwarded into
+15. Arrange for MAC link state interrupts to be forwarded into
phylink, via:
.. code-block:: c
@@ -264,17 +384,16 @@ this documentation.
phylink_mac_change(priv->phylink, link_is_up);
where ``link_is_up`` is true if the link is currently up or false
- otherwise. If a MAC is unable to provide these interrupts, then
- it should set ``priv->phylink_config.pcs_poll = true;`` in step 9.
+ otherwise.
-11. Verify that the driver does not call::
+16. Verify that the driver does not call::
netif_carrier_on()
netif_carrier_off()
- as these will interfere with phylink's tracking of the link state,
- and cause phylink to omit calls via the :c:func:`mac_link_up` and
- :c:func:`mac_link_down` methods.
+ as these will interfere with phylink's tracking of the link state,
+ and cause phylink to omit calls via the :c:func:`mac_link_up` and
+ :c:func:`mac_link_down` methods.
Network drivers should call phylink_stop() and phylink_start() via their
suspend/resume paths, which ensures that the appropriate
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index 551b3cc29a41..75e017dfa825 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -41,6 +41,15 @@ If `-s` is specified once the detailed errors won't be shown.
`ip` supports JSON formatting via the `-j` option.
+Queue statistics
+~~~~~~~~~~~~~~~~
+
+Queue statistics are accessible via the netdev netlink family.
+
+Currently no widely distributed CLI exists to access those statistics.
+Kernel development tools (ynl) can be used to experiment with them;
+see `Documentation/userspace-api/netlink/intro-specs.rst`.
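+
+For example, assuming the ``netdev`` family spec defines a ``qstats-get``
+dump operation, an invocation could look like::
+
+  $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
+        --dump qstats-get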
+
Protocol-specific statistics
----------------------------
@@ -147,6 +156,12 @@ Statistics are reported both in the responses to link information
requests (`RTM_GETLINK`) and statistic requests (`RTM_GETSTATS`,
when `IFLA_STATS_LINK_64` bit is set in the `.filter_mask` of the request).
+netdev (netlink)
+~~~~~~~~~~~~~~~~
+
+`netdev` generic netlink family allows accessing page pool and per queue
+statistics.
+
ethtool
-------
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index 535077cbeb07..bfea9d8579ed 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -71,9 +71,9 @@ Callbacks to implement
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
/* Solely packet offload callbacks */
- void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -191,6 +191,6 @@ xdo_dev_policy_free() on any remaining offloaded states.
As an outcome of HW handling the packets, the XFRM core can't count hard and
soft limits. The HW/driver are responsible for performing this and providing
accurate data when
-xdo_dev_state_update_curlft() is called. In case of one of these limits
+xdo_dev_state_update_stats() is called. In case of one of these limits
occurring, the driver needs to call xfrm_state_check_expire() to make sure
that XFRM performs the rekeying sequence.
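
A driver-side sketch (illustrative only; the hw_read_* helpers are
hypothetical)::

  static void my_xdo_dev_state_update_stats(struct xfrm_state *x)
  {
          /* Refresh the state's counters from HW so the XFRM core can
           * evaluate the soft and hard limits. */
          x->curlft.packets = hw_read_packet_count(x); /* hypothetical */
          x->curlft.bytes = hw_read_byte_count(x);     /* hypothetical */
  }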
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 3731ecf1e437..c472423412bf 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -310,6 +310,7 @@ Code Seq# Include File Comments
0x89 0B-DF linux/sockios.h
0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
+0x8A 00-1F linux/eventpoll.h
0x8B all linux/wireless.h
0x8C 00-3F WiNRADiO driver
<http://www.winradio.com.au/>
diff --git a/Documentation/userspace-api/netlink/netlink-raw.rst b/Documentation/userspace-api/netlink/netlink-raw.rst
index 1e14f5f22b8e..1990eea772d0 100644
--- a/Documentation/userspace-api/netlink/netlink-raw.rst
+++ b/Documentation/userspace-api/netlink/netlink-raw.rst
@@ -150,3 +150,45 @@ attributes from an ``attribute-set``. For example the following
Note that a selector attribute must appear in a netlink message before any
sub-message attributes that depend on it.
+
+If an attribute such as ``kind`` is defined at more than one nest level, then a
+sub-message selector will be resolved using the value 'closest' to the selector:
+if the same attribute name is defined in a nested ``attribute-set`` alongside a
+sub-message selector and also in a top level ``attribute-set``, the value from
+the nested ``attribute-set`` is the one used. If the value is not present in the
+message at the same level as defined in the spec then this is an error.
+
+Nested struct definitions
+-------------------------
+
+Many raw netlink families such as :doc:`tc<../../networking/netlink_spec/tc>`
+make use of nested struct definitions. The ``netlink-raw`` schema makes it
+possible to embed a struct within a struct definition using the ``struct``
+property. For example, the following struct definition embeds the
+``tc-ratespec`` struct definition for both the ``rate`` and the ``peakrate``
+members of ``struct tc-tbf-qopt``.
+
+.. code-block:: yaml
+
+ -
+ name: tc-tbf-qopt
+ type: struct
+ members:
+ -
+ name: rate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: peakrate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: limit
+ type: u32
+ -
+ name: buffer
+ type: u32
+ -
+ name: mtu
+ type: u32
diff --git a/MAINTAINERS b/MAINTAINERS
index 4a7d45c3e91d..54775eaaf7b3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3807,6 +3807,7 @@ M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Andrii Nakryiko <andrii@kernel.org>
R: Martin KaFai Lau <martin.lau@linux.dev>
+R: Eduard Zingerman <eddyz87@gmail.com>
R: Song Liu <song@kernel.org>
R: Yonghong Song <yonghong.song@linux.dev>
R: John Fastabend <john.fastabend@gmail.com>
@@ -3867,6 +3868,7 @@ F: net/unix/unix_bpf.c
BPF [LIBRARY] (libbpf)
M: Andrii Nakryiko <andrii@kernel.org>
+M: Eduard Zingerman <eddyz87@gmail.com>
L: bpf@vger.kernel.org
S: Maintained
F: tools/lib/bpf/
@@ -3924,6 +3926,7 @@ F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
M: Andrii Nakryiko <andrii@kernel.org>
+M: Eduard Zingerman <eddyz87@gmail.com>
R: Mykola Lysenko <mykolal@fb.com>
L: bpf@vger.kernel.org
S: Maintained
@@ -4637,8 +4640,8 @@ S: Maintained
F: net/sched/sch_cake.c
CAN NETWORK DRIVERS
-M: Wolfgang Grandegger <wg@grandegger.com>
M: Marc Kleine-Budde <mkl@pengutronix.de>
+M: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
L: linux-can@vger.kernel.org
S: Maintained
W: https://github.com/linux-can
@@ -7897,6 +7900,13 @@ S: Maintained
F: include/linux/errseq.h
F: lib/errseq.c
+ESD CAN NETWORK DRIVERS
+M: Stefan Mätje <stefan.maetje@esd.eu>
+R: socketcan@esd.eu
+L: linux-can@vger.kernel.org
+S: Maintained
+F: drivers/net/can/esd/
+
ESD CAN/USB DRIVERS
M: Frank Jungclaus <frank.jungclaus@esd.eu>
R: socketcan@esd.eu
@@ -8599,6 +8609,13 @@ F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-scc-qmc.yaml
F: drivers/soc/fsl/qe/qmc.c
F: include/soc/fsl/qe/qmc.h
+FREESCALE QUICC ENGINE QMC HDLC DRIVER
+M: Herve Codina <herve.codina@bootlin.com>
+L: netdev@vger.kernel.org
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
+F: drivers/net/wan/fsl_qmc_hdlc.c
+
FREESCALE QUICC ENGINE TSA DRIVER
M: Herve Codina <herve.codina@bootlin.com>
L: linuxppc-dev@lists.ozlabs.org
@@ -13082,6 +13099,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/marvell/octeon_ep
+MARVELL OCTEON ENDPOINT VF DRIVER
+M: Veerasenareddy Burru <vburru@marvell.com>
+M: Sathesh Edara <sedara@marvell.com>
+M: Shinas Rasheed <srasheed@marvell.com>
+M: Satananda Burla <sburla@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/marvell/octeon_ep_vf
+
MARVELL OCTEONTX2 PHYSICAL FUNCTION DRIVER
M: Sunil Goutham <sgoutham@marvell.com>
M: Geetha sowjanya <gakula@marvell.com>
@@ -15120,6 +15146,7 @@ NETDEVSIM
M: Jakub Kicinski <kuba@kernel.org>
S: Maintained
F: drivers/net/netdevsim/*
+F: tools/testing/selftests/drivers/net/netdevsim/*
NETEM NETWORK EMULATOR
M: Stephen Hemminger <stephen@networkplumber.org>
@@ -18041,6 +18068,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
+QUALCOMM ATHEROS QCA7K ETHERNET DRIVER
+M: Stefan Wahren <wahrenst@gmx.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/qca,qca7000.txt
+F: drivers/net/ethernet/qualcomm/qca*
+
QUALCOMM BAM-DMUX WWAN NETWORK DRIVER
M: Stephan Gerhold <stephan@gerhold.net>
L: netdev@vger.kernel.org
@@ -24168,7 +24202,6 @@ F: drivers/net/ethernet/xilinx/xilinx_axienet*
XILINX CAN DRIVER
M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
-R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
L: linux-can@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/can/xilinx,can.yaml
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 2129070065c3..794cfea9f9d4 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -110,8 +110,8 @@ void __init add_static_vm_early(struct static_vm *svm)
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
- return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
- __pgprot(mtype->prot_pte));
+ return vmap_page_range(virt, virt + PAGE_SIZE, phys,
+ __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
@@ -466,8 +466,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
- __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
+ return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 68908b82b168..587bdb91ab7a 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -8,6 +8,8 @@ int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
int aarch64_insn_write_literal_u64(void *addr, u64 val);
+void *aarch64_insn_set(void *dst, u32 insn, size_t len);
+void *aarch64_insn_copy(void *dst, void *src, size_t len);
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index b4835f6d594b..255534930368 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -105,6 +105,81 @@ noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
return ret;
}
+typedef void text_poke_f(void *dst, void *src, size_t patched, size_t len);
+
+static void *__text_poke(text_poke_f func, void *addr, void *src, size_t len)
+{
+ unsigned long flags;
+ size_t patched = 0;
+ size_t size;
+ void *waddr;
+ void *ptr;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+
+ while (patched < len) {
+ ptr = addr + patched;
+ size = min_t(size_t, PAGE_SIZE - offset_in_page(ptr),
+ len - patched);
+
+ waddr = patch_map(ptr, FIX_TEXT_POKE0);
+ func(waddr, src, patched, size);
+ patch_unmap(FIX_TEXT_POKE0);
+
+ patched += size;
+ }
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);
+
+ return addr;
+}
+
+static void text_poke_memcpy(void *dst, void *src, size_t patched, size_t len)
+{
+ copy_to_kernel_nofault(dst, src + patched, len);
+}
+
+static void text_poke_memset(void *dst, void *src, size_t patched, size_t len)
+{
+ u32 c = *(u32 *)src;
+
+ memset32(dst, c, len / 4);
+}
+
+/**
+ * aarch64_insn_copy - Copy instructions into (an unused part of) RX memory
+ * @dst: address to modify
+ * @src: source of the copy
+ * @len: length to copy
+ *
+ * Useful for JITs to dump new code blocks into unused regions of RX memory.
+ */
+noinstr void *aarch64_insn_copy(void *dst, void *src, size_t len)
+{
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)dst & 0x3)
+ return NULL;
+
+ return __text_poke(text_poke_memcpy, dst, src, len);
+}
+
+/**
+ * aarch64_insn_set - memset for RX memory regions.
+ * @dst: address to modify
+ * @insn: value to set
+ * @len: length of memory region.
+ *
+ * Useful for JITs to fill regions of RX memory with illegal instructions.
+ */
+noinstr void *aarch64_insn_set(void *dst, u32 insn, size_t len)
+{
+ if ((uintptr_t)dst & 0x3)
+ return NULL;
+
+ return __text_poke(text_poke_memset, dst, &insn, len);
+}
+
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
u32 *tp = addr;
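The two helpers pair naturally: a JIT stages instructions in a writable scratch buffer, publishes them into the read-only-executable region with aarch64_insn_copy(), and poisons any trailing slack with aarch64_insn_set(). A minimal usage sketch, with rx_dst, rw_buf, prog_len and slack_len as hypothetical names, not from this patch:

        /* Publish prog_len bytes of staged code, then poison the slack. */
        if (!aarch64_insn_copy(rx_dst, rw_buf, prog_len))
                return -EINVAL; /* rx_dst was not 4-byte aligned */
        if (!aarch64_insn_set(rx_dst + prog_len, AARCH64_BREAK_FAULT, slack_len))
                return -EINVAL;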
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index b2a60e0bcfd2..684c26511696 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
+#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
@@ -266,6 +267,31 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
+struct bpf_unwind_consume_entry_data {
+ bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
+ void *cookie;
+};
+
+static bool
+arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+ struct bpf_unwind_consume_entry_data *data = cookie;
+
+ return data->consume_entry(data->cookie, state->common.pc, 0,
+ state->common.fp);
+}
+
+noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
+ u64 fp), void *cookie)
+{
+ struct bpf_unwind_consume_entry_data data = {
+ .consume_entry = consume_entry,
+ .cookie = cookie,
+ };
+
+ kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
+}
+
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
char *loglvl = arg;
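arch_bpf_stack_walk() feeds each frame's pc and fp to the callback (sp is reported as 0 on arm64) and stops as soon as the callback returns false. A hedged consumer sketch, with count_frames and nframes as illustrative names:

        static bool count_frames(void *cookie, u64 ip, u64 sp, u64 fp)
        {
                unsigned int *n = cookie;

                return ++(*n) < 16; /* walk at most 16 frames */
        }

        unsigned int nframes = 0;

        arch_bpf_stack_walk(count_frames, &nframes);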
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 8955da5c47cf..c5b461dda438 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -76,6 +76,7 @@ struct jit_ctx {
int *offset;
int exentry_idx;
__le32 *image;
+ __le32 *ro_image;
u32 stack_size;
int fpb_offset;
};
@@ -205,6 +206,14 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ if (!aarch64_insn_set(dst, AARCH64_BREAK_FAULT, len))
+ return -EINVAL;
+
+ return 0;
+}
+
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
int to = ctx->epilogue_offset;
@@ -285,7 +294,8 @@ static bool is_lsi_offset(int offset, int scale)
/* Tail call offset to jump into */
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
-static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
+static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
+ bool is_exception_cb)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -333,19 +343,34 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
emit(A64_NOP, ctx);
- /* Sign lr */
- if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
- emit(A64_PACIASP, ctx);
-
- /* Save FP and LR registers to stay align with ARM64 AAPCS */
- emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
- emit(A64_MOV(1, A64_FP, A64_SP), ctx);
-
- /* Save callee-saved registers */
- emit(A64_PUSH(r6, r7, A64_SP), ctx);
- emit(A64_PUSH(r8, r9, A64_SP), ctx);
- emit(A64_PUSH(fp, tcc, A64_SP), ctx);
- emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ if (!is_exception_cb) {
+ /* Sign lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_PACIASP, ctx);
+ /* Save FP and LR registers to stay aligned with ARM64 AAPCS */
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+
+ /* Save callee-saved registers */
+ emit(A64_PUSH(r6, r7, A64_SP), ctx);
+ emit(A64_PUSH(r8, r9, A64_SP), ctx);
+ emit(A64_PUSH(fp, tcc, A64_SP), ctx);
+ emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ } else {
+ /*
+ * The exception callback receives the FP of the main program as
+ * its third parameter.
+ */
+ emit(A64_MOV(1, A64_FP, A64_R(2)), ctx);
+ /*
+ * The main program has already pushed the frame record and the
+ * callee-saved registers. The exception callback will not push
+ * anything and reuses the main program's stack.
+ *
+ * 10 registers * 8 bytes = 80 bytes are on the stack
+ */
+ emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx);
+ }
/* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
@@ -365,6 +390,20 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit_bti(A64_BTI_J, ctx);
}
+ /*
+ * A program acting as an exception boundary should save all ARM64
+ * callee-saved registers, as the exception callback needs to recover
+ * them all in its epilogue.
+ */
+ if (prog->aux->exception_boundary) {
+ /*
+ * As we are pushing two more registers, BPF_FP should be moved
+ * down by 16 bytes
+ */
+ emit(A64_SUB_I(1, fp, fp, 16), ctx);
+ emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
+ }
+
emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
/* Stack must be multiples of 16B */
@@ -653,7 +692,7 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
-static void build_epilogue(struct jit_ctx *ctx)
+static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u8 r6 = bpf2a64[BPF_REG_6];
@@ -666,6 +705,15 @@ static void build_epilogue(struct jit_ctx *ctx)
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ /*
+ * A program acting as an exception boundary pushes R23 and R24 in
+ * addition to the BPF callee-saved registers. The exception callback
+ * uses the boundary program's stack frame, so these extra registers
+ * must be recovered in both cases.
+ */
+ if (ctx->prog->aux->exception_boundary || is_exception_cb)
+ emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+
/* Restore x27 and x28 */
emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
/* Restore fp (x25) and x26 */
@@ -707,7 +755,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
struct jit_ctx *ctx,
int dst_reg)
{
- off_t offset;
+ off_t ins_offset;
+ off_t fixup_offset;
unsigned long pc;
struct exception_table_entry *ex;
@@ -724,12 +773,17 @@ static int add_exception_handler(const struct bpf_insn *insn,
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->exentry_idx];
- pc = (unsigned long)&ctx->image[ctx->idx - 1];
+ pc = (unsigned long)&ctx->ro_image[ctx->idx - 1];
- offset = pc - (long)&ex->insn;
- if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ /*
+ * This is the offset of the instruction that may fault, relative to
+ * the exception table entry itself. It is written to the exception
+ * table; if the instruction faults, the destination register is set
+ * to '0' and execution jumps to the next instruction.
+ */
+ ins_offset = pc - (long)&ex->insn;
+ if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
return -ERANGE;
- ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
@@ -738,12 +792,25 @@ static int add_exception_handler(const struct bpf_insn *insn,
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
+ *
+ * The fixup_offset points at the instruction following the one that
+ * may fault; execution resumes there once the fault has been handled.
*/
- offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
- if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+ fixup_offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
+ if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
- ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+ /*
+ * The offsets above have been calculated using the RO buffer but we
+ * need to use the RW buffer for writes: switch ex to the RW buffer
+ * before writing.
+ */
+ ex = (void *)ctx->image + ((void *)ex - (void *)ctx->ro_image);
+
+ ex->insn = ins_offset;
+
+ ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
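The "switch ex to the RW buffer" step generalizes: an object at offset X from the RX base must be written through the same offset from the RW base. As a one-line sketch, with ro_base and rw_base as illustrative names:

        rw_addr = rw_base + (ro_addr - ro_base);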
@@ -1511,7 +1578,8 @@ static inline void bpf_flush_icache(void *start, void *end)
struct arm64_jit_data {
struct bpf_binary_header *header;
- u8 *image;
+ u8 *ro_image;
+ struct bpf_binary_header *ro_header;
struct jit_ctx ctx;
};
@@ -1520,12 +1588,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
int image_size, prog_size, extable_size, extable_align, extable_offset;
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
+ struct bpf_binary_header *ro_header;
struct arm64_jit_data *jit_data;
bool was_classic = bpf_prog_was_classic(prog);
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
u8 *image_ptr;
+ u8 *ro_image_ptr;
if (!prog->jit_requested)
return orig_prog;
@@ -1552,8 +1622,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
- image_ptr = jit_data->image;
+ ro_image_ptr = jit_data->ro_image;
+ ro_header = jit_data->ro_header;
header = jit_data->header;
+ image_ptr = (void *)header + ((void *)ro_image_ptr
+ - (void *)ro_header);
extra_pass = true;
prog_size = sizeof(u32) * ctx.idx;
goto skip_init_ctx;
@@ -1575,7 +1648,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
prog = orig_prog;
goto out_off;
}
@@ -1586,7 +1659,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx);
+ build_epilogue(&ctx, prog->aux->exception_cb);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
@@ -1598,63 +1671,81 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* also allocate space for plt target */
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
image_size = extable_offset + extable_size;
- header = bpf_jit_binary_alloc(image_size, &image_ptr,
- sizeof(u32), jit_fill_hole);
- if (header == NULL) {
+ ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
+ sizeof(u32), &header, &image_ptr,
+ jit_fill_hole);
+ if (!ro_header) {
prog = orig_prog;
goto out_off;
}
/* 2. Now, the actual pass. */
+ /*
+ * Use the RW image for writing the JITed instructions, but keep the
+ * RX ro_image for calculating offsets within the image. The RW image
+ * is later copied to the RX image, from which the program will run;
+ * bpf_jit_binary_pack_finalize() performs this copy as the final
+ * step.
+ */
ctx.image = (__le32 *)image_ptr;
+ ctx.ro_image = (__le32 *)ro_image_ptr;
if (extable_size)
- prog->aux->extable = (void *)image_ptr + extable_offset;
+ prog->aux->extable = (void *)ro_image_ptr + extable_offset;
skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
- build_prologue(&ctx, was_classic);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb);
if (build_body(&ctx, extra_pass)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
- goto out_off;
+ goto out_free_hdr;
}
- build_epilogue(&ctx);
+ build_epilogue(&ctx, prog->aux->exception_cb);
build_plt(&ctx);
/* 3. Extra pass to validate JITed code. */
if (validate_ctx(&ctx)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
- goto out_off;
+ goto out_free_hdr;
}
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
- bpf_flush_icache(header, ctx.image + ctx.idx);
-
if (!prog->is_func || extra_pass) {
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
+ goto out_free_hdr;
+ }
+ if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header,
+ header))) {
+ /* ro_header has been freed */
+ ro_header = NULL;
+ prog = orig_prog;
goto out_off;
}
- bpf_jit_binary_lock_ro(header);
+ /*
+ * The instructions have now been copied to the ROX region from
+ * which they will execute. The data cache must be cleaned to the
+ * PoU and the I-cache invalidated for those VAs.
+ */
+ bpf_flush_icache(ro_header, ctx.ro_image + ctx.idx);
} else {
jit_data->ctx = ctx;
- jit_data->image = image_ptr;
+ jit_data->ro_image = ro_image_ptr;
jit_data->header = header;
+ jit_data->ro_header = ro_header;
}
- prog->bpf_func = (void *)ctx.image;
+
+ prog->bpf_func = (void *)ctx.ro_image;
prog->jited = 1;
prog->jited_len = prog_size;
@@ -1675,6 +1766,14 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free_hdr:
+ if (header) {
+ bpf_arch_text_copy(&ro_header->size, &header->size,
+ sizeof(header->size));
+ bpf_jit_binary_pack_free(ro_header, header);
+ }
+ goto out_off;
}
bool bpf_jit_supports_kfunc_call(void)
@@ -1682,6 +1781,13 @@ bool bpf_jit_supports_kfunc_call(void)
return true;
}
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ if (!aarch64_insn_copy(dst, src, len))
+ return ERR_PTR(-EINVAL);
+ return dst;
+}
+
u64 bpf_jit_alloc_exec_limit(void)
{
return VMALLOC_END - VMALLOC_START;
@@ -1970,7 +2076,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* store return value */
emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
/* reserve a nop for bpf_tramp_image_put */
- im->ip_after_call = ctx->image + ctx->idx;
+ im->ip_after_call = ctx->ro_image + ctx->idx;
emit(A64_NOP, ctx);
}
@@ -1985,7 +2091,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
run_ctx_off, false);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- im->ip_epilogue = ctx->image + ctx->idx;
+ im->ip_epilogue = ctx->ro_image + ctx->idx;
emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
emit_call((const u64)__bpf_tramp_exit, ctx);
}
@@ -2018,9 +2124,6 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
emit(A64_RET(A64_R(10)), ctx);
}
- if (ctx->image)
- bpf_flush_icache(ctx->image, ctx->image + ctx->idx);
-
kfree(branches);
return ctx->idx;
@@ -2063,14 +2166,43 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
return ret < 0 ? ret : ret * AARCH64_INSN_SIZE;
}
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
- void *image_end, const struct btf_func_model *m,
+void *arch_alloc_bpf_trampoline(unsigned int size)
+{
+ return bpf_prog_pack_alloc(size, jit_fill_hole);
+}
+
+void arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+ bpf_prog_pack_free(image, size);
+}
+
+void arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+}
+
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+{
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
+ void *ro_image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
void *func_addr)
{
int ret, nregs;
+ void *image, *tmp;
+ u32 size = ro_image_end - ro_image;
+
+ /* image doesn't need to be in module memory range, so we can
+ * use kvmalloc.
+ */
+ image = kvmalloc(size, GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
struct jit_ctx ctx = {
.image = image,
+ .ro_image = ro_image,
.idx = 0,
};
@@ -2079,15 +2211,26 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
if (nregs > 8)
return -ENOTSUPP;
- jit_fill_hole(image, (unsigned int)(image_end - image));
+ jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);
- if (ret > 0 && validate_code(&ctx) < 0)
+ if (ret > 0 && validate_code(&ctx) < 0) {
ret = -EINVAL;
+ goto out;
+ }
if (ret > 0)
ret *= AARCH64_INSN_SIZE;
+ tmp = bpf_arch_text_copy(ro_image, image, size);
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto out;
+ }
+
+ bpf_flush_icache(ro_image, ro_image + size);
+out:
+ kvfree(image);
return ret;
}
@@ -2305,3 +2448,42 @@ out:
return ret;
}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_exceptions(void)
+{
+ /* We unwind through both kernel frames starting from within bpf_throw
+ * call and BPF frames. Therefore we require FP unwinder to be enabled
+ * to walk kernel frames and reach BPF frames in the stack trace.
+ * The ARM64 kernel is always compiled with CONFIG_FRAME_POINTER=y.
+ */
+ return true;
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+ if (prog->jited) {
+ struct arm64_jit_data *jit_data = prog->aux->jit_data;
+ struct bpf_binary_header *hdr;
+
+ /*
+ * If we fail the final pass of JIT (from jit_subprogs),
+ * the program may not be finalized yet. Call finalize here
+ * before freeing it.
+ */
+ if (jit_data) {
+ bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
+ sizeof(jit_data->header->size));
+ kfree(jit_data);
+ }
+ hdr = bpf_jit_binary_pack_hdr(prog);
+ bpf_jit_binary_pack_free(hdr, NULL);
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
+ }
+
+ bpf_prog_unlock_free(prog);
+}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 634ef17fd38b..fd915ad69c09 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -490,7 +490,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
}
vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
- ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+ vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
return 0;
}
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index 553142c1f14f..a35dd7311795 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -180,7 +180,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
vaddr = PCI_IOBASE + range->io_start;
- ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+ vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
return 0;
}
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 48e0eaf1ad61..5c064485197a 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -46,8 +46,8 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
WARN_ON_ONCE(size & ~PAGE_MASK);
if (slab_is_available()) {
- if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
- pgprot_noncached(PAGE_KERNEL)))
+ if (vmap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+ pgprot_noncached(PAGE_KERNEL)))
vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
} else {
early_ioremap_range(ISA_IO_BASE, pa, size,
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
index 8f7a62257044..fb9696d7a3f2 100644
--- a/arch/riscv/include/asm/cfi.h
+++ b/arch/riscv/include/asm/cfi.h
@@ -13,11 +13,28 @@ struct pt_regs;
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#define __bpfcall
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+
+#define cfi_get_offset cfi_get_offset
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
#else
static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
{
return BUG_TRAP_TYPE_NONE;
}
+
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+ return 0;
+}
#endif /* CONFIG_CFI_CLANG */
#endif /* _ASM_RISCV_CFI_H */
diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c
index 6ec9dbd7292e..64bdd3e1ab8c 100644
--- a/arch/riscv/kernel/cfi.c
+++ b/arch/riscv/kernel/cfi.c
@@ -75,3 +75,56 @@ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
return report_cfi_failure(regs, regs->epc, &target, type);
}
+
+#ifdef CONFIG_CFI_CLANG
+struct bpf_insn;
+
+/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
+extern unsigned int __bpf_prog_runX(const void *ctx,
+ const struct bpf_insn *insn);
+
+/*
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typeid.
+ */
+__ADDRESSABLE(__bpf_prog_runX);
+
+/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
+asm (
+" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
+" .type cfi_bpf_hash,@object \n"
+" .globl cfi_bpf_hash \n"
+" .p2align 2, 0x0 \n"
+"cfi_bpf_hash: \n"
+" .word __kcfi_typeid___bpf_prog_runX \n"
+" .size cfi_bpf_hash, 4 \n"
+" .popsection \n"
+);
+
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+
+__ADDRESSABLE(__bpf_callback_fn);
+
+/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
+asm (
+" .pushsection .data..ro_after_init,\"aw\",@progbits \n"
+" .type cfi_bpf_subprog_hash,@object \n"
+" .globl cfi_bpf_subprog_hash \n"
+" .p2align 2, 0x0 \n"
+"cfi_bpf_subprog_hash: \n"
+" .word __kcfi_typeid___bpf_callback_fn \n"
+" .size cfi_bpf_subprog_hash, 4 \n"
+" .popsection \n"
+);
+
+u32 cfi_get_func_hash(void *func)
+{
+ u32 hash;
+
+ if (get_kernel_nofault(hash, func - cfi_get_offset()))
+ return 0;
+
+ return hash;
+}
+#endif
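These symbols give arch code a way to type-check an indirect-call target against the expected BPF signature. A hedged illustration of the kind of check they enable (target and is_subprog are hypothetical names; this is not code from the patch):

        u32 expected = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash;

        if (cfi_get_func_hash(target) != expected)
                return -EINVAL; /* kCFI type mismatch */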
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index a5ce1ab76ece..f4b6b3b9edda 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -18,6 +18,11 @@ static inline bool rvc_enabled(void)
return IS_ENABLED(CONFIG_RISCV_ISA_C);
}
+static inline bool rvzbb_enabled(void)
+{
+ return IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBB);
+}
+
enum {
RV_REG_ZERO = 0, /* The constant value 0 */
RV_REG_RA = 1, /* Return address */
@@ -730,6 +735,33 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
return rv_css_insn(0x6, imm, rs2, 0x2);
}
+/* RVZBB instructions. */
+static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
+{
+ return rv_i_insn(0x604, rs1, 1, rd, 0x13);
+}
+
+static inline u32 rvzbb_sexth(u8 rd, u8 rs1)
+{
+ return rv_i_insn(0x605, rs1, 1, rd, 0x13);
+}
+
+static inline u32 rvzbb_zexth(u8 rd, u8 rs)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return rv_i_insn(0x80, rs, 4, rd, 0x3b);
+
+ return rv_i_insn(0x80, rs, 4, rd, 0x33);
+}
+
+static inline u32 rvzbb_rev8(u8 rd, u8 rs)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return rv_i_insn(0x6b8, rs, 5, rd, 0x13);
+
+ return rv_i_insn(0x698, rs, 5, rd, 0x13);
+}
+
/*
* RV64-only instructions.
*
@@ -1087,9 +1119,111 @@ static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
emit(rv_subw(rd, rs1, rs2), ctx);
}
+static inline void emit_sextb(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ emit(rvzbb_sextb(rd, rs), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs, 56, ctx);
+ emit_srai(rd, rd, 56, ctx);
+}
+
+static inline void emit_sexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ emit(rvzbb_sexth(rd, rs), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs, 48, ctx);
+ emit_srai(rd, rd, 48, ctx);
+}
+
+static inline void emit_sextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ emit_addiw(rd, rs, 0, ctx);
+}
+
+static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ emit(rvzbb_zexth(rd, rs), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs, 48, ctx);
+ emit_srli(rd, rd, 48, ctx);
+}
+
+static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ emit_slli(rd, rs, 32, ctx);
+ emit_srli(rd, rd, 32, ctx);
+}
+
+static inline void emit_bswap(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ int bits = 64 - imm;
+
+ emit(rvzbb_rev8(rd, rd), ctx);
+ if (bits)
+ emit_srli(rd, rd, bits, ctx);
+ return;
+ }
+
+ emit_li(RV_REG_T2, 0, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+ if (imm == 16)
+ goto out_be;
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+ if (imm == 32)
+ goto out_be;
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+out_be:
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+
+ emit_mv(rd, RV_REG_T2, ctx);
+}
+
#endif /* __riscv_xlen == 64 */
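In the Zbb path of emit_bswap() the shift amount is worth unpacking: rev8 reverses all eight bytes, so a narrower swap only needs the result shifted back down by bits = 64 - imm. For imm == 16 that is

        rev8    rd, rd          # bytes 0..7 become 7..0
        srli    rd, rd, 48      # keep the swapped halfword, zero-extended

matching the two instructions emitted above, while imm == 64 skips the shift entirely.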
-void bpf_jit_build_prologue(struct rv_jit_context *ctx);
+void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog);
void bpf_jit_build_epilogue(struct rv_jit_context *ctx);
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index 529a83b85c1c..f5ba73bb153d 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -1301,7 +1301,7 @@ notsupported:
return 0;
}
-void bpf_jit_build_prologue(struct rv_jit_context *ctx)
+void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
{
const s8 *fp = bpf2rv32[BPF_REG_FP];
const s8 *r1 = bpf2rv32[BPF_REG_1];
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 719a97e7edb2..aac190085472 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -11,6 +11,7 @@
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/patch.h>
+#include <asm/cfi.h>
#include "bpf_jit.h"
#define RV_FENTRY_NINSNS 2
@@ -141,6 +142,19 @@ static bool in_auipc_jalr_range(s64 val)
val < ((1L << 31) - (1L << 11));
}
+/* Modify rd pointer to alternate reg to avoid corrupting original reg */
+static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
+{
+ emit_sextw(ra, *rd, ctx);
+ *rd = ra;
+}
+
+static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
+{
+ emit_zextw(ra, *rd, ctx);
+ *rd = ra;
+}
+
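The _alt helpers extend into a scratch register and redirect the caller's register variable, so the original BPF register is never clobbered. A sketch of the intended call pattern, mirroring the 32-bit conditional-jump path later in this file:

        /* Compare two 32-bit values without touching rd/rs themselves. */
        emit_zextw_alt(&rs, RV_REG_T1, ctx);   /* rs now names t1 */
        emit_zextw_alt(&rd, RV_REG_T2, ctx);   /* rd now names t2 */
        emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);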
/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
@@ -326,12 +340,6 @@ static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}
-static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
-{
- emit_slli(reg, reg, 32, ctx);
- emit_srli(reg, reg, 32, ctx);
-}
-
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
int tc_ninsn, off, start_insn = ctx->ninsns;
@@ -346,7 +354,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
*/
tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
ctx->offset[0];
- emit_zext_32(RV_REG_A2, ctx);
+ emit_zextw(RV_REG_A2, RV_REG_A2, ctx);
off = offsetof(struct bpf_array, map.max_entries);
if (is_12b_check(off, insn))
@@ -405,38 +413,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}
-static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
-{
- emit_mv(RV_REG_T2, *rd, ctx);
- emit_zext_32(RV_REG_T2, ctx);
- emit_mv(RV_REG_T1, *rs, ctx);
- emit_zext_32(RV_REG_T1, ctx);
- *rd = RV_REG_T2;
- *rs = RV_REG_T1;
-}
-
-static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
-{
- emit_addiw(RV_REG_T2, *rd, 0, ctx);
- emit_addiw(RV_REG_T1, *rs, 0, ctx);
- *rd = RV_REG_T2;
- *rs = RV_REG_T1;
-}
-
-static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
-{
- emit_mv(RV_REG_T2, *rd, ctx);
- emit_zext_32(RV_REG_T2, ctx);
- emit_zext_32(RV_REG_T1, ctx);
- *rd = RV_REG_T2;
-}
-
-static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
-{
- emit_addiw(RV_REG_T2, *rd, 0, ctx);
- *rd = RV_REG_T2;
-}
-
static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
@@ -480,6 +456,12 @@ static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}
+static inline void emit_kcfi(u32 hash, struct rv_jit_context *ctx)
+{
+ if (IS_ENABLED(CONFIG_CFI_CLANG))
+ emit(hash, ctx);
+}
+
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
struct rv_jit_context *ctx)
{
@@ -519,32 +501,32 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
case BPF_AND | BPF_FETCH:
emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
rv_amoand_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
case BPF_OR | BPF_FETCH:
emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
rv_amoor_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
case BPF_XOR | BPF_FETCH:
emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
case BPF_XCHG:
emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
case BPF_CMPXCHG:
@@ -894,6 +876,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
emit_sd(RV_REG_SP, stack_size - 16, RV_REG_FP, ctx);
emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
} else {
+ /* emit kcfi hash */
+ emit_kcfi(cfi_get_func_hash(func_addr), ctx);
/* For the trampoline called directly, just handle
* the frame of trampoline.
*/
@@ -1091,7 +1075,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_MOV | BPF_X:
if (imm == 1) {
/* Special mov32 for zext */
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
}
switch (insn->off) {
@@ -1099,16 +1083,17 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_mv(rd, rs, ctx);
break;
case 8:
+ emit_sextb(rd, rs, ctx);
+ break;
case 16:
- emit_slli(RV_REG_T1, rs, 64 - insn->off, ctx);
- emit_srai(rd, RV_REG_T1, 64 - insn->off, ctx);
+ emit_sexth(rd, rs, ctx);
break;
case 32:
- emit_addiw(rd, rs, 0, ctx);
+ emit_sextw(rd, rs, ctx);
break;
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = dst OP src */
@@ -1116,7 +1101,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_ADD | BPF_X:
emit_add(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
@@ -1126,31 +1111,31 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_subw(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit_and(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit_or(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit_xor(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
@@ -1159,7 +1144,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
else
emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
@@ -1168,25 +1153,25 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
else
emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = -dst */
@@ -1194,73 +1179,27 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_NEG:
emit_sub(rd, RV_REG_ZERO, rd, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
- emit_slli(rd, rd, 48, ctx);
- emit_srli(rd, rd, 48, ctx);
+ emit_zexth(rd, rd, ctx);
break;
case 32:
if (!aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case 64:
/* Do nothing */
break;
}
break;
-
case BPF_ALU | BPF_END | BPF_FROM_BE:
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
- emit_li(RV_REG_T2, 0, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
- if (imm == 16)
- goto out_be;
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
- if (imm == 32)
- goto out_be;
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-out_be:
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-
- emit_mv(rd, RV_REG_T2, ctx);
+ emit_bswap(rd, imm, ctx);
break;
/* dst = imm */
@@ -1268,7 +1207,7 @@ out_be:
case BPF_ALU64 | BPF_MOV | BPF_K:
emit_imm(rd, imm, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = dst OP imm */
@@ -1281,7 +1220,7 @@ out_be:
emit_add(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
@@ -1292,7 +1231,7 @@ out_be:
emit_sub(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
@@ -1303,7 +1242,7 @@ out_be:
emit_and(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
@@ -1314,7 +1253,7 @@ out_be:
emit_or(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
@@ -1325,7 +1264,7 @@ out_be:
emit_xor(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
@@ -1333,7 +1272,7 @@ out_be:
emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
rv_mulw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
@@ -1345,7 +1284,7 @@ out_be:
emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
rv_divuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
@@ -1357,14 +1296,14 @@ out_be:
emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
rv_remuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
emit_slli(rd, rd, imm, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
@@ -1374,7 +1313,7 @@ out_be:
emit(rv_srliw(rd, rd, imm), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
@@ -1384,7 +1323,7 @@ out_be:
emit(rv_sraiw(rd, rd, imm), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* JUMP off */
@@ -1425,10 +1364,13 @@ out_be:
rvoff = rv_offset(i, off, ctx);
if (!is64) {
s = ctx->ninsns;
- if (is_signed_bpf_cond(BPF_OP(code)))
- emit_sext_32_rd_rs(&rd, &rs, ctx);
- else
- emit_zext_32_rd_rs(&rd, &rs, ctx);
+ if (is_signed_bpf_cond(BPF_OP(code))) {
+ emit_sextw_alt(&rs, RV_REG_T1, ctx);
+ emit_sextw_alt(&rd, RV_REG_T2, ctx);
+ } else {
+ emit_zextw_alt(&rs, RV_REG_T1, ctx);
+ emit_zextw_alt(&rd, RV_REG_T2, ctx);
+ }
e = ctx->ninsns;
/* Adjust for extra insns */
@@ -1439,8 +1381,7 @@ out_be:
/* Adjust for and */
rvoff -= 4;
emit_and(RV_REG_T1, rd, rs, ctx);
- emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
- ctx);
+ emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
} else {
emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
}
@@ -1469,18 +1410,18 @@ out_be:
case BPF_JMP32 | BPF_JSLE | BPF_K:
rvoff = rv_offset(i, off, ctx);
s = ctx->ninsns;
- if (imm) {
+ if (imm)
emit_imm(RV_REG_T1, imm, ctx);
- rs = RV_REG_T1;
- } else {
- /* If imm is 0, simply use zero register. */
- rs = RV_REG_ZERO;
- }
+ rs = imm ? RV_REG_T1 : RV_REG_ZERO;
if (!is64) {
- if (is_signed_bpf_cond(BPF_OP(code)))
- emit_sext_32_rd(&rd, ctx);
- else
- emit_zext_32_rd_t1(&rd, ctx);
+ if (is_signed_bpf_cond(BPF_OP(code))) {
+ emit_sextw_alt(&rd, RV_REG_T2, ctx);
+ /* rs has been sign extended */
+ } else {
+ emit_zextw_alt(&rd, RV_REG_T2, ctx);
+ if (imm)
+ emit_zextw(rs, rs, ctx);
+ }
}
e = ctx->ninsns;
@@ -1504,7 +1445,7 @@ out_be:
* as t1 is used only in comparison against zero.
*/
if (!is64 && imm < 0)
- emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
+ emit_sextw(RV_REG_T1, RV_REG_T1, ctx);
e = ctx->ninsns;
rvoff -= ninsns_rvoff(e - s);
emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
@@ -1779,7 +1720,7 @@ out_be:
return 0;
}
-void bpf_jit_build_prologue(struct rv_jit_context *ctx)
+void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
{
int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
@@ -1808,6 +1749,9 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
store_offset = stack_adjust - 8;
+ /* emit kcfi type preamble immediately before the first insn */
+ emit_kcfi(is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash, ctx);
+
/* nops reserved for auipc+jalr pair */
for (i = 0; i < RV_FENTRY_NINSNS; i++)
emit(rv_nop(), ctx);
@@ -1874,3 +1818,8 @@ bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 7b70ccb7fec3..6b3acac30c06 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -10,6 +10,7 @@
#include <linux/filter.h>
#include <linux/memory.h>
#include <asm/patch.h>
+#include <asm/cfi.h>
#include "bpf_jit.h"
/* Number of iterations to try until offsets converge. */
@@ -100,7 +101,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
pass++;
ctx->ninsns = 0;
- bpf_jit_build_prologue(ctx);
+ bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
ctx->prologue_len = ctx->ninsns;
if (build_body(ctx, extra_pass, ctx->offset)) {
@@ -160,7 +161,7 @@ skip_init_ctx:
ctx->ninsns = 0;
ctx->nexentries = 0;
- bpf_jit_build_prologue(ctx);
+ bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
if (build_body(ctx, extra_pass, NULL)) {
prog = orig_prog;
goto out_free_hdr;
@@ -170,9 +171,9 @@ skip_init_ctx:
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
- prog->bpf_func = (void *)ctx->ro_insns;
+ prog->bpf_func = (void *)ctx->ro_insns + cfi_get_offset();
prog->jited = 1;
- prog->jited_len = prog_size;
+ prog->jited_len = prog_size - cfi_get_offset();
if (!prog->is_func || extra_pass) {
if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
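With CONFIG_CFI_CLANG the JIT image now begins with the 4-byte kCFI type hash, so the callable entry point skips it; cfi_get_offset() is 4 on riscv. A sketch of the resulting layout:

        ro_insns + 0:   .word cfi_bpf_hash      # checked by indirect callers
        ro_insns + 4:   first instruction       # prog->bpf_func points here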
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f3b4716317c1..a7ba8e178645 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -113,6 +113,7 @@ static int bpf_size_to_x86_bytes(int bpf_size)
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
+#define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
/*
* The following table maps BPF registers to x86-64 registers.
@@ -139,6 +140,7 @@ static const int reg2hex[] = {
[BPF_REG_AX] = 2, /* R10 temp register */
[AUX_REG] = 3, /* R11 temp register */
[X86_REG_R9] = 1, /* R9 register, 6th function argument */
+ [X86_REG_R12] = 4, /* R12 callee saved */
};
static const int reg2pt_regs[] = {
@@ -167,6 +169,7 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_8) |
BIT(BPF_REG_9) |
BIT(X86_REG_R9) |
+ BIT(X86_REG_R12) |
BIT(BPF_REG_AX));
}
@@ -205,6 +208,17 @@ static u8 add_2mod(u8 byte, u32 r1, u32 r2)
return byte;
}
+static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
+{
+ if (is_ereg(r1))
+ byte |= 1;
+ if (is_ereg(index))
+ byte |= 2;
+ if (is_ereg(r2))
+ byte |= 4;
+ return byte;
+}
+
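add_3mod() extends add_2mod() with the REX.X bit for a SIB index register: bit 0 is REX.B (base), bit 1 is REX.X (index), bit 2 is REX.R (reg field). A worked example for 'mov rax, qword ptr [rax + r12 + off]', where only the index is an extended register:

        add_3mod(0x48, src_reg /* rax */, dst_reg /* rax */, X86_REG_R12)
                == 0x48 | 2 == 0x4A     /* REX.W + REX.X */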
/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
@@ -645,6 +659,8 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
pop_r12(&prog);
} else {
pop_callee_regs(&prog, callee_regs_used);
+ if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
+ pop_r12(&prog);
}
EMIT1(0x58); /* pop rax */
@@ -704,6 +720,8 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
pop_r12(&prog);
} else {
pop_callee_regs(&prog, callee_regs_used);
+ if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
+ pop_r12(&prog);
}
EMIT1(0x58); /* pop rax */
@@ -887,6 +905,18 @@ static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
*pprog = prog;
}
+static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
+{
+ u8 *prog = *pprog;
+
+ if (is_imm8(off)) {
+ EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
+ } else {
+ EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
+ }
+ *pprog = prog;
+}
+
/*
* Emit a REX byte if it will be necessary to address these registers
*/
@@ -968,6 +998,37 @@ static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
*pprog = prog;
}
+static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+ u8 *prog = *pprog;
+
+ switch (size) {
+ case BPF_B:
+ /* movzx rax, byte ptr [rax + r12 + off] */
+ EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
+ break;
+ case BPF_H:
+ /* movzx rax, word ptr [rax + r12 + off] */
+ EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
+ break;
+ case BPF_W:
+ /* mov eax, dword ptr [rax + r12 + off] */
+ EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
+ break;
+ case BPF_DW:
+ /* mov rax, qword ptr [rax + r12 + off] */
+ EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
+ break;
+ }
+ emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
+ *pprog = prog;
+}
+
+static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+ emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
+}
+
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
@@ -1002,6 +1063,71 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
*pprog = prog;
}
+/* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
+static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
+{
+ u8 *prog = *pprog;
+
+ switch (size) {
+ case BPF_B:
+ /* mov byte ptr [rax + r12 + off], al */
+ EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
+ break;
+ case BPF_H:
+ /* mov word ptr [rax + r12 + off], ax */
+ EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
+ break;
+ case BPF_W:
+ /* mov dword ptr [rax + r12 + off], eax */
+ EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
+ break;
+ case BPF_DW:
+ /* mov qword ptr [rax + r12 + off], rax */
+ EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
+ break;
+ }
+ emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
+ *pprog = prog;
+}
+
+static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+ emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
+}
+
+/* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
+static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
+{
+ u8 *prog = *pprog;
+
+ switch (size) {
+ case BPF_B:
+ /* mov byte ptr [rax + r12 + off], imm8 */
+ EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
+ break;
+ case BPF_H:
+ /* mov word ptr [rax + r12 + off], imm16 */
+ EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
+ break;
+ case BPF_W:
+ /* mov dword ptr [rax + r12 + off], imm32 */
+ EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
+ break;
+ case BPF_DW:
+ /* mov qword ptr [rax + r12 + off], imm32 */
+ EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
+ break;
+ }
+ emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
+ EMIT(imm, bpf_size_to_x86_bytes(size));
+ *pprog = prog;
+}
+
+static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
+{
+ emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
+}
+
static int emit_atomic(u8 **pprog, u8 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
@@ -1043,12 +1169,15 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
return 0;
}
+#define DONT_CLEAR 1
+
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
u32 reg = x->fixup >> 8;
/* jump over faulting load and clear dest register */
- *(unsigned long *)((void *)regs + reg) = 0;
+ if (reg != DONT_CLEAR)
+ *(unsigned long *)((void *)regs + reg) = 0;
regs->ip += x->fixup & 0xff;
return true;
}
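The fixup word packs two fields: the low byte is the byte length of the faulting access to skip (added to regs->ip), and the next byte is either the pt_regs offset of the destination register or the DONT_CLEAR sentinel. The sentinel value 1 cannot collide with a real pt_regs offset, since those are 8-byte aligned. A producer-side sketch, matching the populate_extable code further down (insn_len and reg_or_sentinel are illustrative names):

        ex->fixup = (insn_len & 0xff) | (reg_or_sentinel << 8);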
@@ -1147,11 +1276,15 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
+ u64 arena_vm_start, user_vm_start;
int i, excnt = 0;
int ilen, proglen = 0;
u8 *prog = temp;
int err;
+ arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
+ user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
+
detect_reg_usage(insn, insn_cnt, callee_regs_used,
&tail_call_seen);
@@ -1172,8 +1305,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
push_r12(&prog);
push_callee_regs(&prog, all_callee_regs_used);
} else {
+ if (arena_vm_start)
+ push_r12(&prog);
push_callee_regs(&prog, callee_regs_used);
}
+ if (arena_vm_start)
+ emit_mov_imm64(&prog, X86_REG_R12,
+ arena_vm_start >> 32, (u32) arena_vm_start);
ilen = prog - temp;
if (rw_image)
@@ -1213,6 +1351,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn->off == BPF_ADDR_SPACE_CAST &&
+ insn->imm == 1U << 16) {
+ if (dst_reg != src_reg)
+ /* 32-bit mov */
+ emit_mov_reg(&prog, false, dst_reg, src_reg);
+ /* shl dst_reg, 32 */
+ maybe_emit_1mod(&prog, dst_reg, true);
+ EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
+
+ /* or dst_reg, user_vm_start */
+ maybe_emit_1mod(&prog, dst_reg, true);
+ if (is_axreg(dst_reg))
+ EMIT1_off32(0x0D, user_vm_start >> 32);
+ else
+ EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);
+
+ /* rol dst_reg, 32 */
+ maybe_emit_1mod(&prog, dst_reg, true);
+ EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
+
+ /* xor r11, r11 */
+ EMIT3(0x4D, 0x31, 0xDB);
+
+ /* test dst_reg32, dst_reg32; check if the lower 32 bits are zero */
+ maybe_emit_mod(&prog, dst_reg, dst_reg, false);
+ EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
+
+ /* cmove r11, dst_reg; if so, set dst_reg to zero */
+ /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
+ maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
+ EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
+ break;
+ }
+ fallthrough;
case BPF_ALU | BPF_MOV | BPF_X:
if (insn->off == 0)
emit_mov_reg(&prog,
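In C terms the addr_space_cast sequence above rebases a 32-bit arena offset into the arena's user address range while preserving NULL; a sketch, with dst and src standing for the mapped x86 registers:

        dst = (u32)src ? ((user_vm_start & 0xffffffff00000000ULL) | (u32)src)
                       : 0;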
@@ -1564,6 +1736,56 @@ st: if (is_imm8(insn->off))
emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
break;
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ start_of_ldx = prog;
+ emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
+ goto populate_extable;
+
+ /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ start_of_ldx = prog;
+ if (BPF_CLASS(insn->code) == BPF_LDX)
+ emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+ else
+ emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
+populate_extable:
+ {
+ struct exception_table_entry *ex;
+ u8 *_insn = image + proglen + (start_of_ldx - temp);
+ s64 delta;
+
+ if (!bpf_prog->aux->extable)
+ break;
+
+ if (excnt >= bpf_prog->aux->num_exentries) {
+ pr_err("mem32 extable bug\n");
+ return -EFAULT;
+ }
+ ex = &bpf_prog->aux->extable[excnt++];
+
+ delta = _insn - (u8 *)&ex->insn;
+ /* switch ex to rw buffer for writes */
+ ex = (void *)rw_image + ((void *)ex - (void *)image);
+
+ ex->insn = delta;
+
+ ex->data = EX_TYPE_BPF;
+
+ ex->fixup = (prog - start_of_ldx) |
+ ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
+ }
+ break;
+
/* LDX: dst_reg = *(u8*)(src_reg + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
@@ -2036,6 +2258,8 @@ emit_jmp:
pop_r12(&prog);
} else {
pop_callee_regs(&prog, callee_regs_used);
+ if (arena_vm_start)
+ pop_r12(&prog);
}
EMIT1(0xC9); /* leave */
emit_return(&prog, image + addrs[i - 1] + (prog - temp));
@@ -3242,3 +3466,13 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
BUG_ON(ret < 0);
}
}
+
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 50d8ce20ae5b..9fb1575f8d88 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2550,14 +2550,12 @@ static int fore200e_sba_probe(struct platform_device *op)
return 0;
}
-static int fore200e_sba_remove(struct platform_device *op)
+static void fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
fore200e_shutdown(fore200e);
kfree(fore200e);
-
- return 0;
}
static const struct of_device_id fore200e_sba_match[] = {
@@ -2574,7 +2572,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove = fore200e_sba_remove,
+ .remove_new = fore200e_sba_remove,
};
#endif
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7061d3ee836a..6b5d34919c72 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -68,7 +68,7 @@ static struct attribute *bcma_device_attrs[] = {
};
ATTRIBUTE_GROUPS(bcma_device);
-static struct bus_type bcma_bus_type = {
+static const struct bus_type bcma_bus_type = {
.name = "bcma",
.match = bcma_bus_match,
.probe = bcma_device_probe,
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0a5445ac5e1b..f9a7c790d7e2 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -11,6 +11,7 @@
#include <linux/firmware.h>
#include <linux/dmi.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -543,8 +544,6 @@ static const char *btbcm_get_board_name(struct device *dev)
struct device_node *root;
char *board_type;
const char *tmp;
- int len;
- int i;
root = of_find_node_by_path("/");
if (!root)
@@ -554,13 +553,8 @@ static const char *btbcm_get_board_name(struct device *dev)
return NULL;
/* get rid of any '/' in the compatible string */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
- for (i = 0; i < len; i++) {
- if (board_type[i] == '/')
- board_type[i] = '-';
- }
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type) {
+ of_node_put(root);
+ return NULL;
+ }
+ strreplace(board_type, '/', '-');
of_node_put(root);
return board_type;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index cdc5c08824a0..6ba7f5d1b837 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -441,7 +441,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
return PTR_ERR(skb);
}
- if (skb->len != sizeof(*ver)) {
+ if (!skb || skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;
@@ -2670,6 +2670,119 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
}
}
+static void btintel_print_fseq_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 *p;
+ u32 val;
+ const char *str;
+
+ skb = __hci_cmd_sync(hdev, 0xfcb3, 0, NULL, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_dbg(hdev, "Reading fseq status command failed (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len < (sizeof(u32) * 16 + 2)) {
+ bt_dev_dbg(hdev, "Malformed packet of length %u received",
+ skb->len);
+ kfree_skb(skb);
+ return;
+ }
+
+ p = skb_pull_data(skb, 1);
+ if (*p) {
+ bt_dev_dbg(hdev, "Failed to get fseq status (0x%2.2x)", *p);
+ kfree_skb(skb);
+ return;
+ }
+
+ p = skb_pull_data(skb, 1);
+ switch (*p) {
+ case 0:
+ str = "Success";
+ break;
+ case 1:
+ str = "Fatal error";
+ break;
+ case 2:
+ str = "Semaphore acquire error";
+ break;
+ default:
+ str = "Unknown error";
+ break;
+ }
+
+ if (*p) {
+ bt_dev_err(hdev, "Fseq status: %s (0x%2.2x)", str, *p);
+ kfree_skb(skb);
+ return;
+ }
+
+ bt_dev_info(hdev, "Fseq status: %s (0x%2.2x)", str, *p);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Reason: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Global version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Installed version: 0x%8.8x", val);
+
+ p = skb->data;
+ skb_pull_data(skb, 4);
+ bt_dev_info(hdev, "Fseq executed: %2.2u.%2.2u.%2.2u.%2.2u", p[0], p[1],
+ p[2], p[3]);
+
+ p = skb->data;
+ skb_pull_data(skb, 4);
+ bt_dev_info(hdev, "Fseq BT Top: %2.2u.%2.2u.%2.2u.%2.2u", p[0], p[1],
+ p[2], p[3]);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Top init version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Cnvio init version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX Wifi file version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq BT version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Top reset address: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX timeout: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX ack: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq CNVi id: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq CNVr id: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Error handle: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Magic noalive indication: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq OTP version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX otp version: 0x%8.8x", val);
+
+ kfree_skb(skb);
+}
+
static int btintel_setup_combined(struct hci_dev *hdev)
{
const u8 param[1] = { 0xFF };
@@ -2902,6 +3015,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
btintel_register_devcoredump_support(hdev);
+ btintel_print_fseq_info(hdev);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index aaabb732082c..ac8ebccd3507 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -372,8 +372,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
struct btmediatek_data *data = hci_get_priv(hdev);
int err;
- if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ kfree_skb(skb);
return 0;
+ }
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
@@ -420,5 +422,6 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7922);
MODULE_FIRMWARE(FIRMWARE_MT7961);
MODULE_FIRMWARE(FIRMWARE_MT7925);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 56f5502baadf..cbcdb99a22e6 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -4,6 +4,7 @@
#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7922 "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
#define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin"
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1d592ac413d1..0b93c2ff29e4 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -126,6 +126,7 @@ struct ps_data {
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
+ struct mutex ps_lock;
};
struct wakeup_cmd_payload {
@@ -317,6 +318,9 @@ static void ps_start_timer(struct btnxpuart_dev *nxpdev)
if (psdata->cur_psmode == PS_MODE_ENABLE)
mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval));
+
+ if (psdata->ps_state == PS_STATE_AWAKE && psdata->ps_cmd == PS_CMD_ENTER_PS)
+ cancel_work_sync(&psdata->work);
}
static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
@@ -337,6 +341,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
return;
+ mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
@@ -350,12 +355,15 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
status = serdev_device_break_ctl(nxpdev->serdev, 0);
else
status = serdev_device_break_ctl(nxpdev->serdev, -1);
+ msleep(20); /* Allow chip to detect UART-break and enter sleep */
bt_dev_dbg(hdev, "Set UART break: %s, status=%d",
str_on_off(ps_state == PS_STATE_SLEEP), status);
break;
}
if (!status)
psdata->ps_state = ps_state;
+ mutex_unlock(&psdata->ps_lock);
+
if (ps_state == PS_STATE_AWAKE)
btnxpuart_tx_wakeup(nxpdev);
}
@@ -391,17 +399,25 @@ static void ps_setup(struct hci_dev *hdev)
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
+ mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
}
-static void ps_wakeup(struct btnxpuart_dev *nxpdev)
+static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
{
struct ps_data *psdata = &nxpdev->psdata;
+ u8 ps_state;
+
+ mutex_lock(&psdata->ps_lock);
+ ps_state = psdata->ps_state;
+ mutex_unlock(&psdata->ps_lock);
- if (psdata->ps_state != PS_STATE_AWAKE) {
+ if (ps_state != PS_STATE_AWAKE) {
psdata->ps_cmd = PS_CMD_EXIT_PS;
schedule_work(&psdata->work);
+ return true;
}
+ return false;
}
static int send_ps_cmd(struct hci_dev *hdev, void *data)
@@ -1171,7 +1187,6 @@ static struct sk_buff *nxp_dequeue(void *data)
{
struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data;
- ps_wakeup(nxpdev);
ps_start_timer(nxpdev);
return skb_dequeue(&nxpdev->txq);
}
@@ -1186,6 +1201,9 @@ static void btnxpuart_tx_work(struct work_struct *work)
struct sk_buff *skb;
int len;
+ if (ps_wakeup(nxpdev))
+ return;
+
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
hdev->stat.byte_tx += len;
@@ -1234,6 +1252,9 @@ static int btnxpuart_close(struct hci_dev *hdev)
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
+ skb_queue_purge(&nxpdev->txq);
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 277d039ecbb4..cc50de69e8dc 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -69,6 +69,7 @@ enum btrtl_chip_id {
CHIP_ID_8852B = 20,
CHIP_ID_8852C = 25,
CHIP_ID_8851B = 36,
+ CHIP_ID_8852BT = 47,
};
struct id_table {
@@ -307,6 +308,15 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8851bu_fw",
.cfg_name = "rtl_bt/rtl8851bu_config",
.hw_info = "rtl8851bu" },
+
+ /* 8852BT/8852BE-VT */
+ { IC_INFO(RTL_ROM_LMP_8852A, 0x87, 0xc, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8852btu_fw",
+ .cfg_name = "rtl_bt/rtl8852btu_config",
+ .hw_info = "rtl8852btu" },
};
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
@@ -645,6 +655,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8852A, 20 }, /* 8852B */
{ RTL_ROM_LMP_8852A, 25 }, /* 8852C */
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
+ { RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
};
if (btrtl_dev->fw_len <= 8)
@@ -1275,6 +1286,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852B:
case CHIP_ID_8852C:
case CHIP_ID_8851B:
+ case CHIP_ID_8852BT:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
@@ -1505,6 +1517,8 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852btu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852btu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d31edad7a056..06e915b57283 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -553,6 +553,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8852BT/8852BE-VT Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -655,6 +658,11 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ /* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -3080,7 +3088,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
int err, status;
u32 dev_id = 0;
char fw_bin_name[64];
- u32 fw_version = 0;
+ u32 fw_version = 0, fw_flavor = 0;
u8 param;
struct btmediatek_data *mediatek;
@@ -3103,6 +3111,11 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
return err;
}
+ err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
+ return err;
+ }
}
mediatek = hci_get_priv(hdev);
@@ -3127,6 +3140,10 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
+ else if (dev_id == 0x7961 && fw_flavor)
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
else
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3273,7 +3290,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
- struct sk_buff *skb_cd;
switch (handle) {
case 0xfc6f: /* Firmware dump from device */
@@ -3286,9 +3302,12 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
* for backward compatibility, so we have to clone the packet
* an extra time for the in-kernel coredump support.
*/
- skb_cd = skb_clone(skb, GFP_ATOMIC);
- if (skb_cd)
- btmtk_process_coredump(hdev, skb_cd);
+ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+ }
fallthrough;
case 0x05ff: /* Firmware debug logging 1 */
@@ -4481,6 +4500,7 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
}
if (!reset)
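
As a concrete example of the firmware-name selection above: on an MT7961 whose flavor register at 0x70010020 reads non-zero and whose (fw_version & 0xff) is 1, the driver would request "mediatek/BT_RAM_CODE_MT7961_1a_2_hdr.bin", while the unflavored part keeps the existing "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin" name.
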
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 71e748a9477e..c0436881a533 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -113,6 +113,7 @@ struct h5_vnd {
int (*suspend)(struct h5 *h5);
int (*resume)(struct h5 *h5);
const struct acpi_gpio_mapping *acpi_gpio_map;
+ int sizeof_priv;
};
struct h5_device_data {
@@ -863,7 +864,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- return hci_uart_register_device(&h5->serdev_hu, &h5p);
+ return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
+ h5->vnd->sizeof_priv);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -1070,6 +1072,7 @@ static struct h5_vnd rtl_vnd = {
.suspend = h5_btrtl_suspend,
.resume = h5_btrtl_resume,
.acpi_gpio_map = acpi_btrtl_gpios,
+ .sizeof_priv = sizeof(struct btrealtek_data),
};
static const struct h5_device_data h5_data_rtl8822cs = {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index edd2a81b4d5e..8a60ad7acd70 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -2326,7 +2326,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en) &&
+ if (IS_ERR(qcadev->bt_en) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
@@ -2335,7 +2335,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
+ if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
data->soc_type == QCA_WCN7850))
@@ -2357,7 +2357,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en)) {
+ if (IS_ERR(qcadev->bt_en)) {
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
power_ctrl_enabled = false;
}
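
The IS_ERR_OR_NULL() to IS_ERR() change matters because devm_gpiod_get_optional() returns NULL, not an error pointer, when the firmware simply does not describe the GPIO; treating NULL as a failure broke boards without the line. The distinction, as a sketch:

	struct gpio_desc *en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);

	if (IS_ERR(en))
		return PTR_ERR(en);	/* real failure, e.g. -EPROBE_DEFER */
	if (!en)
		dev_dbg(dev, "no enable gpio described, continuing\n");
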
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 39c8b567da3c..214fff876eae 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -300,8 +300,9 @@ static const struct serdev_device_ops hci_serdev_client_ops = {
.write_wakeup = hci_uart_write_wakeup,
};
-int hci_uart_register_device(struct hci_uart *hu,
- const struct hci_uart_proto *p)
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv)
{
int err;
struct hci_dev *hdev;
@@ -325,7 +326,7 @@ int hci_uart_register_device(struct hci_uart *hu,
set_bit(HCI_UART_PROTO_READY, &hu->flags);
/* Initialize and register HCI device */
- hdev = hci_alloc_dev();
+ hdev = hci_alloc_dev_priv(sizeof_priv);
if (!hdev) {
BT_ERR("Can't allocate HCI device");
err = -ENOMEM;
@@ -394,7 +395,7 @@ err_rwsem:
percpu_free_rwsem(&hu->proto_lock);
return err;
}
-EXPORT_SYMBOL_GPL(hci_uart_register_device);
+EXPORT_SYMBOL_GPL(hci_uart_register_device_priv);
void hci_uart_unregister_device(struct hci_uart *hu)
{
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fb4a2d0d8cc8..68c8c7e95d64 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -97,7 +97,17 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
-int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv);
+
+static inline int hci_uart_register_device(struct hci_uart *hu,
+ const struct hci_uart_proto *p)
+{
+ return hci_uart_register_device_priv(hu, p, 0);
+}
+
void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
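
With the new entry point, a UART proto driver can reserve vendor-private storage behind struct hci_dev at registration time and fetch it later with hci_get_priv(); the struct name below is hypothetical:

	/* At registration: reserve sizeof(struct my_vendor_data) behind hdev */
	err = hci_uart_register_device_priv(hu, &my_proto,
					    sizeof(struct my_vendor_data));

	/* Anywhere the hdev is available later */
	struct my_vendor_data *vnd = hci_get_priv(hdev);
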
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 7f686d179fc9..64eaca80d736 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -29,6 +29,8 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+#define ASSERT_DPLL_PIN_REGISTERED(p) \
+ WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@@ -129,9 +131,9 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return -EINVAL;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_pins, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -209,9 +211,9 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_dplls, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -580,9 +582,9 @@ void dpll_pin_put(struct dpll_pin *pin)
{
mutex_lock(&dpll_lock);
if (refcount_dec_and_test(&pin->refcount)) {
+ xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
- xa_erase(&dpll_pin_xa, pin->id);
dpll_pin_prop_free(&pin->prop);
kfree_rcu(pin, rcu);
}
@@ -651,6 +653,7 @@ static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv)
{
+ ASSERT_DPLL_PIN_REGISTERED(pin);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
if (xa_empty(&pin->dpll_refs))
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index b57355e0c214..98e6ad8528d3 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -131,14 +131,21 @@ dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_lock_status_error status_error = 0;
enum dpll_lock_status status;
int ret;
- ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status, extack);
+ ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status,
+ &status_error, extack);
if (ret)
return ret;
if (nla_put_u32(msg, DPLL_A_LOCK_STATUS, status))
return -EMSGSIZE;
+ if (status_error &&
+ (status == DPLL_LOCK_STATUS_UNLOCKED ||
+ status == DPLL_LOCK_STATUS_HOLDOVER) &&
+ nla_put_u32(msg, DPLL_A_LOCK_STATUS_ERROR, status_error))
+ return -EMSGSIZE;
return 0;
}
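
For driver authors, the ops change means lock_status_get() gains an out-parameter for the failure reason, which the core reports only when the device is unlocked or in holdover. A hypothetical callback (the foo_* helpers are invented):

	static int foo_lock_status_get(const struct dpll_device *dpll, void *priv,
				       enum dpll_lock_status *status,
				       enum dpll_lock_status_error *status_error,
				       struct netlink_ext_ack *extack)
	{
		struct foo *foo = priv;

		if (foo_hw_locked(foo)) {
			*status = DPLL_LOCK_STATUS_LOCKED;
		} else {
			*status = DPLL_LOCK_STATUS_UNLOCKED;
			*status_error = DPLL_LOCK_STATUS_ERROR_UNDEFINED;
		}
		return 0;
	}
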
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 470ae2c29c94..e630caf644e8 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -176,9 +176,9 @@ __bpf_kfunc_end_defs();
* The following set contains all functions we agree BPF programs
* can use.
*/
-BTF_SET8_START(hid_bpf_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
-BTF_SET8_END(hid_bpf_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
.owner = THIS_MODULE,
@@ -487,12 +487,12 @@ static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
};
/* for syscall HID-BPF */
-BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
-BTF_SET8_END(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
.owner = THIS_MODULE,
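
The BTF_SET8_START/BTF_SET8_END to BTF_KFUNCS_START/BTF_KFUNCS_END conversion is mechanical; the new macros mark the set as a kfunc ID set for the BPF verifier. A generic sketch of the registration shape, with invented names:

	BTF_KFUNCS_START(my_kfunc_ids)
	BTF_ID_FLAGS(func, my_kfunc, KF_RET_NULL)
	BTF_KFUNCS_END(my_kfunc_ids)

	static const struct btf_kfunc_id_set my_kfunc_set = {
		.owner	= THIS_MODULE,
		.set	= &my_kfunc_ids,
	};
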
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a5be705d718..6f2a688fccbf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1272,10 +1272,10 @@ static int ipoib_get_iflink(const struct net_device *dev)
/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
/* child/vlan interface */
- return priv->parent->ifindex;
+ return READ_ONCE(priv->parent->ifindex);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e80d7bd3c4d..3ed257334562 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -49,7 +49,9 @@ MODULE_LICENSE("GPL");
/* -------- driver information -------------------------------------- */
static DEFINE_MUTEX(capi_mutex);
-static struct class *capi_class;
+static const struct class capi_class = {
+ .name = "capi",
+};
static int capi_major = 68; /* allocated */
module_param_named(major, capi_major, uint, 0);
@@ -1393,18 +1395,19 @@ static int __init capi_init(void)
kcapi_exit();
return major_ret;
}
- capi_class = class_create("capi");
- if (IS_ERR(capi_class)) {
+
+ ret = class_register(&capi_class);
+ if (ret) {
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
- return PTR_ERR(capi_class);
+ return ret;
}
- device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
+ device_create(&capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
if (capinc_tty_init() < 0) {
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
return -ENOMEM;
@@ -1427,8 +1430,8 @@ static void __exit capi_exit(void)
{
proc_exit();
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 09b72f14d4b7..b4ed0bb8ddfb 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -31,7 +31,9 @@ struct dsp_element_entry {
static LIST_HEAD(dsp_elements);
/* sysfs */
-static struct class *elements_class;
+static const struct class elements_class = {
+ .name = "dsp_pipeline",
+};
static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
@@ -80,7 +82,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
- entry->dev.class = elements_class;
+ entry->dev.class = &elements_class;
entry->dev.release = mISDN_dsp_dev_release;
dev_set_drvdata(&entry->dev, elem);
dev_set_name(&entry->dev, "%s", elem->name);
@@ -131,9 +133,11 @@ EXPORT_SYMBOL(mISDN_dsp_element_unregister);
int dsp_pipeline_module_init(void)
{
- elements_class = class_create("dsp_pipeline");
- if (IS_ERR(elements_class))
- return PTR_ERR(elements_class);
+ int err;
+
+ err = class_register(&elements_class);
+ if (err)
+ return err;
dsp_hwec_init();
@@ -146,7 +150,7 @@ void dsp_pipeline_module_exit(void)
dsp_hwec_exit();
- class_destroy(elements_class);
+ class_unregister(&elements_class);
list_for_each_entry_safe(entry, n, &dsp_elements, list) {
list_del(&entry->list);
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 52d82cbe7685..2f7564f26445 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -110,7 +110,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
- if (perfmon_capable())
+ if (bpf_token_capable(prog->aux->token, CAP_PERFMON))
return bpf_get_trace_printk_proto();
fallthrough;
default:
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 68e79b1272f6..6d15ab3bfbbc 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3063,15 +3063,10 @@ static int amt_dev_init(struct net_device *dev)
int err;
amt->dev = dev;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = gro_cells_init(&amt->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
return 0;
}
@@ -3081,7 +3076,6 @@ static void amt_dev_uninit(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
gro_cells_destroy(&amt->gro_cells);
- free_percpu(dev->tstats);
}
static const struct net_device_ops amt_netdev_ops = {
@@ -3090,7 +3084,6 @@ static const struct net_device_ops amt_netdev_ops = {
.ndo_open = amt_dev_open,
.ndo_stop = amt_dev_stop,
.ndo_start_xmit = amt_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static void amt_link_setup(struct net_device *dev)
@@ -3111,6 +3104,7 @@ static void amt_link_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
eth_hw_addr_random(dev);
eth_zero_addr(dev->broadcast);
ether_setup(dev);
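
This amt change (and the analogous bareudp one below) swaps open-coded tstats management for core-managed per-CPU stats: setting dev->pcpu_stat_type in the setup callback makes the core allocate and free the pcpu_sw_netstats counters and serve the default stats64 read, so the driver drops its netdev_alloc_pcpu_stats()/free_percpu() calls and its .ndo_get_stats64 = dev_get_tstats64 hook. The whole pattern reduces to:

	static void foo_link_setup(struct net_device *dev)
	{
		/* core now owns allocation, freeing and ndo_get_stats64 */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	}
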
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d9e052c49ba1..166bfc3c8e6c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -108,6 +108,7 @@ static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
+MODULE_DESCRIPTION("ARCnet core driver");
MODULE_LICENSE("GPL");
static int __init arcnet_init(void)
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 31377bb1cc97..339db6e4a1d5 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -194,15 +194,10 @@ static int bareudp_init(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&bareudp->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
+
return 0;
}
@@ -211,7 +206,6 @@ static void bareudp_uninit(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
gro_cells_destroy(&bareudp->gro_cells);
- free_percpu(dev->tstats);
}
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
@@ -529,7 +523,6 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
@@ -567,6 +560,7 @@ static void bareudp_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -760,23 +754,18 @@ static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(bareudp->dev, head);
}
-static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
+static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ bareudp_destroy_tunnels(net, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch = bareudp_exit_batch_net,
+ .exit_batch_rtnl = bareudp_exit_batch_rtnl,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
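
The conversion above illustrates the new pernet .exit_batch_rtnl hook: the core takes rtnl_lock once for the whole batch, passes in a shared dev_kill_list, and calls unregister_netdevice_many() itself, so the per-driver rtnl_lock()/unregister_netdevice_many()/rtnl_unlock() boilerplate disappears. Sketched generically:

	static void __net_exit foo_exit_batch_rtnl(struct list_head *net_list,
						   struct list_head *dev_kill_list)
	{
		struct net *net;

		/* rtnl_lock is already held by the core here */
		list_for_each_entry(net, net_list, exit_list)
			foo_destroy_tunnels(net, dev_kill_list);
	}
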
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c99ffe6c683a..c6807e473ab7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -82,10 +82,6 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
- 0, 0, 0, 0, 0, 0
-};
-
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
@@ -106,6 +102,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator,
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_enable_collecting(struct port *port);
+static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_disable_collecting_distributing(struct port *port,
@@ -172,8 +171,37 @@ static inline int __agg_has_partner(struct aggregator *agg)
}
/**
+ * __disable_distributing_port - disable the port's slave for distributing.
+ * Port will still be able to collect.
+ * @port: the port we're looking at
+ *
+ * This will disable only distributing on the port's slave.
+ */
+static void __disable_distributing_port(struct port *port)
+{
+ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
+ * __enable_collecting_port - enable the port's slave for collecting,
+ * if it's up
+ * @port: the port we're looking at
+ *
+ * This will enable only collecting on the port's slave.
+ */
+static void __enable_collecting_port(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
+ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
+ *
+ * This will disable both collecting and distributing on the port's slave.
*/
static inline void __disable_port(struct port *port)
{
@@ -183,6 +211,8 @@ static inline void __disable_port(struct port *port)
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
+ *
+ * This will enable both collecting and distributing on the port's slave.
*/
static inline void __enable_port(struct port *port)
{
@@ -193,10 +223,27 @@ static inline void __enable_port(struct port *port)
}
/**
- * __port_is_enabled - check if the port's slave is in active state
+ * __port_move_to_attached_state - check if port should transition back to attached
+ * state.
+ * @port: the port we're looking at
+ */
+static bool __port_move_to_attached_state(struct port *port)
+{
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
+ port->sm_mux_state = AD_MUX_ATTACHED;
+
+ return port->sm_mux_state == AD_MUX_ATTACHED;
+}
+
+/**
+ * __port_is_collecting_distributing - check if the port's slave is in the
+ * combined collecting/distributing state
* @port: the port we're looking at
*/
-static inline int __port_is_enabled(struct port *port)
+static int __port_is_collecting_distributing(struct port *port)
{
return bond_is_active_slave(port->slave);
}
@@ -942,6 +989,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
*/
static void ad_mux_machine(struct port *port, bool *update_slave_arr)
{
+ struct bonding *bond = __get_bond_by_port(port);
mux_states_t last_state;
/* keep current State Machine state to compare later if it was
@@ -999,9 +1047,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
- if (port->aggregator->is_active)
- port->sm_mux_state =
- AD_MUX_COLLECTING_DISTRIBUTING;
+ if (port->aggregator->is_active) {
+ int state = AD_MUX_COLLECTING_DISTRIBUTING;
+
+ if (!bond->params.coupled_control)
+ state = AD_MUX_COLLECTING;
+ port->sm_mux_state = state;
+ }
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@@ -1019,11 +1071,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
+ if (!__port_move_to_attached_state(port)) {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
+ if (port->aggregator->is_active &&
+ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+ }
+ break;
+ case AD_MUX_COLLECTING:
+ if (!__port_move_to_attached_state(port)) {
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
+ port->sm_mux_state = AD_MUX_DISTRIBUTING;
+ } else {
+ /* If port state hasn't changed, make sure that a collecting
+ * port is enabled for an active aggregator.
+ */
+ struct slave *slave = port->slave;
+
+ if (port->aggregator->is_active &&
+ bond_is_slave_rx_disabled(slave)) {
+ ad_enable_collecting(port);
+ *update_slave_arr = true;
+ }
+ }
+ }
+ break;
+ case AD_MUX_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
!(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
- port->sm_mux_state = AD_MUX_ATTACHED;
+ port->sm_mux_state = AD_MUX_COLLECTING;
} else {
/* if port state hasn't changed make
* sure that a collecting distributing
@@ -1031,7 +1117,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
if (port->aggregator &&
port->aggregator->is_active &&
- !__port_is_enabled(port)) {
+ !__port_is_collecting_distributing(port)) {
__enable_port(port);
*update_slave_arr = true;
}
@@ -1082,6 +1168,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
update_slave_arr);
port->ntt = true;
break;
+ case AD_MUX_COLLECTING:
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting(port);
+ ad_disable_distributing(port, update_slave_arr);
+ port->ntt = true;
+ break;
+ case AD_MUX_DISTRIBUTING:
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting_distributing(port,
+ update_slave_arr);
+ break;
default:
break;
}
@@ -1484,7 +1584,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ ((__agg_has_partner(aggregator) && /* partner answers */
!aggregator->is_individual) /* but is not individual OR */
)
) {
@@ -1907,6 +2007,43 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
}
/**
+ * ad_enable_collecting - enable a port's receive
+ * @port: the port we're looking at
+ *
+ * Enable @port if it's in an active aggregator
+ */
+static void ad_enable_collecting(struct port *port)
+{
+ if (port->aggregator->is_active) {
+ struct slave *slave = port->slave;
+
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Enabling collecting on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __enable_collecting_port(port);
+ }
+}
+
+/**
+ * ad_disable_distributing - disable a port's transmit
+ * @port: the port we're looking at
+ * @update_slave_arr: Does slave array need update?
+ */
+static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
+{
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Disabling distributing on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __disable_distributing_port(port);
+ /* Slave array needs an update */
+ *update_slave_arr = true;
+ }
+}
+
+/**
* ad_enable_collecting_distributing - enable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
@@ -1935,9 +2072,7 @@ static void ad_enable_collecting_distributing(struct port *port,
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
- if (port->aggregator &&
- !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
- &(null_mac_addr))) {
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Disabling port %d (LAG %d)\n",
port->actor_port_number,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index cd0683bcca03..2c5ed0a7cb18 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2611,7 +2611,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
slave->delay = bond->params.downdelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
(BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -2625,9 +2625,10 @@ static int bond_miimon_inspect(struct bonding *bond)
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2649,7 +2650,7 @@ static int bond_miimon_inspect(struct bonding *bond)
commit++;
slave->delay = bond->params.updelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
ignore_updelay ? 0 :
bond->params.updelay *
@@ -2659,9 +2660,10 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -6305,6 +6307,7 @@ static int __init bond_check_params(struct bond_params *params)
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
+ params->coupled_control = 1;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6414,28 +6417,41 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
+ * race condition in bond unloading") we need to remove sysfs files
+ * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ */
+static void __net_exit bond_net_pre_exit(struct net *net)
+{
+ struct bond_net *bn = net_generic(net, bond_net_id);
+
+ bond_destroy_sysfs(bn);
+}
+
+static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct bond_net *bn;
struct net *net;
- LIST_HEAD(list);
-
- list_for_each_entry(net, net_list, exit_list) {
- bn = net_generic(net, bond_net_id);
- bond_destroy_sysfs(bn);
- }
/* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct bonding *bond, *tmp_bond;
bn = net_generic(net, bond_net_id);
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+}
+
+/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
+ * only after all bonds are gone") bond_destroy_proc_dir() is called
+ * after bond_net_exit_batch_rtnl() has completed.
+ */
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+{
+ struct bond_net *bn;
+ struct net *net;
list_for_each_entry(net, net_list, exit_list) {
bn = net_generic(net, bond_net_id);
@@ -6445,6 +6461,8 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
+ .pre_exit = bond_net_pre_exit,
+ .exit_batch_rtnl = bond_net_exit_batch_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index cfa74cf8bb1a..29b4c3d1b9b6 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_COUPLED_CONTROL]) {
+ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
+
+ bond_opt_initval(&newval, coupled_control);
+ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
+ data[IFLA_BOND_COUPLED_CONTROL], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
/* IFLA_BOND_NS_IP6_TARGET */
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
0;
}
@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.missed_max))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
+ bond->params.coupled_control))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index f3f27f0bd2a6..4cdbc7e084f4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -84,7 +84,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
-
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -232,6 +233,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_coupled_control_tbl[] = {
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { "off", 0, 0},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -496,6 +503,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
+ },
+ [BOND_OPT_COUPLED_CONTROL] = {
+ .id = BOND_OPT_COUPLED_CONTROL,
+ .name = "coupled_control",
+ .desc = "Opt into using coupled control MUX for LACP states",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_coupled_control_tbl,
+ .set = bond_option_coupled_control_set,
}
};
@@ -1692,3 +1708,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
bond->params.ad_user_port_key = newval->value;
return 0;
}
+
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
+ newval->string, newval->value);
+
+ bond->params.coupled_control = newval->value;
+ return 0;
+}
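
The option defaults to "on" (the historical coupled MUX machine) and, per BOND_OPTFLAG_IFDOWN and the unsuppmodes mask, can only be changed on a down bond in 802.3ad mode. Assuming matching iproute2 support for IFLA_BOND_COUPLED_CONTROL (the keyword below is an assumption), selecting the independent control state machine would look roughly like:

	# hypothetical usage; requires iproute2 with coupled_control support
	ip link add bond0 type bond mode 802.3ad coupled_control off
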
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index eb410714afc2..2e31db55d927 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -168,6 +168,8 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express 2xHS v2
Kvaser Mini PCI Express 1xCAN v3
Kvaser Mini PCI Express 2xCAN v3
+ Kvaser M.2 PCIe 4xCAN
+ Kvaser PCIe 8xCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
@@ -218,6 +220,7 @@ config CAN_XILINXCAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ctucanfd/Kconfig"
+source "drivers/net/can/esd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index ff8f76295d13..4669cd51e7bf 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
+obj-y += esd/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/esd/Kconfig b/drivers/net/can/esd/Kconfig
new file mode 100644
index 000000000000..54bfc366634c
--- /dev/null
+++ b/drivers/net/can/esd/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CAN_ESD_402_PCI
+ tristate "esd electronics gmbh CAN-PCI(e)/402 family"
+ depends on PCI && HAS_DMA
+ help
+ Support for C402 card family from esd electronics gmbh.
+ This card family is based on the ESDACC CAN controller and
+ available in several form factors: PCI, PCIe, PCIe Mini,
+ M.2 PCIe, CPCIserial, PMC, XMC (see https://esd.eu/en)
+
+ This driver can also be built as a module. In this case the
+ module will be called esd_402_pci.
diff --git a/drivers/net/can/esd/Makefile b/drivers/net/can/esd/Makefile
new file mode 100644
index 000000000000..5dd2d470c286
--- /dev/null
+++ b/drivers/net/can/esd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for esd gmbh ESDACC controller driver
+#
+esd_402_pci-objs := esdacc.o esd_402_pci-core.o
+
+obj-$(CONFIG_CAN_ESD_402_PCI) += esd_402_pci.o
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
new file mode 100644
index 000000000000..b7cdcffd0e45
--- /dev/null
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "esdacc.h"
+
+#define ESD_PCI_DEVICE_ID_PCIE402 0x0402
+
+#define PCI402_FPGA_VER_MIN 0x003d
+#define PCI402_MAX_CORES 6
+#define PCI402_BAR 0
+#define PCI402_IO_OV_OFFS 0
+#define PCI402_IO_PCIEP_OFFS 0x10000
+#define PCI402_IO_LEN_TOTAL 0x20000
+#define PCI402_IO_LEN_CORE 0x2000
+#define PCI402_PCICFG_MSICAP 0x50
+
+#define PCI402_DMA_MASK DMA_BIT_MASK(32)
+#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE)
+
+#define PCI402_PCIEP_OF_INT_ENABLE 0x0050
+#define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000
+#define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004
+#define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008
+#define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c
+
+struct pci402_card {
+ /* Actually mapped io space, all other iomem derived from this */
+ void __iomem *addr;
+ void __iomem *addr_pciep;
+
+ void *dma_buf;
+ dma_addr_t dma_hnd;
+
+ struct acc_ov ov;
+ struct acc_core *cores;
+
+ bool msi_enabled;
+};
+
+/* The BTR register capabilities described by the can_bittiming_const structures
+ * below are valid since esdACC version 0x0032.
+ */
+
+/* Used if the esdACC FPGA is built as CAN-Classic version. */
+static const struct can_bittiming_const pci402_bittiming_const = {
+ .name = "esd_402",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+/* Used if the esdACC FPGA is built as CAN-FD version. */
+static const struct can_bittiming_const pci402_bittiming_const_canfd = {
+ .name = "esd_402fd",
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct net_device_ops pci402_acc_netdev_ops = {
+ .ndo_open = acc_open,
+ .ndo_stop = acc_close,
+ .ndo_start_xmit = acc_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops pci402_acc_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+static irqreturn_t pci402_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *pdev = dev_id;
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ irqreturn_t irq_status;
+
+ irq_status = acc_card_interrupt(&card->ov, card->cores);
+
+ return irq_status;
+}
+
+static int pci402_set_msiconfig(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ u32 addr_lo_offs = 0;
+ u32 addr_lo = 0;
+ u32 addr_hi = 0;
+ u32 data = 0;
+ u16 csr = 0;
+ int err;
+
+ /* The FPGA hard IP PCIe core implements a 64-bit MSI Capability
+ * Register Format
+ */
+ err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO,
+ &addr_lo);
+ if (err)
+ goto failed;
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI,
+ &addr_hi);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64,
+ &data);
+ if (err)
+ goto failed;
+
+ addr_lo_offs = addr_lo & 0x0000ffff;
+ addr_lo &= 0xffff0000;
+
+ if (addr_hi)
+ addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */
+
+ if (!(csr & PCI_MSI_FLAGS_ENABLE)) {
+ err = -EINVAL;
+ goto failed;
+ }
+
+ iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO);
+ iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data);
+
+ return 0;
+
+failed:
+ pci_warn(pdev, "Error while setting MSI configuration:\n"
+ "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n",
+ csr, addr_hi, addr_lo, addr_lo_offs, data);
+
+ return err;
+}
+
+static int pci402_init_card(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ card->ov.addr = card->addr + PCI402_IO_OV_OFFS;
+ card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS;
+
+ acc_reset_fpga(&card->ov);
+ acc_init_ov(&card->ov, &pdev->dev);
+
+ if (card->ov.version < PCI402_FPGA_VER_MIN) {
+ pci_err(pdev,
+ "esdACC version (0x%.4x) outdated, please update\n",
+ card->ov.version);
+ return -EINVAL;
+ }
+
+ if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) {
+ pci_err(pdev,
+ "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n",
+ card->ov.timestamp_frequency);
+ return -EINVAL;
+ }
+
+ if (card->ov.active_cores > PCI402_MAX_CORES) {
+ pci_err(pdev,
+ "Card with %u active cores not supported by driver. Aborted.\n",
+ card->ov.active_cores);
+ return -EINVAL;
+ }
+ card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores,
+ sizeof(struct acc_core), GFP_KERNEL);
+ if (!card->cores)
+ return -ENOMEM;
+
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ pci_warn(pdev,
+ "esdACC with CAN-FD feature detected. This driver doesn't support CAN-FD yet.\n");
+ }
+
+#ifdef __LITTLE_ENDIAN
+ /* So card converts all busmastered data to LE for us: */
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE);
+#endif
+
+ return 0;
+}
+
+static int pci402_init_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = pci_enable_msi(pdev);
+ if (!err) {
+ err = pci402_set_msiconfig(pdev);
+ if (!err) {
+ card->msi_enabled = true;
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_dbg(pdev, "MSI preparation done\n");
+ }
+ }
+
+ err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), pdev);
+ if (err)
+ goto failure_msidis;
+
+ iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+
+ return 0;
+
+failure_msidis:
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+
+ return err;
+}
+
+static void pci402_finish_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+ devm_free_irq(&pdev->dev, pdev->irq, pdev);
+
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+}
+
+static int pci402_init_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK);
+ if (err) {
+ pci_err(pdev, "DMA set mask failed!\n");
+ return err;
+ }
+
+ /* The esdACC DMA engine needs the DMA buffer aligned to a 64k
+ * boundary. The DMA API guarantees to align the returned buffer to the
+ * smallest PAGE_SIZE order which is greater than or equal to the
+ * requested size. With PCI402_DMA_SIZE == 64kB this suffices here.
+ */
+ card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE,
+ &card->dma_hnd, GFP_KERNEL);
+ if (!card->dma_buf)
+ return -ENOMEM;
+
+ acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf);
+
+ iowrite32(card->dma_hnd,
+ card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ pci_set_master(pdev);
+
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ return 0;
+}
+
+static void pci402_finish_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ pci_clear_master(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ card->ov.bmfifo.messages = NULL;
+ card->ov.bmfifo.irq_cnt = NULL;
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+
+ core->bmfifo.messages = NULL;
+ core->bmfifo.irq_cnt = NULL;
+ }
+
+ dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf,
+ card->dma_hnd);
+ card->dma_buf = NULL;
+}
+
+static void pci402_unregister_core(struct acc_core *core)
+{
+ netdev_info(core->netdev, "unregister\n");
+ unregister_candev(core->netdev);
+
+ free_candev(core->netdev);
+ core->netdev = NULL;
+}
+
+static int pci402_init_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+ struct acc_net_priv *priv;
+ struct net_device *netdev;
+ u32 fifo_config;
+
+ core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE;
+
+ fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG);
+ core->tx_fifo_size = (fifo_config >> 24);
+ if (core->tx_fifo_size <= 1) {
+ pci_err(pdev, "Invalid tx_fifo_size!\n");
+ err = -EINVAL;
+ goto failure;
+ }
+
+ netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto failure;
+ }
+ core->netdev = netdev;
+
+ netdev->flags |= IFF_ECHO;
+ netdev->dev_port = i;
+ netdev->netdev_ops = &pci402_acc_netdev_ops;
+ netdev->ethtool_ops = &pci402_acc_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ priv = netdev_priv(netdev);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_CC_LEN8_DLC;
+
+ priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
+ priv->can.bittiming_const = &pci402_bittiming_const_canfd;
+ else
+ priv->can.bittiming_const = &pci402_bittiming_const;
+ priv->can.do_set_bittiming = acc_set_bittiming;
+ priv->can.do_set_mode = acc_set_mode;
+ priv->can.do_get_berr_counter = acc_get_berr_counter;
+
+ priv->core = core;
+ priv->ov = &card->ov;
+
+ err = register_candev(netdev);
+ if (err) {
+ free_candev(core->netdev);
+ core->netdev = NULL;
+ goto failure;
+ }
+
+ netdev_info(netdev, "registered\n");
+ }
+
+ return 0;
+
+failure:
+ for (i--; i >= 0; i--)
+ pci402_unregister_core(&card->cores[i]);
+
+ return err;
+}
+
+static void pci402_finish_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++)
+ pci402_unregister_core(&card->cores[i]);
+}
+
+static int pci402_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pci402_card *card = NULL;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto failure_disable_pci;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ err = pci_request_regions(pdev, pci_name(pdev));
+ if (err)
+ goto failure_disable_pci;
+
+ card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL);
+ if (!card->addr) {
+ err = -ENOMEM;
+ goto failure_release_regions;
+ }
+
+ err = pci402_init_card(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_dma(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_interrupt(pdev);
+ if (err)
+ goto failure_finish_dma;
+
+ err = pci402_init_cores(pdev);
+ if (err)
+ goto failure_finish_interrupt;
+
+ return 0;
+
+failure_finish_interrupt:
+ pci402_finish_interrupt(pdev);
+
+failure_finish_dma:
+ pci402_finish_dma(pdev);
+
+failure_unmap:
+ pci_iounmap(pdev, card->addr);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci402_remove(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ pci402_finish_interrupt(pdev);
+ pci402_finish_cores(pdev);
+ pci402_finish_dma(pdev);
+ pci_iounmap(pdev, card->addr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci402_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_ESDGMBH,
+ .device = ESD_PCI_DEVICE_ID_PCIE402,
+ .subvendor = PCI_VENDOR_ID_ESDGMBH,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci402_tbl);
+
+static struct pci_driver pci402_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci402_tbl,
+ .probe = pci402_probe,
+ .remove = pci402_remove,
+};
+module_pci_driver(pci402_driver);
+
+MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe");
+MODULE_AUTHOR("Thomas Körper <socketcan@esd.eu>");
+MODULE_AUTHOR("Stefan Mätje <stefan.maetje@esd.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
new file mode 100644
index 000000000000..121cbbf81458
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include "esdacc.h"
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+
+/* esdACC ID register layout */
+#define ACC_ID_ID_MASK GENMASK(28, 0)
+#define ACC_ID_EFF_FLAG BIT(29)
+
+/* esdACC DLC register layout */
+#define ACC_DLC_DLC_MASK GENMASK(3, 0)
+#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_TXD_FLAG BIT(5)
+
+/* ecc value of esdACC equals SJA1000's ECC register */
+#define ACC_ECC_SEG 0x1f
+#define ACC_ECC_DIR 0x20
+#define ACC_ECC_BIT 0x00
+#define ACC_ECC_FORM 0x40
+#define ACC_ECC_STUFF 0x80
+#define ACC_ECC_MASK 0xc0
+
+/* esdACC Status Register bits. Unused bits not documented. */
+#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
+#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
+#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)
+
+/* esdACC Overview Module BM_IRQ_Mask register related defines */
+/* Two bit wide command masks to mask or unmask a single core IRQ */
+#define ACC_BM_IRQ_UNMASK BIT(0)
+#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
+/* Command to unmask all IRQ sources. Created by shifting and OR-ing
+ * the two-bit wide ACC_BM_IRQ_UNMASK 16 times, i.e. 0b01 in each of
+ * the 16 two-bit command fields (0x5 in every nibble).
+ */
+#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
+
+static void acc_resetmode_enter(struct acc_core *core)
+{
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_resetmode_leave(struct acc_core *core)
+{
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+ const void *data)
+{
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
+ *((const u32 *)(data + 4)));
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
+ *((const u32 *)data));
+ acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
+ /* The CAN ID must be written last. This write starts the TX. */
+ acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
+}
+
+static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
+{
+ ++tx_fifo_idx;
+ if (tx_fifo_idx >= core->tx_fifo_size)
+ tx_fifo_idx = 0U;
+ return tx_fifo_idx;
+}
+
+/* Convert timestamp from esdACC time stamp ticks to ns
+ *
+ * The conversion factor ts2ns from time stamp counts to ns is basically
+ * ts2ns = NSEC_PER_SEC / timestamp_frequency
+ *
+ * Here we only handle a fixed timestamp frequency of 80 MHz. The
+ * resulting ts2ns factor is 12.5.
+ *
+ * In the end we multiply by 12 and add half of the HW timestamp
+ * to get a multiplication by 12.5. This way any overflow is
+ * avoided until ktime_t itself overflows.
+ */
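+/* For illustration (arbitrary value): ts = 1000 ticks at 80 MHz is
+ * 12500 ns; the computation below yields
+ * 1000 * 12 + (1000 >> 1) = 12000 + 500 = 12500 ns.
+ */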
+#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
+#define ACC_TS_80MHZ_SHIFT 1
+
+static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
+{
+ u64 ns;
+
+ ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);
+
+ return ns_to_ktime(ns);
+}
+
+#undef ACC_TS_FACTOR
+#undef ACC_TS_80MHZ_SHIFT
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev)
+{
+ u32 temp;
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
+ ov->version = temp;
+ ov->features = (temp >> 16);
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
+ ov->total_cores = temp;
+ ov->active_cores = (temp >> 8);
+
+ ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
+ ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);
+
+ /* Depending on the esdACC feature NEW_PSC, either enable the new prescaler
+ * or adjust core_frequency to account for the implicit division by 2.
+ */
+ if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
+ } else {
+ ov->core_frequency /= 2;
+ }
+
+ dev_dbg(dev,
+ "esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
+ ov->version, ov->core_frequency, ov->timestamp_frequency,
+ ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
+ ov->active_cores, ov->total_cores);
+}
+
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
+{
+ unsigned int u;
+
+ /* DMA buffer layout as follows, where N is the number of CAN cores
+ * implemented in the FPGA, i.e. N = ov->total_cores
+ *
+ * Section Layout Section size
+ * ----------------------------------------------
+ * FIFO Card/Overview ACC_CORE_DMABUF_SIZE
+ * FIFO Core0 ACC_CORE_DMABUF_SIZE
+ * ... ...
+ * FIFO CoreN ACC_CORE_DMABUF_SIZE
+ * irq_cnt Card/Overview sizeof(u32)
+ * irq_cnt Core0 sizeof(u32)
+ * ... ...
+ * irq_cnt CoreN sizeof(u32)
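+ *
+ * Illustrative example (N = 4): with ACC_CORE_DMABUF_SIZE =
+ * 256 * 32 byte = 8 KiB, the FIFO of Core0 starts at offset 8 KiB
+ * and the irq_cnt section starts at (4 + 1) * 8 KiB = 40 KiB.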
+ */
+ ov->bmfifo.messages = mem;
+ ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;
+
+ for (u = 0U; u < ov->active_cores; u++) {
+ struct acc_core *core = &cores[u];
+
+ core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
+ core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
+ }
+}
+
+int acc_open(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ u32 tx_fifo_status;
+ u32 ctrl_mode;
+ int err;
+
+ /* Retry entering RESET mode if the state is out of sync. */
+ if (priv->can.state != CAN_STATE_STOPPED) {
+ netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
+ __func__, can_get_state_str(priv->can.state));
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+
+ acc_resetmode_leave(core);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Resync TX FIFO indices to HW state after (re-)start. */
+ tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
+ core->tx_fifo_head = tx_fifo_status & 0xff;
+ core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+int acc_close(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS |
+ ACC_REG_CONTROL_MASK_IE_BUSERR);
+
+ netif_stop_queue(netdev);
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Mark pending TX requests to be aborted after controller restart. */
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+
+ /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_LOM);
+
+ close_candev(netdev);
+ return 0;
+}
+
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 tx_fifo_head = core->tx_fifo_head;
+ int fifo_usage;
+ u32 acc_id;
+ u8 acc_dlc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* Access core->tx_fifo_tail only once because it may be changed
+ * from the interrupt level.
+ */
+ fifo_usage = tx_fifo_head - core->tx_fifo_tail;
+ if (fifo_usage < 0)
+ fifo_usage += core->tx_fifo_size;
+
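+ /* E.g. (illustrative): head = 2, tail = 5, size = 8 results in
+ * fifo_usage = 2 - 5 + 8 = 5 frames still pending in the FIFO.
+ */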
+ if (fifo_usage >= core->tx_fifo_size - 1) {
+ netdev_err(core->netdev,
+ "BUG: TX ring full when queue awake!\n");
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fifo_usage == core->tx_fifo_size - 2)
+ netif_stop_queue(netdev);
+
+ acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ if (cf->can_id & CAN_RTR_FLAG)
+ acc_dlc |= ACC_DLC_RTR_FLAG;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ acc_id = cf->can_id & CAN_EFF_MASK;
+ acc_id |= ACC_ID_EFF_FLAG;
+ } else {
+ acc_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);
+
+ core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);
+
+ acc_txq_put(core, acc_id, acc_dlc, cf->data);
+
+ return NETDEV_TX_OK;
+}
+
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);
+
+ bec->txerr = (core_status >> 8) & 0xff;
+ bec->rxerr = core_status & 0xff;
+
+ return 0;
+}
+
+int acc_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* Paranoid FIFO index check. */
+ {
+ const u32 tx_fifo_status =
+ acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
+ const u8 hw_fifo_head = tx_fifo_status;
+
+ if (hw_fifo_head != priv->core->tx_fifo_head ||
+ hw_fifo_head != priv->core->tx_fifo_tail) {
+ netdev_warn(netdev,
+ "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
+ priv->core->tx_fifo_tail,
+ priv->core->tx_fifo_head,
+ tx_fifo_status);
+ }
+ }
+ acc_resetmode_leave(priv->core);
+ /* To leave the bus-off state the esdACC controller begins a
+ * grace period here, during which it counts 128 "idle conditions"
+ * (each of 11 consecutive recessive bits) on the bus, as required
+ * by the CAN spec.
+ *
+ * During this time the TX FIFO may still contain already
+ * aborted "zombie" frames that are only drained from the FIFO
+ * at the end of the grace period.
+ *
+ * To avoid interfering with this drain process we don't
+ * call netif_wake_queue() here. When the controller reaches
+ * the error-active state again, it informs us about that
+ * with an acc_bmmsg_errstatechange message. Then
+ * netif_wake_queue() is called from
+ * handle_core_msg_errstatechange() instead.
+ */
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int acc_set_bittiming(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 brp;
+ u32 btr;
+
+ if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ u32 fbtr = 0;
+
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
+ brp, btr, fbtr);
+ } else {
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
+ }
+
+ return 0;
+}
+
+static void handle_core_msg_rxtxdone(struct acc_core *core,
+ const struct acc_bmmsg_rxtxdone *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct sk_buff *skb;
+
+ if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+
+ if (core->tx_fifo_head == tx_fifo_tail) {
+ netdev_warn(core->netdev,
+ "TX interrupt, but queue is empty!?\n");
+ return;
+ }
+
+ /* Access the echo skb directly to attach the HW timestamp. */
+ skb = priv->can.echo_skb[tx_fifo_tail];
+ if (skb) {
+ skb_hwtstamps(skb)->hwtstamp =
+ acc_ts2ktime(priv->ov, msg->ts);
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
+ NULL);
+
+ core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+
+ netif_wake_queue(core->netdev);
+
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(core->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = msg->id & ACC_ID_ID_MASK;
+ if (msg->id & ACC_ID_EFF_FLAG)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
+ priv->can.ctrlmode);
+
+ if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+}
+
+static void handle_core_msg_txabort(struct acc_core *core,
+ const struct acc_bmmsg_txabort *msg)
+{
+ struct net_device_stats *stats = &core->netdev->stats;
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+ u32 abort_mask = msg->abort_mask; /* u32 extend to avoid warnings later */
+
+ /* The abort_mask shows which frames were aborted in esdACC's FIFO. */
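+ /* E.g. (illustrative): tail = 3 and abort_mask = 0x18 frees the
+ * echo skbs at FIFO indices 3 and 4 and advances the tail to 5.
+ */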
+ while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
+ const u32 tail_mask = (1U << tx_fifo_tail);
+
+ if (!(abort_mask & tail_mask))
+ break;
+ abort_mask &= ~tail_mask;
+
+ can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+
+ tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+ }
+ core->tx_fifo_tail = tx_fifo_tail;
+ if (abort_mask)
+ netdev_warn(core->netdev, "Unhandled aborted messages\n");
+
+ if (!acc_resetmode_entered(core))
+ netif_wake_queue(core->netdev);
+}
+
+static void handle_core_msg_overrun(struct acc_core *core,
+ const struct acc_bmmsg_overrun *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* lost_cnt may be 0 if not supported by the esdACC version */
+ if (msg->lost_cnt) {
+ stats->rx_errors += msg->lost_cnt;
+ stats->rx_over_errors += msg->lost_cnt;
+ } else {
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void handle_core_msg_buserr(struct acc_core *core,
+ const struct acc_bmmsg_buserr *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ u8 can_err_prot_type = 0U;
+
+ priv->can.can_stats.bus_error++;
+
+ /* Error occurred during transmission? */
+ if (msg->ecc & ACC_ECC_DIR) {
+ stats->rx_errors++;
+ } else {
+ can_err_prot_type |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+ /* Determine error type */
+ switch (msg->ecc & ACC_ECC_MASK) {
+ case ACC_ECC_BIT:
+ can_err_prot_type |= CAN_ERR_PROT_BIT;
+ break;
+ case ACC_ECC_FORM:
+ can_err_prot_type |= CAN_ERR_PROT_FORM;
+ break;
+ case ACC_ECC_STUFF:
+ can_err_prot_type |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
+ break;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+
+ /* Set protocol error type */
+ cf->data[2] = can_err_prot_type;
+ /* Set error location */
+ cf->data[3] = msg->ecc & ACC_ECC_SEG;
+
+ /* Insert CAN TX and RX error counters. */
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void
+handle_core_msg_errstatechange(struct acc_core *core,
+ const struct acc_bmmsg_errstatechange *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ enum can_state new_state;
+
+ if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ /* See comment in acc_set_mode() for CAN_MODE_START */
+ netif_wake_queue(core->netdev);
+ }
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+
+ if (new_state != priv->can.state) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (txerr >= rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (rxerr >= txerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ /* Always call can_change_state() to update the state
+ * even though alloc_can_err_skb() may have failed.
+ * can_change_state() can cope with a NULL cf pointer.
+ */
+ can_change_state(core->netdev, cf, tx_state, rx_state);
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+ can_bus_off(core->netdev);
+ }
+}
+
+static void handle_core_interrupt(struct acc_core *core)
+{
+ u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
+
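+ /* Only bits 7..0 of the IRQ counter are valid (see struct
+ * acc_bmfifo), i.e. msg_fifo_head indexes the 256-entry
+ * message ring.
+ */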
+ while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
+ const union acc_bmmsg *msg =
+ &core->bmfifo.messages[core->bmfifo.msg_fifo_tail];
+
+ switch (msg->msg_id) {
+ case BM_MSG_ID_RXTXDONE:
+ handle_core_msg_rxtxdone(core, &msg->rxtxdone);
+ break;
+
+ case BM_MSG_ID_TXABORT:
+ handle_core_msg_txabort(core, &msg->txabort);
+ break;
+
+ case BM_MSG_ID_OVERRUN:
+ handle_core_msg_overrun(core, &msg->overrun);
+ break;
+
+ case BM_MSG_ID_BUSERR:
+ handle_core_msg_buserr(core, &msg->buserr);
+ break;
+
+ case BM_MSG_ID_ERRPASSIVE:
+ case BM_MSG_ID_ERRWARN:
+ handle_core_msg_errstatechange(core,
+ &msg->errstatechange);
+ break;
+
+ default:
+ /* Ignore all other BM messages (like the CAN-FD messages) */
+ break;
+ }
+
+ core->bmfifo.msg_fifo_tail =
+ (core->bmfifo.msg_fifo_tail + 1) & 0xff;
+ }
+}
+
+/**
+ * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
+ *
+ * @ov: overview module structure
+ * @cores: array of core structures
+ *
+ * This function handles all interrupts pending for the overview module and the
+ * CAN cores of the esdACC FPGA.
+ *
+ * For every core (the overview module core and the CAN cores) it examines
+ * bmfifo.irq_cnt and compares it with the previously saved
+ * bmfifo.local_irq_cnt. An IRQ is pending if they differ. The esdACC FPGA
+ * updates the bmfifo.irq_cnt values via DMA.
+ *
+ * The pending interrupts are masked by writing to the IRQ mask register at
+ * ACC_OV_OF_BM_IRQ_MASK. This register has a two-bit command field for each
+ * core, evaluated as follows:
+ *
+ * Define, bit pattern: meaning
+ * 00: no action
+ * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
+ * ACC_BM_IRQ_MASK, 10: mask interrupt
+ * 11: no action
+ *
+ * For each CAN core with a pending IRQ, handle_core_interrupt() processes
+ * all busmaster messages from the message FIFO. The FIFO index of the last
+ * handled message is written back to the CAN core to acknowledge it.
+ *
+ * The last step is to unmask all interrupts in the FPGA using
+ * ACC_BM_IRQ_UNMASK_ALL.
+ *
+ * Return:
+ * IRQ_HANDLED, if card generated an interrupt that was handled
+ * IRQ_NONE, if the interrupt is not ours
+ */
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
+{
+ u32 irqmask;
+ int i;
+
+ /* First check for whom interrupts are pending: the card/overview
+ * module or any of the cores. Two bits in irqmask are used for each;
+ * a two-bit field is set to ACC_BM_IRQ_MASK if an IRQ is
+ * pending.
+ */
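+ /* E.g. (illustrative): an IRQ pending only for core 0 yields
+ * irqmask = ACC_BM_IRQ_MASK << 2 = 0x8, i.e. the "mask interrupt"
+ * command in the two-bit field of core 0.
+ */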
+ irqmask = 0U;
+ if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
+ irqmask |= ACC_BM_IRQ_MASK;
+ ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
+ irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
+ core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
+ }
+ }
+
+ if (!irqmask)
+ return IRQ_NONE;
+
+ /* Second, tell the card which interrupts we're working on by writing
+ * irqmask, call handle_{ov|core}_interrupt() and then acknowledge the
+ * interrupts by writing irq_cnt:
+ */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
+
+ if (irqmask & ACC_BM_IRQ_MASK) {
+ /* handle_ov_interrupt(); - no use yet. */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
+ ov->bmfifo.local_irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
+ handle_core_interrupt(core);
+ acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
+ core->bmfifo.local_irq_cnt);
+ }
+ }
+
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
new file mode 100644
index 000000000000..a70488b25d39
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/bits.h>
+#include <linux/can/dev.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+
+#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ)
+#define ACC_I2C_ADDON_DETECT_DELAY_MS 10
+
+/* esdACC Overview Module */
+#define ACC_OV_OF_PROBE 0x0000
+#define ACC_OV_OF_VERSION 0x0004
+#define ACC_OV_OF_INFO 0x0008
+#define ACC_OV_OF_CANCORE_FREQ 0x000c
+#define ACC_OV_OF_TS_FREQ_LO 0x0010
+#define ACC_OV_OF_TS_FREQ_HI 0x0014
+#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018
+#define ACC_OV_OF_TS_CURR_LO 0x001c
+#define ACC_OV_OF_TS_CURR_HI 0x0020
+#define ACC_OV_OF_IRQ_STATUS 0x0028
+#define ACC_OV_OF_MODE 0x002c
+#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_OV_OF_BM_IRQ_MASK 0x0074
+#define ACC_OV_OF_MSI_DATA 0x0080
+#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084
+
+/* Feature flags are contained in the upper 16 bit of the version
+ * register at ACC_OV_OF_VERSION but are only used with these masks
+ * after extraction into a separate variable, hence the (xx - 16) in
+ * the BIT() arguments.
+ */
+#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
+#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+
+#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
+#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
+#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2)
+#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4)
+#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5)
+#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6)
+#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4)
+#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7)
+#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11)
+#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14)
+#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15)
+#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
+
+/* esdACC CAN Core Module */
+#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_STATUS_IRQ 0x0008
+#define ACC_CORE_OF_BRP 0x000c
+#define ACC_CORE_OF_BTR 0x0010
+#define ACC_CORE_OF_FBTR 0x0014
+#define ACC_CORE_OF_STATUS 0x0030
+#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048
+#define ACC_CORE_OF_TXFIFO_STATUS 0x004c
+#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050
+#define ACC_CORE_OF_TX_ABORT_MASK 0x0054
+#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_CORE_OF_TXFIFO_ID 0x00c0
+#define ACC_CORE_OF_TXFIFO_DLC 0x00c4
+#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
+#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
+
+#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
+#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
+#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
+#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
+#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
+#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
+
+#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+
+/* BRP and BTR register layout for CAN-Classic version */
+#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16)
+#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24)
+
+/* BRP and BTR register layout for CAN-FD version */
+#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16)
+#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24)
+
+/* 256 BM_MSGs of 32 byte size */
+#define ACC_CORE_DMAMSG_SIZE 32U
+#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)
+
+enum acc_bmmsg_id {
+ BM_MSG_ID_RXTXDONE = 0x01,
+ BM_MSG_ID_TXABORT = 0x02,
+ BM_MSG_ID_OVERRUN = 0x03,
+ BM_MSG_ID_BUSERR = 0x04,
+ BM_MSG_ID_ERRPASSIVE = 0x05,
+ BM_MSG_ID_ERRWARN = 0x06,
+ BM_MSG_ID_TIMESLICE = 0x07,
+ BM_MSG_ID_HWTIMER = 0x08,
+ BM_MSG_ID_HOTPLUG = 0x09,
+};
+
+/* The struct acc_bmmsg_* structure declarations that follow here provide
+ * access to the ring buffer of bus master messages maintained by the FPGA
+ * bus master engine. All bus master messages have the same size of
+ * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in
+ * memory.
+ *
+ * All structure members are naturally aligned. Therefore we should not need
+ * a __packed attribute. All struct acc_bmmsg_* declarations have at least
+ * reserved* members to fill the structure to the full ACC_CORE_DMAMSG_SIZE.
+ *
+ * A failure of this property due to padding will be detected at compile time
+ * by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE).
+ */
+
+struct acc_bmmsg_rxtxdone {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u32 id;
+ struct {
+ u8 len;
+ u8 txdfifo_idx;
+ u8 zeroes8;
+ u8 reserved;
+ } acc_dlc;
+ u8 data[CAN_MAX_DLEN];
+ /* Time stamps in struct acc_ov::timestamp_frequency ticks. */
+ u64 ts;
+};
+
+struct acc_bmmsg_txabort {
+ u8 msg_id;
+ u8 txfifo_level;
+ u16 abort_mask;
+ u8 txtsfifo_level;
+ u8 reserved2[1];
+ u16 abort_mask_txts;
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_overrun {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 lost_cnt;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_buserr {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 ecc;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reg_btr;
+ u32 reserved3[2];
+};
+
+struct acc_bmmsg_errstatechange {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reserved3[3];
+};
+
+struct acc_bmmsg_timeslice {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hwtimer {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[1];
+ u64 timer;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hotplug {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[7];
+};
+
+union acc_bmmsg {
+ u8 msg_id;
+ struct acc_bmmsg_rxtxdone rxtxdone;
+ struct acc_bmmsg_txabort txabort;
+ struct acc_bmmsg_overrun overrun;
+ struct acc_bmmsg_buserr buserr;
+ struct acc_bmmsg_errstatechange errstatechange;
+ struct acc_bmmsg_timeslice timeslice;
+ struct acc_bmmsg_hwtimer hwtimer;
+};
+
+/* Check size of union acc_bmmsg to be of expected size. */
+static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE);
+
+struct acc_bmfifo {
+ const union acc_bmmsg *messages;
+ /* irq_cnt points to a u32 value where the esdACC FPGA deposits
+ * the bm_fifo head index in coherent DMA memory. Only bits 7..0
+ * are valid. Use READ_ONCE() to access this memory location.
+ */
+ const u32 *irq_cnt;
+ u32 local_irq_cnt;
+ u32 msg_fifo_tail;
+};
+
+struct acc_core {
+ void __iomem *addr;
+ struct net_device *netdev;
+ struct acc_bmfifo bmfifo;
+ u8 tx_fifo_size;
+ u8 tx_fifo_head;
+ u8 tx_fifo_tail;
+};
+
+struct acc_ov {
+ void __iomem *addr;
+ struct acc_bmfifo bmfifo;
+ u32 timestamp_frequency;
+ u32 core_frequency;
+ u16 version;
+ u16 features;
+ u8 total_cores;
+ u8 active_cores;
+};
+
+struct acc_net_priv {
+ struct can_priv can; /* must be the first member! */
+ struct acc_core *core;
+ struct acc_ov *ov;
+};
+
+static inline u32 acc_read32(struct acc_core *core, unsigned short offs)
+{
+ return ioread32be(core->addr + offs);
+}
+
+static inline void acc_write32(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, core->addr + offs);
+}
+
+static inline void acc_write32_noswap(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32(v, core->addr + offs);
+}
+
+static inline void acc_set_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v |= mask;
+ acc_write32(core, offs, v);
+}
+
+static inline void acc_clear_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v &= ~mask;
+ acc_write32(core, offs, v);
+}
+
+static inline int acc_resetmode_entered(struct acc_core *core)
+{
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+
+ return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+}
+
+static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
+{
+ return ioread32be(ov->addr + offs);
+}
+
+static inline void acc_ov_write32(struct acc_ov *ov,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, ov->addr + offs);
+}
+
+static inline void acc_ov_set_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v |= b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_ov_clear_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v &= ~b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_reset_fpga(struct acc_ov *ov)
+{
+ acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET);
+
+ /* (Re-)start and wait for completion of addon detection on the I^2C bus */
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE);
+ mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS);
+}
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev);
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores,
+ const void *mem);
+int acc_open(struct net_device *netdev);
+int acc_close(struct net_device *netdev);
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+int acc_set_mode(struct net_device *netdev, enum can_mode mode);
+int acc_set_bittiming(struct net_device *netdev);
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a57005faa04f..f81b598147b3 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
@@ -47,12 +47,19 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
+/* Xilinx based devices */
+#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
+#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
+
/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
+/* Xilinx SerDes LSB address translation mask */
+#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
+
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
@@ -281,6 +288,8 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
struct kvaser_pciefd_address_offset {
u32 serdes;
@@ -335,6 +344,18 @@ static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offse
.kcan_ch1 = 0x142000,
};
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
+ .serdes = 0x00208,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
+};
+
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
.kcan_rx0 = BIT(4),
.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
@@ -347,6 +368,12 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
.all = GENMASK(19, 16) | BIT(4),
};
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
+ .all = GENMASK(19, 16) | BIT(4),
+};
+
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};
@@ -355,6 +382,10 @@ static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
+};
+
static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
.address_offset = &kvaser_pciefd_altera_address_offset,
.irq_mask = &kvaser_pciefd_altera_irq_mask,
@@ -367,6 +398,12 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
.ops = &kvaser_pciefd_sf2_dev_ops,
};
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
+ .address_offset = &kvaser_pciefd_xilinx_address_offset,
+ .irq_mask = &kvaser_pciefd_xilinx_irq_mask,
+ .ops = &kvaser_pciefd_xilinx_dev_ops,
+};
+
struct kvaser_pciefd_can {
struct can_priv can;
struct kvaser_pciefd *kv_pcie;
@@ -457,6 +494,14 @@ static struct pci_device_id kvaser_pciefd_id_table[] = {
.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
},
{
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
0,
},
};
@@ -1035,6 +1080,21 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
iowrite32(msb, serdes_base + 0x4);
}
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ msb = addr >> 32;
+#endif
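+	/* E.g. (illustrative): addr = 0x123456789 gives msb = 0x1 and
+	 * lsb = 0x23456000, since the LSB mask keeps bits 31..12 only.
+	 */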
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(msb, serdes_base);
+ iowrite32(lsb, serdes_base + 0x4);
+}
+
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 16ecc11c7f62..14b231c4d7ec 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -255,6 +255,7 @@ enum m_can_reg {
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFWM_MASK GENMASK(29, 24)
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
@@ -320,6 +321,12 @@ struct id_and_dlc {
u32 dlc;
};
+struct m_can_fifo_element {
+ u32 id;
+ u32 dlc;
+ u8 data[CANFD_MAX_DLEN];
+};
+
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -372,16 +379,6 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static inline bool _m_can_tx_fifo_full(u32 txfqs)
-{
- return !!(txfqs & TXFQS_TFQF);
-}
-
-static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
-{
- return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
-}
-
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
u32 cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -416,15 +413,48 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
}
}
+static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
+{
+ if (cdev->active_interrupts == interrupts)
+ return;
+ cdev->ops->write_reg(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = interrupts;
+}
+
+static void m_can_coalescing_disable(struct m_can_classdev *cdev)
+{
+ u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
+
+ if (!cdev->net->irq)
+ return;
+
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_interrupt_enable(cdev, new_interrupts);
+}
+
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
+ m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
+ cdev->active_interrupts = 0x0;
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+ hrtimer_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -444,18 +474,26 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
- if (cdev->tx_skb) {
- int putidx = 0;
+ if (cdev->tx_ops) {
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ if (!cdev->tx_ops[i].skb)
+ continue;
- net->stats.tx_errors++;
- if (cdev->version > 30)
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
-
- can_free_echo_skb(cdev->net, putidx, NULL);
- cdev->tx_skb = NULL;
+ net->stats.tx_errors++;
+ cdev->tx_ops[i].skb = NULL;
+ }
}
+
+ for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+ can_free_echo_skb(cdev->net, i, NULL);
+
+ netdev_reset_queue(cdev->net);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1007,23 +1045,60 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
- unsigned int msg_mark,
- u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark, u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
+ unsigned int frame_len;
if (cdev->is_peripheral)
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
msg_mark,
timestamp,
- NULL);
+ &frame_len);
else
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
stats->tx_packets++;
+
+ return frame_len;
+}
+
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+ unsigned int transmitted_frame_len)
+{
+ unsigned long irqflags;
+
+ netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
+ netif_wake_queue(cdev->net);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
+static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+ int tx_fifo_in_flight;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
+ if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
+ netif_stop_queue(cdev->net);
+ if (tx_fifo_in_flight > cdev->tx_fifo_size) {
+ netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ cdev->tx_fifo_in_flight = tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+
+ return NETDEV_TX_OK;
}
static int m_can_echo_tx_event(struct net_device *dev)
@@ -1035,6 +1110,8 @@ static int m_can_echo_tx_event(struct net_device *dev)
int i = 0;
int err = 0;
unsigned int msg_mark;
+ int processed = 0;
+ unsigned int processed_frame_len = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1063,25 +1140,62 @@ static int m_can_echo_tx_event(struct net_device *dev)
fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
- m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+ timestamp);
+
+ ++processed;
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
+ m_can_finish_tx(cdev, processed, processed_frame_len);
+
return err;
}
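+
+/* A summary of the coalescing scheme below (drawn from this patch only):
+ * while the coalescing hrtimer is running, the per-event interrupts
+ * IR_RF0N/IR_TEFN stay masked and only the watermark interrupts
+ * IR_RF0W/IR_TEFW fire; when the timer expires it wakes the IRQ thread
+ * (see m_can_coalescing_timer()), bounding the added latency.
+ */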
+static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+{
+ u32 new_interrupts = cdev->active_interrupts;
+ bool enable_rx_timer = false;
+ bool enable_tx_timer = false;
+
+ if (!cdev->net->irq)
+ return;
+
+ if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
+ enable_rx_timer = true;
+ new_interrupts &= ~IR_RF0N;
+ }
+ if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
+ enable_tx_timer = true;
+ new_interrupts &= ~IR_TEFN;
+ }
+ if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_RF0N;
+ if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_TEFN;
+
+ m_can_interrupt_enable(cdev, new_interrupts);
+ if (enable_rx_timer | enable_tx_timer)
+ hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
+ HRTIMER_MODE_REL);
+}
+
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
u32 ir;
- if (pm_runtime_suspended(cdev->dev))
+ if (pm_runtime_suspended(cdev->dev)) {
+ m_can_coalescing_disable(cdev);
return IRQ_NONE;
+ }
+
ir = m_can_read(cdev, M_CAN_IR);
+ m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
@@ -1096,13 +1210,17 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
+ if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
if (!cdev->is_peripheral) {
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- } else if (m_can_rx_peripheral(dev, ir) < 0) {
- goto out_fail;
+ } else {
+ int pkts;
+
+ pkts = m_can_rx_peripheral(dev, ir);
+ if (pkts < 0)
+ goto out_fail;
}
}
@@ -1110,21 +1228,18 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
u32 timestamp = 0;
+ unsigned int frame_len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
- m_can_tx_update_stats(cdev, 0, timestamp);
- netif_wake_queue(dev);
+ frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+ m_can_finish_tx(cdev, 1, frame_len);
}
} else {
- if (ir & IR_TEFN) {
+ if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
-
- if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(cdev))
- netif_wake_queue(dev);
}
}
@@ -1138,6 +1253,15 @@ out_fail:
return IRQ_HANDLED;
}
+static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
+}
+
static const struct can_bittiming_const m_can_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
@@ -1276,9 +1400,8 @@ static int m_can_chip_config(struct net_device *dev)
}
/* Disable unused interrupts */
- interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
- IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
- IR_RF0F | IR_RF0W);
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
m_can_config_endisable(cdev, true);
@@ -1315,6 +1438,8 @@ static int m_can_chip_config(struct net_device *dev)
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFWM_MASK,
+ cdev->tx_max_coalesced_frames_irq) |
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
@@ -1322,6 +1447,7 @@ static int m_can_chip_config(struct net_device *dev)
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
+ FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
@@ -1380,7 +1506,7 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
- m_can_write(cdev, M_CAN_IE, interrupts);
+ m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1413,15 +1539,16 @@ static int m_can_start(struct net_device *dev)
if (ret)
return ret;
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
+
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
m_can_enable_all_interrupts(cdev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
- hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
- HRTIMER_MODE_REL_PINNED);
- }
+ if (cdev->version > 30)
+ cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
return 0;
}
@@ -1577,11 +1704,6 @@ static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
- }
-
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
@@ -1605,8 +1727,9 @@ static int m_can_close(struct net_device *dev)
m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
+ m_can_clean(dev);
+
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
@@ -1619,57 +1742,42 @@ static int m_can_close(struct net_device *dev)
return 0;
}
-static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- /*get wrap around for loopback skb index */
- unsigned int wrap = cdev->can.echo_skb_max;
- int next_idx;
-
- /* calculate next index */
- next_idx = (++putidx >= wrap ? 0 : putidx);
-
- /* check if occupied */
- return !!cdev->can.echo_skb[next_idx];
-}
-
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
{
- struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len_padded = DIV_ROUND_UP(cf->len, 4);
+ struct m_can_fifo_element fifo_element;
struct net_device *dev = cdev->net;
- struct sk_buff *skb = cdev->tx_skb;
- struct id_and_dlc fifo_header;
u32 cccr, fdflags;
- u32 txfqs;
int err;
- int putidx;
-
- cdev->tx_skb = NULL;
+ u32 putidx;
+ unsigned int frame_len = can_skb_get_frame_len(skb);
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- fifo_header.id = cf->can_id & CAN_EFF_MASK;
- fifo_header.id |= TX_BUF_XTD;
+ fifo_element.id = cf->can_id & CAN_EFF_MASK;
+ fifo_element.id |= TX_BUF_XTD;
} else {
- fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- fifo_header.id |= TX_BUF_RTR;
+ fifo_element.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+ fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
- err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ cf->data, len_padded);
if (err)
goto out_fail;
@@ -1690,33 +1798,15 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
- can_put_echo_skb(skb, dev, 0, 0);
+ can_put_echo_skb(skb, dev, 0, frame_len);
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
- txfqs = m_can_read(cdev, M_CAN_TXFQS);
-
- /* Check if FIFO full */
- if (_m_can_tx_fifo_full(txfqs)) {
- /* This shouldn't happen */
- netif_stop_queue(dev);
- netdev_warn(dev,
- "TX queue active although FIFO is full.");
-
- if (cdev->is_peripheral) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
- }
-
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
+ putidx = cdev->tx_fifo_putidx;
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1731,30 +1821,32 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
- if (err)
- goto out_fail;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
+ cf->len, 0);
+
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
+ &fifo_element, 2 + len_padded);
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx, 0);
+ can_put_echo_skb(skb, dev, putidx, frame_len);
- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
-
- /* stop network queue if fifo full */
- if (m_can_tx_fifo_full(cdev) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
+ cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
+ 0 : cdev->tx_fifo_putidx);
}
return NETDEV_TX_OK;
@@ -1765,46 +1857,91 @@ out_fail:
return NETDEV_TX_BUSY;
}
+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ if (cdev->version == 30)
+ return;
+ if (!cdev->is_peripheral)
+ return;
+
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
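+
+/* E.g. (illustrative): with tx_max_coalesced_frames == 8 a peripheral
+ * device collects up to 8 prepared TX FIFO elements in
+ * tx_peripheral_submit and starts them with a single M_CAN_TXBAR write,
+ * reducing the number of register writes to the device.
+ */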
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
- struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
- tx_work);
+ struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+ struct m_can_classdev *cdev = op->cdev;
+ struct sk_buff *skb = op->skb;
+
+ op->skb = NULL;
+ m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
+{
+ cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
+ queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+ ++cdev->next_tx_op;
+ if (cdev->next_tx_op >= cdev->tx_fifo_size)
+ cdev->next_tx_op = 0;
+}
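+
+/* The tx_ops array is used as a ring of work items, one per TX FIFO
+ * element; e.g. with tx_fifo_size == 4 successive skbs are queued on
+ * work items 0, 1, 2, 3, 0, ...
+ */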
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ bool submit;
+
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);
- m_can_tx_handler(cdev);
+ return NETDEV_TX_OK;
}
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ unsigned int frame_len;
+ netdev_tx_t ret;
if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- if (cdev->is_peripheral) {
- if (cdev->tx_skb) {
- netdev_err(dev, "hard_xmit called while tx busy\n");
- return NETDEV_TX_BUSY;
- }
+ frame_len = can_skb_get_frame_len(skb);
- if (cdev->can.state == CAN_STATE_BUS_OFF) {
- m_can_clean(dev);
- } else {
- /* Need to stop the queue to avoid numerous requests
- * from being sent. Suggested improvement is to create
- * a queueing mechanism that will queue the skbs and
- * process them in order.
- */
- cdev->tx_skb = skb;
- netif_stop_queue(cdev->net);
- queue_work(cdev->tx_wq, &cdev->tx_work);
- }
- } else {
- cdev->tx_skb = skb;
- return m_can_tx_handler(cdev);
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(cdev->net);
+ return NETDEV_TX_OK;
}
- return NETDEV_TX_OK;
+ ret = m_can_start_tx(cdev);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+
+ netdev_sent_queue(dev, frame_len);
+
+ if (cdev->is_peripheral)
+ ret = m_can_start_peripheral_xmit(cdev, skb);
+ else
+ ret = m_can_tx_handler(cdev, skb);
+
+ if (ret != NETDEV_TX_OK)
+ netdev_completed_queue(dev, 1, frame_len);
+
+ return ret;
}
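
m_can_start_xmit() now pairs byte-queue-limits accounting (netdev_sent_queue()/netdev_completed_queue()) with netdev_xmit_more()-driven batching: on peripheral chips (e.g. the SPI-attached tcan4x5x) the TXBAR doorbell write is deferred until either tx_max_coalesced_frames frames have been queued or the stack signals the end of a burst. A condensed sketch of that decision, reusing the field names above (not the driver's literal code):

/* Sketch: should this frame flush the batched TXBAR submit? */
static bool m_can_should_submit(struct m_can_classdev *cdev)
{
	if (++cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
	    !netdev_xmit_more()) {
		cdev->nr_txs_without_submit = 0;
		return true;	/* end of burst or batch full: submit now */
	}

	return false;		/* more frames coming: keep batching */
}
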
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1844,15 +1981,17 @@ static int m_can_open(struct net_device *dev)
/* register interrupt handler */
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
- cdev->tx_wq = alloc_workqueue("mcan_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
- INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ cdev->tx_ops[i].cdev = cdev;
+ INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+ }
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
@@ -1900,7 +2039,108 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static int m_can_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
+ ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
+ ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
+ ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int m_can_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (cdev->can.state != CAN_STATE_STOPPED) {
+ netdev_err(dev, "Device is in use, please shut it down first\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
+ netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
+ ec->rx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_RXF0].num);
+ return -EINVAL;
+ }
+ if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
+ ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
+ netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
+ ec->rx_coalesce_usecs_irq,
+ ec->tx_coalesce_usecs_irq);
+ return -EINVAL;
+ }
+
+ cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ if (cdev->rx_coalesce_usecs_irq)
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ else
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+
+ return 0;
+}
+
static const struct ethtool_ops m_can_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = m_can_get_coalesce,
+ .set_coalesce = m_can_set_coalesce,
+};
+
+static const struct ethtool_ops m_can_ethtool_ops_polling = {
.get_ts_info = ethtool_op_get_ts_info,
};
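
Once these ops are registered, the standard ethtool coalescing interface applies, e.g. "ethtool -C can0 rx-frames-irq 32 rx-usecs-irq 500". For illustration, a user-space sketch of the same request through the raw ioctl ABI (helper name and values are arbitrary):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* fd is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). */
static int can_set_irq_coalesce(int fd, const char *ifname)
{
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_max_coalesced_frames_irq = 32,	/* batch up to 32 frames */
		.rx_coalesce_usecs_irq = 500,		/* or flush after 500 us */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	return ioctl(fd, SIOCETHTOOL, &ifr);	/* 0 on success, -1 + errno */
}

Note that m_can_set_coalesce() rejects setting rx-frames-irq without rx-usecs-irq (and vice versa), so the sketch sets both.
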
@@ -1908,7 +2148,10 @@ static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq)
+ dev->ethtool_ops = &m_can_ethtool_ops;
+ else
+ dev->ethtool_ops = &m_can_ethtool_ops_polling;
return register_candev(dev);
}
@@ -2056,12 +2299,23 @@ int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
- if (cdev->pm_clock_support) {
- ret = m_can_clk_start(cdev);
- if (ret)
- return ret;
+ cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+ cdev->mcfg[MRAM_TXE].num));
+ if (cdev->is_peripheral) {
+ cdev->tx_ops =
+ devm_kzalloc(cdev->dev,
+ cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+ GFP_KERNEL);
+ if (!cdev->tx_ops) {
+ dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+ return -ENOMEM;
+ }
}
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
@@ -2069,8 +2323,15 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto clk_disable;
}
- if (!cdev->net->irq)
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
cdev->hrtimer.function = &hrtimer_callback;
+ } else {
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cdev->hrtimer.function = m_can_coalescing_timer;
+ }
ret = m_can_dev_setup(cdev);
if (ret)
@@ -2121,7 +2382,15 @@ int m_can_class_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
- m_can_stop(ndev);
+
+ /* leave the chip running with rx interrupt enabled if it is
+ * used as a wake-up source.
+ */
+ if (cdev->pm_wake_source)
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+ else
+ m_can_stop(ndev);
+
m_can_clk_stop(cdev);
}
@@ -2148,11 +2417,15 @@ int m_can_class_resume(struct device *dev)
ret = m_can_clk_start(cdev);
if (ret)
return ret;
- ret = m_can_start(ndev);
- if (ret) {
- m_can_clk_stop(cdev);
- return ret;
+ if (cdev->pm_wake_source) {
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+ return ret;
+ }
}
netif_device_attach(ndev);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 520e14277dff..3a9edc292593 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -70,6 +70,13 @@ struct m_can_ops {
int (*init)(struct m_can_classdev *cdev);
};
+struct m_can_tx_op {
+ struct m_can_classdev *cdev;
+ struct work_struct work;
+ struct sk_buff *skb;
+ bool submit;
+};
+
struct m_can_classdev {
struct can_priv can;
struct can_rx_offload offload;
@@ -80,18 +87,42 @@ struct m_can_classdev {
struct clk *cclk;
struct workqueue_struct *tx_wq;
- struct work_struct tx_work;
- struct sk_buff *tx_skb;
struct phy *transceiver;
+ ktime_t irq_timer_wait;
+
struct m_can_ops *ops;
int version;
u32 irqstatus;
int pm_clock_support;
+ int pm_wake_source;
int is_peripheral;
+ // Cached M_CAN_IE register content
+ u32 active_interrupts;
+ u32 rx_max_coalesced_frames_irq;
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames_irq;
+ u32 tx_coalesce_usecs_irq;
+
+ // Store this internally to avoid fetch delays on peripheral chips
+ u32 tx_fifo_putidx;
+
+ /* Protects shared state between start_xmit and m_can_isr */
+ spinlock_t tx_handling_spinlock;
+ int tx_fifo_in_flight;
+
+ struct m_can_tx_op *tx_ops;
+ int tx_fifo_size;
+ int next_tx_op;
+
+ int nr_txs_without_submit;
+ /* bitfield of fifo elements that will be submitted together */
+ u32 tx_peripheral_submit;
+
struct mram_cfg mcfg[MRAM_CFG_NUM];
struct hrtimer hrtimer;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index f2219aa2824b..45400de4163d 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -125,6 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index cdb28d6a092c..df0367124b4c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -109,10 +109,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
ret = irq;
goto probe_fail;
}
- } else {
- dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
}
/* message ram could be shared */
@@ -143,6 +139,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->net->irq = irq;
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
mcan_class->transceiver = transceiver;
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index ae8c42f5debd..a42600dac70d 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,6 +411,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->spi = spi;
mcan_class->pm_clock_support = 0;
+ mcan_class->pm_wake_source = device_property_read_bool(&spi->dev, "wakeup-source");
mcan_class->can.clock.freq = freq;
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
@@ -459,6 +460,9 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ if (mcan_class->pm_wake_source)
+ device_init_wakeup(&spi->dev, true);
+
ret = m_can_class_register(mcan_class);
if (ret) {
dev_err(&spi->dev, "Failed registering m_can device %pe\n",
@@ -487,6 +491,29 @@ static void tcan4x5x_can_remove(struct spi_device *spi)
m_can_class_free_dev(priv->cdev.net);
}
+static int __maybe_unused tcan4x5x_suspend(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (cdev->pm_wake_source)
+ enable_irq_wake(spi->irq);
+
+ return m_can_class_suspend(dev);
+}
+
+static int __maybe_unused tcan4x5x_resume(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret = m_can_class_resume(dev);
+
+ if (cdev->pm_wake_source)
+ disable_irq_wake(spi->irq);
+
+ return ret;
+}
+
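
This is the usual wake-capable split: arm the IRQ as a wakeup source before the generic suspend path runs, disarm it after resume, and let m_can_class_suspend()/m_can_class_resume() consult pm_wake_source to keep the RX interrupt alive instead of stopping the chip. A stripped-down template of the same shape (names hypothetical):

#include <linux/interrupt.h>

/* Template of the wake-irq suspend/resume split used above. */
static int wake_irq_suspend(int irq, bool wake_source)
{
	if (wake_source)
		enable_irq_wake(irq);	/* IRQ stays armed across sleep */

	return 0;	/* then run the subsystem's normal suspend path */
}

static int wake_irq_resume(int irq, bool wake_source)
{
	if (wake_source)
		disable_irq_wake(irq);	/* back to a normal IRQ */

	return 0;
}
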
static const struct of_device_id tcan4x5x_of_match[] = {
{
.compatible = "ti,tcan4x5x",
@@ -505,11 +532,15 @@ static const struct spi_device_id tcan4x5x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+static const struct dev_pm_ops tcan4x5x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tcan4x5x_suspend, tcan4x5x_resume)
+};
+
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
- .pm = NULL,
+ .pm = &tcan4x5x_pm_ops,
},
.id_table = tcan4x5x_id_table,
.probe = tcan4x5x_can_probe,
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 32286f861a19..721df91cdbfb 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -436,7 +436,7 @@ int softing_startstop(struct net_device *dev, int up)
return ret;
bus_bitmask_start = 0;
- if (dev && up)
+ if (up)
/* prepare to start this bus as well */
bus_bitmask_start |= (1 << priv->index);
/* bring netdevs down */
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index eebf967f4711..1d9057dc44f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -837,7 +837,7 @@ static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
return err;
if (trec & MCP251XFD_REG_TREC_TXBO)
- bec->txerr = 256;
+ bec->txerr = CAN_BUS_OFF_THRESHOLD;
else
bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index d1450722cb3c..bd58c636d465 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -100,6 +100,7 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Leaf v3
- Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
- Kvaser Hybrid Pro CAN/LIN
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 95b0fdb602c8..65c962f76898 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -385,7 +385,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
static int gs_cmd_reset(struct gs_can *dev)
{
struct gs_device_mode dm = {
- .mode = GS_CAN_MODE_RESET,
+ .mode = cpu_to_le32(GS_CAN_MODE_RESET),
};
return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
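
The cpu_to_le32() fix above matters on big-endian hosts: gs_usb control transfers carry little-endian fields, so CPU-order constants must be converted before they hit the wire (cpu_to_le32() compiles to nothing on little-endian machines). A self-contained sketch of the rule (struct name hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_mode_example {
	__le32 mode;	/* device expects little-endian on the wire */
};

static void fill_wire_mode(struct wire_mode_example *dm, u32 mode)
{
	dm->mode = cpu_to_le32(mode);	/* byte swap only on BE hosts */
}
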
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 71ef4db5c09f..8faf8a462c05 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -88,6 +88,7 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
+#define USB_LEAF_V3_PRODUCT_ID 0x0117
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
.quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
@@ -235,6 +236,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 98c669ad5141..f7fabba707ea 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -119,7 +119,7 @@ static int vxcan_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3722eaa84234..fae0120473f8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -31,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/u64_stats_sync.h>
#define DRIVER_NAME "xilinx_can"
@@ -58,6 +59,13 @@ enum xcan_reg {
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+
+ /* only on AXI CAN cores */
+ XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */
+ XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */
+ XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */
+ XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */
+
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
@@ -124,6 +132,18 @@ enum xcan_reg {
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */
+#define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */
+#define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */
+#define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO One bit ECC error */
+#define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO Two bit ECC error */
+#define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO One bit ECC error */
+#define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \
+ XCAN_IXR_E1BERX_MASK | \
+ XCAN_IXR_E2BETXOL_MASK | \
+ XCAN_IXR_E1BETXOL_MASK | \
+ XCAN_IXR_E2BETXTL_MASK | \
+ XCAN_IXR_E1BETXTL_MASK)
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
@@ -137,6 +157,11 @@ enum xcan_reg {
#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
+#define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */
+#define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */
+#define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
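
Each ECC counter register packs the 1-bit error count into bits 15:0 and the 2-bit count into bits 31:16, so a single read yields both values via FIELD_GET() with the GENMASK()-based masks just defined; the interrupt handler below does exactly this split. A minimal illustration (function name hypothetical):

#include <linux/bitfield.h>

/* Illustration: split one ECC counter register into its two counts. */
static void xcan_read_rx_ecc(struct xcan_priv *priv, u16 *one_bit, u16 *two_bit)
{
	u32 reg = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);

	*one_bit = FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg);	/* bits 15:0  */
	*two_bit = FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg);	/* bits 31:16 */
}
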
@@ -202,6 +227,14 @@ struct xcan_devtype_data {
* @devtype: Device type specific constants
* @transceiver: Optional pointer to associated CAN transceiver
* @rstc: Pointer to reset control
+ * @ecc_enable: ECC enable flag
+ * @syncp: synchronization for ECC error stats
+ * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count
+ * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count
+ * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count
+ * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count
+ * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count
+ * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count
*/
struct xcan_priv {
struct can_priv can;
@@ -221,6 +254,14 @@ struct xcan_priv {
struct xcan_devtype_data devtype;
struct phy *transceiver;
struct reset_control *rstc;
+ bool ecc_enable;
+ struct u64_stats_sync syncp;
+ u64_stats_t ecc_rx_2_bit_errors;
+ u64_stats_t ecc_rx_1_bit_errors;
+ u64_stats_t ecc_txol_2_bit_errors;
+ u64_stats_t ecc_txol_1_bit_errors;
+ u64_stats_t ecc_txtl_2_bit_errors;
+ u64_stats_t ecc_txtl_1_bit_errors;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -308,6 +349,24 @@ static const struct can_tdc_const xcan_tdc_const_canfd2 = {
.tdcf_max = 0,
};
+enum xcan_stats_type {
+ XCAN_ECC_RX_2_BIT_ERRORS,
+ XCAN_ECC_RX_1_BIT_ERRORS,
+ XCAN_ECC_TXOL_2_BIT_ERRORS,
+ XCAN_ECC_TXOL_1_BIT_ERRORS,
+ XCAN_ECC_TXTL_2_BIT_ERRORS,
+ XCAN_ECC_TXTL_1_BIT_ERRORS,
+};
+
+static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors",
+ [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors",
+ [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors",
+ [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors",
+ [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors",
+ [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors",
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -523,6 +582,9 @@ static int xcan_chip_start(struct net_device *ndev)
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+ if (priv->ecc_enable)
+ ier |= XCAN_IXR_ECC_MASK;
+
if (priv->devtype.flags & XCAN_FLAG_RXMNF)
ier |= XCAN_IXR_RXMNF_MASK;
@@ -1127,6 +1189,54 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
priv->can.can_stats.bus_error++;
}
+ if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) {
+ u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc;
+
+ reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
+ reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET);
+ reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET);
+
+ /* The counter reaches its maximum at 0xffff and does not overflow.
+ * Accept the small race window between reading and resetting ECC counters.
+ */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+
+ u64_stats_update_begin(&priv->syncp);
+
+ if (isr & XCAN_IXR_E2BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ u64_stats_update_end(&priv->syncp);
+ }
+
if (cf.can_id) {
struct can_frame *skb_cf;
struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
@@ -1354,8 +1464,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr_errors, mask;
u32 isr, ier;
- u32 isr_errors;
u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
@@ -1374,10 +1484,15 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
if (isr & XCAN_IXR_TXOK_MASK)
xcan_tx_interrupt(ndev, isr);
+ mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK;
+
+ if (priv->ecc_enable)
+ mask |= XCAN_IXR_ECC_MASK;
+
/* Check for the type of error interrupt and process it */
- isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
- XCAN_IXR_RXMNF_MASK);
+ isr_errors = isr & mask;
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
@@ -1546,6 +1661,43 @@ static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
return 0;
}
+static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &xcan_priv_flags_strings,
+ sizeof(xcan_priv_flags_strings));
+ }
+}
+
+static int xcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void xcan_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors);
+ data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors);
+ data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors);
+ data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors);
+ data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors);
+ data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors);
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
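
This fetch_begin/fetch_retry loop is the reader half of the u64_stats pattern; the writer half lives in xcan_err_interrupt() above, and the syncp seqcount is assumed to be initialized once with u64_stats_init() before first use (not visible in these hunks). A sketch of the writer side (function name hypothetical):

/* Writer side pairing with the fetch loop above; on 32-bit SMP the
 * seqcount makes the 64-bit updates appear atomic to readers, while
 * on 64-bit kernels it compiles away.
 */
static void xcan_ecc_add_rx_1bit(struct xcan_priv *priv, u16 count)
{
	u64_stats_update_begin(&priv->syncp);
	u64_stats_add(&priv->ecc_rx_1_bit_errors, count);
	u64_stats_update_end(&priv->syncp);
}
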
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1555,6 +1707,9 @@ static const struct net_device_ops xcan_netdev_ops = {
static const struct ethtool_ops xcan_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_strings = xcan_get_strings,
+ .get_sset_count = xcan_get_sset_count,
+ .get_ethtool_stats = xcan_get_ethtool_stats,
};
/**
@@ -1793,6 +1948,7 @@ static int xcan_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc");
priv->dev = &pdev->dev;
priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
@@ -1909,6 +2065,11 @@ static int xcan_probe(struct platform_device *pdev)
priv->reg_base, ndev->irq, priv->can.clock.freq,
hw_tx_max, priv->tx_max);
+ if (priv->ecc_enable) {
+ /* Reset FIFO ECC counters */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+ }
return 0;
err_disableclks:
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f8c1d73b251d..3092b391031a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -48,7 +48,7 @@ config NET_DSA_MT7530
config NET_DSA_MT7530_MDIO
tristate "MediaTek MT7530 MDIO interface driver"
depends on NET_DSA_MT7530
- imply MEDIATEK_GE_PHY
+ select MEDIATEK_GE_PHY
select PCS_MTK_LYNXI
help
This enables support for the MediaTek MT7530 and MT7531 switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0d628b35fd5c..b2eeff04f4c8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -559,6 +559,19 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -1257,7 +1270,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
u8 rgmii_ctrl = 0, reg = 0, off;
bool tx_pause = false;
bool rx_pause = false;
@@ -2193,21 +2206,6 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mirror_del);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
-{
- struct b53_device *dev = ds->priv;
- u16 reg;
-
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
- if (enable)
- reg |= BIT(port);
- else
- reg &= ~BIT(port);
- b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
-}
-EXPORT_SYMBOL(b53_eee_enable_set);
-
/* Returns 0 if EEE was not enabled, or 1 otherwise
*/
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
@@ -2224,27 +2222,21 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u16 reg;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
- e->eee_enabled = p->eee_enabled;
- e->eee_active = !!(reg & BIT(port));
-
return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index fdcfd5081c28..c13a907947f1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -95,7 +95,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
struct b53_vlan {
@@ -395,9 +395,8 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4a52ccbe393f..bc77ee9e6d0a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -835,7 +835,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ struct ethtool_keee *p = &priv->dev->ports[port].eee;
u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c3da97abce20..14923535ca7e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -633,6 +633,57 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
}
/**
+ * ksz879x_get_loopback - KSZ879x specific function to get loopback
+ * configuration status for a specific port
+ * @dev: Pointer to the device structure
+ * @port: Port number to query
+ * @val: Pointer to store the result
+ *
+ * This function reads the SMI registers to determine whether loopback mode
+ * is enabled for a specific port.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_get_loopback(struct ksz_device *dev, u16 port,
+ u16 *val)
+{
+ u8 stat3;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3);
+ if (ret)
+ return ret;
+
+ if (stat3 & PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ return 0;
+}
+
+/**
+ * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for
+ * a specific port
+ * @dev: Pointer to the device structure.
+ * @port: Port number to modify.
+ * @val: Value indicating whether to enable or disable loopback mode.
+ *
+ * This function translates loopback bit of the BMCR register into the
+ * corresponding hardware register bit value and writes it to the SMI interface.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val)
+{
+ u8 stat3 = 0;
+
+ if (val & BMCR_LOOPBACK)
+ stat3 |= PORT_PHY_LOOPBACK;
+
+ return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK,
+ stat3);
+}
+
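
ksz879x_set_loopback() and the rewritten BMCR write path below rely on ksz_prmw8() (port read-modify-write, 8-bit) instead of open-coded read/modify/write sequences. A sketch of the semantics assumed of that helper (illustrative, not the driver's implementation):

/* Assumed semantics of ksz_prmw8(): change only the bits in 'mask',
 * preserving everything else in the register.
 */
static int example_prmw8(struct ksz_device *dev, int port, int reg,
			 u8 mask, u8 val)
{
	u8 data;
	int ret;

	ret = ksz_pread8(dev, port, reg, &data);
	if (ret)
		return ret;

	data = (data & ~mask) | (val & mask);

	return ksz_pwrite8(dev, port, reg, data);
}
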
+/**
* ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY
* Control register (Reg. 31).
* @dev: The KSZ device instance.
@@ -676,59 +727,122 @@ static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val)
return 0;
}
+/**
+ * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be read.
+ * @val: The value read from the SMI interface.
+ *
+ * This function reads the SMI interface and translates the hardware register
+ * bit values into their corresponding control settings for a MIIM PHY Basic
+ * mode control register.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val)
+{
+ const u16 *regs = dev->info->regs;
+ u8 restart, speed, ctrl;
+ int ret;
+
+ *val = 0;
+
+ ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ if (ctrl & PORT_FORCE_100_MBIT)
+ *val |= BMCR_SPEED100;
+
+ if (ksz_is_ksz88x3(dev)) {
+ if (restart & KSZ8873_PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ if ((ctrl & PORT_AUTO_NEG_ENABLE))
+ *val |= BMCR_ANENABLE;
+ } else {
+ ret = ksz879x_get_loopback(dev, port, val);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ *val |= BMCR_ANENABLE;
+ }
+
+ if (restart & PORT_POWER_DOWN)
+ *val |= BMCR_PDOWN;
+
+ if (restart & PORT_AUTO_NEG_RESTART)
+ *val |= BMCR_ANRESTART;
+
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ *val |= BMCR_FULLDPLX;
+
+ if (speed & PORT_HP_MDIX)
+ *val |= KSZ886X_BMCR_HP_MDIX;
+
+ if (restart & PORT_FORCE_MDIX)
+ *val |= KSZ886X_BMCR_FORCE_MDI;
+
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+
+ if (restart & PORT_TX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_TRANSMIT;
+
+ if (restart & PORT_LED_OFF)
+ *val |= KSZ886X_BMCR_DISABLE_LED;
+
+ return 0;
+}
+
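
The register value synthesized by ksz8_r_phy_bmcr() is what user space ultimately sees when reading MII register 0 of a switch port. For illustration, a user-space sketch using the standard MII ioctls, the same path mii-tool takes (helper name hypothetical; typically requires root):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmcr(int fd, const char *ifname, unsigned int *bmcr)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;

	mii->reg_num = MII_BMCR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;

	*bmcr = mii->val_out;
	return 0;
}
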
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- u8 restart, speed, ctrl, link;
+ u8 ctrl, link, val1, val2;
int processed = true;
const u16 *regs;
- u8 val1, val2;
u16 data = 0;
- u8 p = phy;
+ u16 p = phy;
int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ret = ksz8_r_phy_bmcr(dev, p, &data);
if (ret)
return ret;
-
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- if (ret)
- return ret;
-
- if (restart & PORT_PHY_LOOPBACK)
- data |= BMCR_LOOPBACK;
- if (ctrl & PORT_FORCE_100_MBIT)
- data |= BMCR_SPEED100;
- if (ksz_is_ksz88x3(dev)) {
- if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= BMCR_ANENABLE;
- } else {
- if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= BMCR_ANENABLE;
- }
- if (restart & PORT_POWER_DOWN)
- data |= BMCR_PDOWN;
- if (restart & PORT_AUTO_NEG_RESTART)
- data |= BMCR_ANRESTART;
- if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= BMCR_FULLDPLX;
- if (speed & PORT_HP_MDIX)
- data |= KSZ886X_BMCR_HP_MDIX;
- if (restart & PORT_FORCE_MDIX)
- data |= KSZ886X_BMCR_FORCE_MDI;
- if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
- if (restart & PORT_TX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
- if (restart & PORT_LED_OFF)
- data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
@@ -860,113 +974,137 @@ static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val)
return ret;
}
-int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+/**
+ * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be configured.
+ * @val: The register value to be written.
+ *
+ * This function translates control settings from a MIIM PHY Basic mode control
+ * register into their corresponding hardware register bit values for the SMI
+ * interface.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val)
{
- u8 restart, speed, ctrl, data;
- const u16 *regs;
- u8 p = phy;
+ u8 restart, speed, ctrl, restart_mask;
+ const u16 *regs = dev->info->regs;
int ret;
- regs = dev->info->regs;
+ /* Do not support PHY reset function. */
+ if (val & BMCR_RESET)
+ return 0;
- switch (reg) {
- case MII_BMCR:
+ speed = 0;
+ if (val & KSZ886X_BMCR_HP_MDIX)
+ speed |= PORT_HP_MDIX;
- /* Do not support PHY reset function. */
- if (val & BMCR_RESET)
- break;
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
+ ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed);
+ if (ret)
+ return ret;
- data = speed;
- if (val & KSZ886X_BMCR_HP_MDIX)
- data |= PORT_HP_MDIX;
- else
- data &= ~PORT_HP_MDIX;
+ ctrl = 0;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_DISABLE;
- if (data != speed) {
- ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- if (ret)
- return ret;
- }
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[port].fiber)
+ ctrl |= PORT_AUTO_NEG_DISABLE;
+ }
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- if (ret)
- return ret;
+ if (val & BMCR_SPEED100)
+ ctrl |= PORT_FORCE_100_MBIT;
- data = ctrl;
- if (ksz_is_ksz88x3(dev)) {
- if ((val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_ENABLE;
- else
- data &= ~PORT_AUTO_NEG_ENABLE;
- } else {
- if (!(val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_DISABLE;
- else
- data &= ~PORT_AUTO_NEG_DISABLE;
-
- /* Fiber port does not support auto-negotiation. */
- if (dev->ports[p].fiber)
- data |= PORT_AUTO_NEG_DISABLE;
- }
+ if (val & BMCR_FULLDPLX)
+ ctrl |= PORT_FORCE_FULL_DUPLEX;
- if (val & BMCR_SPEED100)
- data |= PORT_FORCE_100_MBIT;
- else
- data &= ~PORT_FORCE_100_MBIT;
- if (val & BMCR_FULLDPLX)
- data |= PORT_FORCE_FULL_DUPLEX;
- else
- data &= ~PORT_FORCE_FULL_DUPLEX;
+ ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT |
+ /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same
+ * bits
+ */
+ PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl);
+ if (ret)
+ return ret;
- if (data != ctrl) {
- ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- if (ret)
- return ret;
- }
+ restart = 0;
+ restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART |
+ PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX;
+
+ if (val & KSZ886X_BMCR_DISABLE_LED)
+ restart |= PORT_LED_OFF;
+
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
+ restart |= PORT_TX_DISABLE;
+
+ if (val & BMCR_ANRESTART)
+ restart |= PORT_AUTO_NEG_RESTART;
+
+ if (val & BMCR_PDOWN)
+ restart |= PORT_POWER_DOWN;
+
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
+ restart |= PORT_AUTO_MDIX_DISABLE;
+
+ if (val & KSZ886X_BMCR_FORCE_MDI)
+ restart |= PORT_FORCE_MDIX;
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ksz_is_ksz88x3(dev)) {
+ restart_mask |= KSZ8873_PORT_PHY_LOOPBACK;
+
+ if (val & BMCR_LOOPBACK)
+ restart |= KSZ8873_PORT_PHY_LOOPBACK;
+ } else {
+ ret = ksz879x_set_loopback(dev, port, val);
if (ret)
return ret;
+ }
- data = restart;
- if (val & KSZ886X_BMCR_DISABLE_LED)
- data |= PORT_LED_OFF;
- else
- data &= ~PORT_LED_OFF;
- if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
- data |= PORT_TX_DISABLE;
- else
- data &= ~PORT_TX_DISABLE;
- if (val & BMCR_ANRESTART)
- data |= PORT_AUTO_NEG_RESTART;
- else
- data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & BMCR_PDOWN)
- data |= PORT_POWER_DOWN;
- else
- data &= ~PORT_POWER_DOWN;
- if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
- data |= PORT_AUTO_MDIX_DISABLE;
- else
- data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & KSZ886X_BMCR_FORCE_MDI)
- data |= PORT_FORCE_MDIX;
- else
- data &= ~PORT_FORCE_MDIX;
- if (val & BMCR_LOOPBACK)
- data |= PORT_PHY_LOOPBACK;
- else
- data &= ~PORT_PHY_LOOPBACK;
+ return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask,
+ restart);
+}
- if (data != restart) {
- ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
- data);
- if (ret)
- return ret;
- }
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ const u16 *regs;
+ u8 ctrl, data;
+ u16 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz8_w_phy_bmcr(dev, p, val);
+ if (ret)
+ return ret;
break;
case MII_ADVERTISE:
ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index beca974e0171..7c9341ef73b0 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -265,6 +265,7 @@
#define PORT_AUTO_MDIX_DISABLE BIT(2)
#define PORT_FORCE_MDIX BIT(1)
#define PORT_MAC_LOOPBACK BIT(0)
+#define KSZ8873_PORT_PHY_LOOPBACK BIT(0)
#define REG_PORT_1_STATUS_2 0x1E
#define REG_PORT_2_STATUS_2 0x2E
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index cac4a607e54a..82bebee4615c 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 245dfb7a7a31..2b510f150dd8 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1476,6 +1476,39 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.gbit_capable = {true, true, true},
},
+ [KSZ8567] = {
+ .chip_id = KSZ8567_CHIP_ID,
+ .dev_name = "KSZ8567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {false, false, false, false, false,
+ true, true},
+ },
+
[KSZ9567] = {
.chip_id = KSZ9567_CHIP_ID,
.dev_name = "KSZ9567",
@@ -1864,6 +1897,29 @@ static void ksz_get_strings(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_update_port_member - Adjust port forwarding rules based on STP state and
+ * isolation settings.
+ * @dev: A pointer to the struct ksz_device representing the device.
+ * @port: The port number to adjust.
+ *
+ * This function dynamically adjusts the port membership configuration for a
+ * specified port and other device ports, based on Spanning Tree Protocol (STP)
+ * states and port isolation settings. Each port, including the CPU port, has a
+ * membership register, represented as a bitfield, where each bit corresponds
+ * to a port number. A set bit indicates permission to forward frames to that
+ * port. This function iterates over all ports, updating the membership register
+ * to reflect current forwarding permissions:
+ *
+ * 1. Forwards frames only to ports that are part of the same bridge group and
+ * in the BR_STATE_FORWARDING state.
+ * 2. Takes into account the isolation status of ports; ports in the
+ * BR_STATE_FORWARDING state with BR_ISOLATED configuration will not forward
+ * frames to each other, even if they are in the same bridge group.
+ * 3. Ensures that the CPU port is included in the membership based on its
+ * upstream port configuration, allowing for management and control traffic
+ * to flow as required.
+ */
static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -1892,7 +1948,14 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
- if (p->stp_state == BR_STATE_FORWARDING) {
+ /* At this point we know that "port" and "other" port [i] are
+ * in the same bridge group and that "other" port [i] is in the
+ * forwarding STP state. If "port" is also forwarding, allow
+ * forwarding from port [port] to port [i], unless both ports
+ * are isolated.
+ */
+ if (p->stp_state == BR_STATE_FORWARDING &&
+ !(p->isolated && other_p->isolated)) {
val |= BIT(port);
port_member |= BIT(i);
}
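
Distilled, the rule implemented here and in the nested loop below is symmetric; a sketch (function name hypothetical, same-bridge membership checked separately via dsa_port_bridge_same()):

/* Two ports may forward to each other only if both are in the
 * forwarding STP state and they are not both isolated.
 */
static bool ksz_may_forward(const struct ksz_port *a, const struct ksz_port *b)
{
	if (a->stp_state != BR_STATE_FORWARDING ||
	    b->stp_state != BR_STATE_FORWARDING)
		return false;

	return !(a->isolated && b->isolated);
}
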
@@ -1911,8 +1974,19 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
third_p = &dev->ports[j];
if (third_p->stp_state != BR_STATE_FORWARDING)
continue;
+
third_dp = dsa_to_port(ds, j);
- if (dsa_port_bridge_same(other_dp, third_dp))
+
+ /* Now update the relation of the "other" port [i] to the
+ * "third" port [j]. We already know that both of them are in
+ * the forwarding STP state. Check that they are in the same
+ * bridge group and are not both isolated before allowing
+ * forwarding from port [i] to port [j].
+ */
+ if (dsa_port_bridge_same(other_dp, third_dp) &&
+ !(other_p->isolated && third_p->isolated))
val |= BIT(j);
}
@@ -2185,6 +2259,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
return ksz_irq_common_setup(dev, pirq);
}
+static int ksz_parse_drive_strength(struct ksz_device *dev);
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -2206,6 +2282,10 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ ret = ksz_parse_drive_strength(dev);
+ if (ret)
+ return ret;
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
@@ -2649,6 +2729,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2664,7 +2745,7 @@ static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -2677,8 +2758,12 @@ static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
- if (flags.mask & BR_LEARNING) {
- p->learning = !!(flags.val & BR_LEARNING);
+ if (flags.mask & (BR_LEARNING | BR_ISOLATED)) {
+ if (flags.mask & BR_LEARNING)
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ if (flags.mask & BR_ISOLATED)
+ p->isolated = !!(flags.val & BR_ISOLATED);
/* Make the change take effect immediately */
ksz_port_stp_state_set(ds, port, p->stp_state);
@@ -2705,7 +2790,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
dev->chip_id == KSZ9563_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
- if (dev->chip_id == KSZ9477_CHIP_ID ||
+ if (dev->chip_id == KSZ8567_CHIP_ID ||
+ dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
@@ -2813,6 +2899,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8830_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2839,6 +2926,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2852,7 +2940,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
}
static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
int ret;
@@ -2872,7 +2960,7 @@ static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
int ret;
@@ -3183,6 +3271,7 @@ static int ksz_switch_detect(struct ksz_device *dev)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case KSZ9567_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
case LAN9372_CHIP_ID:
@@ -3220,6 +3309,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -3239,6 +3329,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4142,6 +4233,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4242,10 +4334,6 @@ int ksz_switch_register(struct ksz_device *dev)
for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
- ret = ksz_parse_drive_strength(dev);
- if (ret)
- return ret;
-
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 15612101a155..40c11b0d6b62 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -110,6 +110,7 @@ struct ksz_switch_macaddr {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
+ bool isolated;
int stp_state;
struct phy_device phydev;
@@ -187,6 +188,7 @@ struct ksz_device {
/* List of supported models */
enum ksz_model {
KSZ8563,
+ KSZ8567,
KSZ8795,
KSZ8794,
KSZ8765,
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 6f6d878e742c..c8166fb440ab 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -165,6 +165,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
@@ -204,6 +208,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz9893" },
{ "ksz9563" },
{ "ksz8563" },
+ { "ksz8567" },
{ "ksz9567" },
{ "lan9370" },
{ "lan9371" },
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 088533663b83..fa3ee85a99c1 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -81,17 +81,14 @@ static const struct regmap_bus mt7530_regmap_bus = {
};
static int
-mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
+mt7531_create_sgmii(struct mt7530_priv *priv)
{
struct regmap_config *mt7531_pcs_config[2] = {};
struct phylink_pcs *pcs;
struct regmap *regmap;
int i, ret = 0;
- /* MT7531AE has two SGMII units for port 5 and port 6
- * MT7531BE has only one SGMII unit for port 6
- */
- for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
+ for (i = priv->p5_sgmii ? 0 : 1; i < 2; i++) {
mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
sizeof(struct regmap_config),
GFP_KERNEL);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3c1f657593a8..678b51f9cea6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -414,92 +414,57 @@ mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
}
/* Setup port 6 interface mode and TRGMII TX circuit */
-static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, xtal;
-
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ u32 ncpo1, ssc_delta, xtal;
- if (xtal == HWTRAP_XTAL_20MHZ) {
- dev_err(priv->dev,
- "%s: MT7530 with a 20MHz XTAL is not supported!\n",
- __func__);
- return -EINVAL;
- }
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- trgint = 0;
- break;
- case PHY_INTERFACE_MODE_TRGMII:
- trgint = 1;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
- if (priv->id == ID_MT7621) {
- /* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x0a00;
- } else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x1400;
- }
- break;
- default:
- dev_err(priv->dev, "xMII interface %d not supported\n",
- interface);
- return -EINVAL;
+ if (interface == PHY_INTERFACE_MODE_RGMII) {
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(0));
+ return;
}
- mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
- P6_INTF_MODE(trgint));
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- if (trgint) {
- /* Disable the MT7530 TRGMII clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
- /* Enable the MT7530 TRGMII clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0640;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x1400;
}
- return 0;
-}
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
+ RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
+ RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
+ RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
-{
- u32 val;
-
- val = mt7530_read(priv, MT7531_TOP_SIG_SR);
-
- return (val & PAD_DUAL_SGMII_EN) != 0;
-}
-
-static int
-mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
-{
- return 0;
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
}
static void
@@ -510,9 +475,6 @@ mt7531_pll_setup(struct mt7530_priv *priv)
u32 xtal;
u32 val;
- if (mt7531_dual_sgmii_supported(priv))
- return;
-
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
hwstrap = mt7530_read(priv, MT7531_HWTRAP);
@@ -920,8 +882,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
- case P5_INTF_SEL_GMAC5_SGMII:
- return "GMAC5_SGMII";
default:
return "unknown";
}
@@ -956,13 +916,8 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
val &= ~MHWTRAP_P5_DIS;
break;
- case P5_DISABLED:
- interface = PHY_INTERFACE_MODE_NA;
- break;
default:
- dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
- priv->p5_intf_sel);
- goto unlock_exit;
+ break;
}
/* Setup RGMII settings */
@@ -992,9 +947,6 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
- priv->p5_interface = interface;
-
-unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
@@ -1014,18 +966,10 @@ mt753x_trap_frames(struct mt7530_priv *priv)
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}
-static int
+static void
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- int ret;
-
- /* Setup max capability of CPU port at first */
- if (priv->info->cpu_port_config) {
- ret = priv->info->cpu_port_config(ds, port);
- if (ret)
- return ret;
- }
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
@@ -1035,10 +979,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
- /* Set CPU port number */
- if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
- mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
-
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
@@ -1055,8 +995,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
-
- return 0;
}
static int
@@ -1080,7 +1018,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
@@ -1100,7 +1037,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
@@ -2107,7 +2043,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
}
/* This register must be set for MT7530 to properly fire interrupts */
- if (priv->id != ID_MT7531)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
@@ -2146,24 +2082,40 @@ mt7530_free_irq_common(struct mt7530_priv *priv)
static void
mt7530_free_irq(struct mt7530_priv *priv)
{
- mt7530_free_mdio_irq(priv);
+ struct device_node *mnp, *np = priv->dev->of_node;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ mt7530_free_mdio_irq(priv);
+ of_node_put(mnp);
+
mt7530_free_irq_common(priv);
}
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
+ struct device_node *mnp, *np = priv->dev->of_node;
struct dsa_switch *ds = priv->ds;
struct device *dev = priv->dev;
struct mii_bus *bus;
static int idx;
- int ret;
+ int ret = 0;
+
+ mnp = of_get_child_by_name(np, "mdio");
+
+ if (mnp && !of_device_is_available(mnp))
+ goto out;
bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!mnp)
+ ds->user_mii_bus = bus;
- ds->user_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
@@ -2174,16 +2126,18 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_setup_mdio_irq(priv);
- ret = devm_mdiobus_register(dev, bus);
+ ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_free_mdio_irq(priv);
}
+out:
+ of_node_put(mnp);
return ret;
}
@@ -2238,6 +2192,12 @@ mt7530_setup(struct dsa_switch *ds)
}
}
+ /* Disable LEDs before reset to prevent the MT7530 from sampling a
+ * potentially incorrect HT_XTAL_FSEL value.
+ */
+ mt7530_write(priv, MT7530_LED_EN, 0);
+ usleep_range(1000, 1100);
+
/* Reset whole chip through gpio pin or memory-mapped registers for
* different type of hardware
*/
@@ -2267,6 +2227,12 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "MT7530 with a 20MHz XTAL is not supported!\n");
+ return -EINVAL;
+ }
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
@@ -2289,14 +2255,18 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2305,9 +2275,7 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2326,16 +2294,13 @@ mt7530_setup(struct dsa_switch *ds)
return ret;
/* Setup port 5 */
- priv->p5_intf_sel = P5_DISABLED;
- interface = PHY_INTERFACE_MODE_NA;
-
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
- if (ret && ret != -ENODEV)
- return ret;
} else {
- /* Scan the ethernet nodes. look for GMAC1, lookup used phy */
+ /* Scan the ethernet nodes. Look for GMAC1, look up the PHY in use.
+ * Set priv->p5_intf_sel to the appropriate value if PHY muxing
+ * is detected.
+ */
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
@@ -2366,6 +2331,10 @@ mt7530_setup(struct dsa_switch *ds)
of_node_put(phy_node);
break;
}
+
+ if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
+ priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ mt7530_setup_port5(ds, interface);
}
#ifdef CONFIG_GPIOLIB
@@ -2376,8 +2345,6 @@ mt7530_setup(struct dsa_switch *ds)
}
#endif /* CONFIG_GPIOLIB */
- mt7530_setup_port5(ds, interface);
-
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2402,6 +2369,12 @@ mt7531_setup_common(struct dsa_switch *ds)
UNU_FFP_MASK);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2412,9 +2385,7 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2474,38 +2445,35 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
- /* all MACs must be forced link-down before sw reset */
+ /* MT7531AE has two SGMII units, one for port 5 and one for port 6.
+ * MT7531BE has only one SGMII unit, which is for port 6.
+ */
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
+
+ /* Force link down on all ports before internal reset */
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
/* Reset the switch through internal reset */
- mt7530_write(priv, MT7530_SYS_CTRL,
- SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
- SYS_CTRL_REG_RST);
-
- mt7531_pll_setup(priv);
-
- if (mt7531_dual_sgmii_supported(priv)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
+ if (!priv->p5_sgmii) {
+ mt7531_pll_setup(priv);
+ } else {
/* Let ds->user_mii_bus be able to access external phy. */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
- } else {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
}
- dev_dbg(ds->dev, "P5 support %s interface\n",
- p5_intf_modes(priv->p5_intf_sel));
+
+ if (!dsa_is_unused_port(ds, 5))
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Let phylink decide the interface later. */
- priv->p5_interface = PHY_INTERFACE_MODE_NA;
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
/* Enable PHY core PLL, since phy_device has not yet been created
* provided for phy_[read,write]_mmd_indirect is called, we provide
* our own mt7531_ind_mmd_phy_[read,write] to complete this
@@ -2535,12 +2503,14 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ /* Port 5 supports rgmii with delays, mii, and gmii. */
+ case 5:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
@@ -2548,7 +2518,8 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
break;
- case 6: /* 1st cpu port */
+ /* Port 6 supports rgmii and trgmii. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_RGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_TRGMII,
@@ -2557,30 +2528,30 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
-{
- return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
-}
-
static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port)) {
+ /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
+ * MT7531AE.
+ */
+ case 5:
+ if (!priv->p5_sgmii) {
phy_interface_set_rgmii(config->supported_interfaces);
break;
}
fallthrough;
- case 6: /* 1st cpu port supports sgmii/8023z only */
+ /* Port 6 supports sgmii/802.3z. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
@@ -2596,14 +2567,14 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- phy_interface_zero(config->supported_interfaces);
-
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -2612,41 +2583,24 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static int
-mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->pad_setup(ds, state->interface);
-}
-
-static int
+static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- /* Only need to setup port5. */
- if (port != 5)
- return 0;
-
- mt7530_setup_port5(priv->ds, interface);
-
- return 0;
+ if (port == 5)
+ mt7530_setup_port5(priv->ds, interface);
+ else if (port == 6)
+ mt7530_setup_port6(priv->ds, interface);
}
-static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface,
- struct phy_device *phydev)
+static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
{
u32 val;
- if (!mt7531_is_rgmii_port(priv, port)) {
- dev_err(priv->dev, "RGMII mode is not available for port %d\n",
- port);
- return -EINVAL;
- }
-
val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
val |= GP_CLK_EN;
val &= ~GP_MODE_MASK;
@@ -2674,31 +2628,14 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
case PHY_INTERFACE_MODE_RGMII_ID:
break;
default:
- return -EINVAL;
+ break;
}
}
- mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
-
- return 0;
-}
-
-static bool mt753x_is_mac_port(u32 port)
-{
- return (port == 5 || port == 6);
-}
-
-static int
-mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- if (dsa_is_cpu_port(ds, port) &&
- interface == PHY_INTERFACE_MODE_INTERNAL)
- return 0;
- return -EINVAL;
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
}
-static int
+static void
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
@@ -2706,39 +2643,11 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct phy_device *phydev;
struct dsa_port *dp;
- if (!mt753x_is_mac_port(port)) {
- dev_err(priv->dev, "port %d is not a MAC port\n", port);
- return -EINVAL;
- }
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- return mt7531_rgmii_setup(priv, port, interface, phydev);
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* handled in SGMII PCS driver */
- return 0;
- default:
- return -EINVAL;
+ mt7531_rgmii_setup(priv, port, interface, phydev);
}
-
- return -EINVAL;
-}
-
-static int
-mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_config(ds, port, mode, state->interface);
}
static struct phylink_pcs *
@@ -2764,54 +2673,13 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
-
- switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
-
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- if (priv->p5_intf_sel != P5_DISABLED)
- priv->p5_interface = state->interface;
- break;
- case 6: /* 1st cpu port */
- if (priv->p6_interface == state->interface)
- break;
-
- mt753x_pad_setup(ds, state);
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- priv->p6_interface = state->interface;
- break;
- default:
-unsupported:
- dev_err(ds->dev, "%s: unsupported %s port: %i\n",
- __func__, phy_modes(state->interface), port);
- return;
- }
-
- mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
- mcr_new = mcr_cur;
- mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
- mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+ if ((port == 5 || port == 6) && priv->info->mac_port_config)
+ priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mcr_new |= PMCR_EXT_PHY;
-
- if (mcr_new != mcr_cur)
- mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+ mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
}
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -2835,17 +2703,10 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
- /* MT753x MAC works in 1G full duplex mode for all up-clocked
- * variants.
- */
- if (interface == PHY_INTERFACE_MODE_TRGMII ||
- (phy_interface_mode_is_8023z(interface))) {
- speed = SPEED_1000;
- duplex = DUPLEX_FULL;
- }
-
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
mcr |= PMCR_FORCE_SPEED_1000;
break;
case SPEED_100:
@@ -2863,6 +2724,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
mcr |= PMCR_FORCE_EEE1G;
break;
case SPEED_100:
@@ -2874,63 +2736,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static int
-mt7531_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
- phy_interface_t interface;
- int speed;
- int ret;
-
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- interface = PHY_INTERFACE_MODE_RGMII;
- else
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p5_interface = interface;
- break;
- case 6:
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p6_interface = interface;
- break;
- default:
- return -EINVAL;
- }
-
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- speed = SPEED_2500;
- else
- speed = SPEED_1000;
-
- ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
- if (ret)
- return ret;
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
- speed, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
-static int
-mt7988_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
-
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED,
- PHY_INTERFACE_MODE_INTERNAL, NULL,
- SPEED_10000, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -3013,17 +2818,9 @@ static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int i, ret;
-
- /* Initialise the PCS devices */
- for (i = 0; i < priv->ds->num_ports; i++) {
- priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
- priv->pcs[i].priv = priv;
- priv->pcs[i].port = i;
- }
+ int ret = priv->info->sw_setup(ds);
+ int i;
- ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3035,8 +2832,16 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].pcs.neg_mode = true;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+
if (priv->create_sgmii) {
- ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
+ ret = priv->create_sgmii(priv);
if (ret && priv->irq)
mt7530_free_irq(priv);
}
@@ -3045,7 +2850,7 @@ mt753x_setup(struct dsa_switch *ds)
}
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
@@ -3057,7 +2862,7 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
@@ -3074,9 +2879,34 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt753x_conduit_state_change(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational)
{
- return 0;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
+ struct mt7530_priv *priv = ds->priv;
+ int val = 0;
+ u8 mask;
+
+ /* On the MT7530, set the CPU port to trap frames to. Trapped frames will be
+ * forwarded to the numerically smallest CPU port whose conduit
+ * interface is up.
+ */
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ mask = BIT(cpu_dp->index);
+
+ if (operational)
+ priv->active_cpu_ports |= mask;
+ else
+ priv->active_cpu_ports &= ~mask;
+
+ if (priv->active_cpu_ports)
+ val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+
+ mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
}
static int mt7988_setup(struct dsa_switch *ds)
@@ -3129,6 +2959,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
+ .conduit_state_change = mt753x_conduit_state_change,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
@@ -3141,7 +2972,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3153,7 +2983,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3165,8 +2994,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7531_pad_setup,
- .cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
},
@@ -3178,10 +3005,7 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7988_pad_setup,
- .cpu_port_config = mt7988_cpu_port_config,
.mac_port_get_caps = mt7988_mac_port_get_caps,
- .mac_port_config = mt7988_mac_config,
},
};
EXPORT_SYMBOL_GPL(mt753x_table);
@@ -3208,10 +3032,8 @@ mt7530_probe_common(struct mt7530_priv *priv)
/* Sanity check if these required device operations are filled
* properly.
*/
- if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
- !priv->info->mac_port_get_caps ||
- !priv->info->mac_port_config)
+ if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
+ !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
return -EINVAL;
priv->id = priv->info->id;
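The mt753x_conduit_state_change() hunk above tracks which CPU ports have an operational conduit in the active_cpu_ports bitmask and, on MT7530/MT7621, re-points the trap target with __ffs(), so the numerically smallest active CPU port wins. A minimal user-space sketch of just that selection logic, with the register write replaced by a printf and __builtin_ctz() standing in for the kernel's __ffs() (all names below are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint8_t active_cpu_ports;

static void conduit_state_change(unsigned int cpu_port, int operational)
{
	if (operational)
		active_cpu_ports |= 1u << cpu_port;
	else
		active_cpu_ports &= ~(1u << cpu_port);

	if (active_cpu_ports)
		/* pick the numerically smallest active CPU port */
		printf("trap frames to CPU port %d\n",
		       __builtin_ctz(active_cpu_ports));
	else
		printf("no active CPU port, trapping disabled\n");
}

int main(void)
{
	conduit_state_change(6, 1);	/* port 6 conduit up -> trap to 6 */
	conduit_state_change(5, 1);	/* port 5 also up    -> trap to 5 */
	conduit_state_change(5, 0);	/* port 5 down again -> back to 6 */
	return 0;
}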
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 17e42d30fff4..a71166e0a7fc 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -41,8 +41,8 @@ enum mt753x_id {
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
#define CPU_EN BIT(7)
-#define CPU_PORT(x) ((x) << 4)
-#define CPU_MASK (0xf << 4)
+#define CPU_PORT_MASK GENMASK(6, 4)
+#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
#define MIRROR_EN BIT(3)
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
@@ -304,20 +304,11 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_DPX | \
MT7531_FORCE_RX_FC | \
MT7531_FORCE_TX_FC)
-#define PMCR_FORCE_MODE_ID(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_FORCE_MODE : PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
- PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
- PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_SPEED_1000 | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
@@ -683,11 +674,10 @@ struct mt7530_port {
/* Port 5 interface select definitions */
enum p5_interface_select {
- P5_DISABLED = 0,
+ P5_DISABLED,
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
- P5_INTF_SEL_GMAC5_SGMII,
};
struct mt7530_priv;
@@ -705,8 +695,6 @@ struct mt753x_pcs {
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @pad_setup: Holding the way setting up the bus pad for a certain
- * MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
* port
* @mac_port_validate: Holding the way to set addition validate type for a
@@ -727,16 +715,14 @@ struct mt753x_info {
int regnum);
int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
int regnum, u16 val);
- int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
- int (*cpu_port_config)(struct dsa_switch *ds, int port);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
+ void (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -754,12 +740,14 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p6_interface Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_sgmii: Flag indicating whether port 5 of the MT7531 switch
+ * has SGMII
* @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
+ * @active_cpu_ports: Holding the active CPU ports
*/
struct mt7530_priv {
struct device *dev;
@@ -773,9 +761,8 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- phy_interface_t p6_interface;
- phy_interface_t p5_interface;
- unsigned int p5_intf_sel;
+ enum p5_interface_select p5_intf_sel;
+ bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
@@ -785,7 +772,8 @@ struct mt7530_priv {
int irq;
struct irq_domain *irq_domain;
u32 irq_enable;
- int (*create_sgmii)(struct mt7530_priv *priv, bool dual_sgmii);
+ int (*create_sgmii)(struct mt7530_priv *priv);
+ u8 active_cpu_ports;
};
struct mt7530_hw_vlan_entry {
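The CPU_PORT rework above replaces open-coded shifts with GENMASK()/FIELD_PREP(). A stand-alone sketch of the encoding, using simplified 32-bit re-implementations of the two kernel macros (the real ones live in linux/bits.h and linux/bitfield.h and additionally enforce compile-time constant masks):

#include <assert.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

#define CPU_PORT_MASK	GENMASK(6, 4)	/* bits 6..4, i.e. 0x70 */
#define CPU_PORT(x)	FIELD_PREP(CPU_PORT_MASK, x)

int main(void)
{
	/* same encoding as the previous ((x) << 4), but the mask now
	 * documents the exact field width in the register
	 */
	assert(CPU_PORT_MASK == (0x7 << 4));
	assert(CPU_PORT(5) == (5 << 4));
	return 0;
}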
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 614cabb5c1b0..9ed1821184ec 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1451,14 +1451,14 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
@@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
int err;
if (!chip->info->ops->phy_read_c45)
- return 0xffff;
+ return -ENODEV;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
@@ -3712,7 +3712,10 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (external) {
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ if (chip->info->family == MV88E6XXX_FAMILY_6393)
+ err = mv88e6393x_g2_scratch_gpio_set_smi(chip, true);
+ else
+ err = mv88e6390_g2_scratch_gpio_set_smi(chip, true);
mv88e6xxx_reg_unlock(chip);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index d9434f7cae53..82f9b410de0b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -378,8 +378,10 @@ extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index a9d6e40321a2..61ab6cc4fbfc 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
};
/**
- * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * mv88e6390_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
@@ -248,7 +248,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
* an external SMI interface, or they may be made free for other
* GPIO uses.
*/
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external)
{
int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
@@ -291,6 +291,37 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
}
/**
+ * mv88e6393x_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * MV88E6191X/6193X/6393X GPIO pins 9 and 10 can be configured as an
+ * external SMI interface or as regular GPIOs.
+ *
+ * They do, however, have a different register layout than the chips
+ * handled by the existing mv88e6390 function.
+ */
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ if (external)
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
+
+/**
* mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
* @chip: chip private data
* @port: port number to check for serdes
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 4d677f836807..5a27d047a38e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -95,7 +95,7 @@ static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
}
}
-static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
@@ -137,6 +137,7 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
+ mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 7a864329cb72..dab66c0c6f64 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -950,15 +950,15 @@ qca8k_mdio_register(struct qca8k_priv *priv)
struct device *dev = ds->dev;
struct device_node *mdio;
struct mii_bus *bus;
- int err = 0;
+ int ret = 0;
mdio = of_get_child_by_name(dev->of_node, "mdio");
if (mdio && !of_device_is_available(mdio))
- goto out;
+ goto out_put_node;
bus = devm_mdiobus_alloc(dev);
if (!bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out_put_node;
}
@@ -984,12 +984,11 @@ qca8k_mdio_register(struct qca8k_priv *priv)
bus->write = qca8k_legacy_mdio_write;
}
- err = devm_of_mdiobus_register(dev, bus, mdio);
+ ret = devm_of_mdiobus_register(dev, bus, mdio);
out_put_node:
of_node_put(mdio);
-out:
- return err;
+ return ret;
}
static int
@@ -998,7 +997,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
- int err;
+ int ret;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
@@ -1008,11 +1007,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", &reg);
- if (err) {
+ ret = of_property_read_u32(port, "reg", &reg);
+ if (ret) {
of_node_put(port);
of_node_put(ports);
- return err;
+ return ret;
}
if (!dsa_is_user_port(priv->ds, reg))
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 2358cd399c7e..7f80035c5441 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -534,7 +534,7 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
struct qca8k_priv *priv = ds->priv;
@@ -558,7 +558,7 @@ exit:
}
int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index c8785c36c54e..2184d8d2d5a9 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -518,8 +518,8 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
-int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 060165a85fb7..6989972eebc3 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -16,37 +16,29 @@ menuconfig NET_DSA_REALTEK
if NET_DSA_REALTEK
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO interface driver"
+ bool "Realtek MDIO interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI interface driver"
+ bool "Realtek SMI interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
- tristate "Realtek RTL8365MB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8365MB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
- tristate "Realtek RTL8366RB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8366RB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
Select to enable support for Realtek RTL8366RB.
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 0aab57252a7c..35491dc20d6d 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -1,6 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK) += realtek_dsa.o
+realtek_dsa-objs := rtl83xx.o
+
+ifdef CONFIG_NET_DSA_REALTEK_MDIO
+realtek_dsa-objs += realtek-mdio.o
+endif
+
+ifdef CONFIG_NET_DSA_REALTEK_SMI
+realtek_dsa-objs += realtek-smi.o
+endif
+
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 292e6d087e8b..04b758e5a680 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -25,6 +25,8 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Read/write via mdiobus */
#define REALTEK_MDIO_CTRL0_REG 31
@@ -99,192 +101,87 @@ out_unlock:
return ret;
}
-static void realtek_mdio_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_mdio_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_mdio_info = {
.reg_read = realtek_mdio_read,
.reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_mdio_lock,
- .unlock = realtek_mdio_unlock,
};
-static const struct regmap_config realtek_mdio_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_mdio_read,
- .reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_mdio_probe(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_probe() - Probe an MDIO device for an MDIO-connected switch
+ * @mdiodev: mdio_device to probe on.
+ *
+ * This function should be used as the .probe in an mdio_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to MDIO-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_mdio_probe(struct mdio_device *mdiodev)
{
- struct realtek_priv *priv;
struct device *dev = &mdiodev->dev;
- const struct realtek_variant *var;
- struct regmap_config rc;
- struct device_node *np;
+ struct realtek_priv *priv;
int ret;
- var = of_device_get_match_data(dev);
- if (!var)
- return -EINVAL;
-
- priv = devm_kzalloc(&mdiodev->dev,
- size_add(sizeof(*priv), var->chip_data_sz),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- mutex_init(&priv->map_lock);
+ priv = rtl83xx_probe(dev, &realtek_mdio_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- rc = realtek_mdio_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_mdio_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- priv->mdio_addr = mdiodev->addr;
priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
+ priv->mdio_addr = mdiodev->addr;
priv->write_reg_noack = realtek_mdio_write;
- np = dev->of_node;
-
- dev_set_drvdata(dev, priv);
-
- /* TODO: if power is software controlled, set up any regulators here */
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
-
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
- priv->ds->ops = var->ds_ops_mdio;
-
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ rtl83xx_remove(priv);
return ret;
}
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA);
-static void realtek_mdio_remove(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device to be removed.
+ *
+ * This function should be used as the .remove in an mdio_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_remove(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, REALTEK_DSA);
-static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_shutdown() - Shut down the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device shutting down.
+ *
+ * This function should be used as the .shutdown in an mdio_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_shutdown(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_mdio_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
-
-static struct mdio_driver realtek_mdio_driver = {
- .mdiodrv.driver = {
- .name = "realtek-mdio",
- .of_match_table = realtek_mdio_of_match,
- },
- .probe = realtek_mdio_probe,
- .remove = realtek_mdio_remove,
- .shutdown = realtek_mdio_shutdown,
-};
-
-mdio_module_driver(realtek_mdio_driver);
-
-MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, REALTEK_DSA);
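With the bus specifics reduced to the reg_read/reg_write callbacks carried in struct realtek_interface_info, the common core (rtl83xx.c, not part of this excerpt) can stay interface-agnostic. A user-space sketch of that callback-dispatch pattern, with a fake register store standing in for real MDIO or SMI transfers (all names below are illustrative):

#include <stdint.h>
#include <stdio.h>

struct interface_info {
	int (*reg_read)(void *ctx, uint32_t reg, uint32_t *val);
	int (*reg_write)(void *ctx, uint32_t reg, uint32_t val);
};

static uint32_t fake_regs[16];

static int fake_read(void *ctx, uint32_t reg, uint32_t *val)
{
	*val = fake_regs[reg & 0xf];
	return 0;
}

static int fake_write(void *ctx, uint32_t reg, uint32_t val)
{
	fake_regs[reg & 0xf] = val;
	return 0;
}

/* the "common core": knows nothing about MDIO vs. SMI */
static int core_detect(const struct interface_info *info, void *ctx)
{
	uint32_t id;

	if (info->reg_read(ctx, 0, &id))
		return -1;
	printf("chip id 0x%x\n", (unsigned int)id);
	return 0;
}

int main(void)
{
	static const struct interface_info fake = {
		.reg_read = fake_read,
		.reg_write = fake_write,
	};

	fake.reg_write(NULL, 0, 0x8365);
	return core_detect(&fake, NULL);
}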
diff --git a/drivers/net/dsa/realtek/realtek-mdio.h b/drivers/net/dsa/realtek/realtek-mdio.h
new file mode 100644
index 000000000000..ee70f6a5b8ff
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_MDIO_H
+#define _REALTEK_MDIO_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO)
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return mdio_driver_register(drv);
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+ mdio_driver_unregister(drv);
+}
+
+int realtek_mdio_probe(struct mdio_device *mdiodev);
+void realtek_mdio_remove(struct mdio_device *mdiodev);
+void realtek_mdio_shutdown(struct mdio_device *mdiodev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+}
+
+static inline int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+}
+
+static inline void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+#endif /* _REALTEK_MDIO_H */
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 755546ed8db6..88590ae95a75 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -31,7 +31,6 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
@@ -40,12 +39,14 @@
#include <linux/if_bridge.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "rtl83xx.h"
#define REALTEK_SMI_ACK_RETRY_COUNT 5
static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
{
- ndelay(priv->clk_delay);
+ ndelay(priv->variant->clk_delay);
}
static void realtek_smi_start(struct realtek_priv *priv)
@@ -208,7 +209,7 @@ static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
realtek_smi_start(priv);
/* Send READ command */
- ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_read);
if (ret)
goto out;
@@ -249,7 +250,7 @@ static int realtek_smi_write_reg(struct realtek_priv *priv,
realtek_smi_start(priv);
/* Send WRITE command */
- ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_write);
if (ret)
goto out;
@@ -310,258 +311,98 @@ static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
return realtek_smi_read_reg(priv, reg, val);
}
-static void realtek_smi_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_smi_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_smi_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_smi_info = {
.reg_read = realtek_smi_read,
.reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_smi_lock,
- .unlock = realtek_smi_unlock,
};
-static const struct regmap_config realtek_smi_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_read(priv, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_write(priv, addr, regnum, val);
-}
-
-static int realtek_smi_setup_mdio(struct dsa_switch *ds)
-{
- struct realtek_priv *priv = ds->priv;
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(priv->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- priv->user_mii_bus = devm_mdiobus_alloc(priv->dev);
- if (!priv->user_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- priv->user_mii_bus->priv = priv;
- priv->user_mii_bus->name = "SMI user MII";
- priv->user_mii_bus->read = realtek_smi_mdio_read;
- priv->user_mii_bus->write = realtek_smi_mdio_write;
- snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- ds->index);
- priv->user_mii_bus->dev.of_node = mdio_np;
- priv->user_mii_bus->parent = priv->dev;
- ds->user_mii_bus = priv->user_mii_bus;
-
- ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np);
- if (ret) {
- dev_err(priv->dev, "unable to register MDIO bus %s\n",
- priv->user_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
+/**
+ * realtek_smi_probe() - Probe a platform device for an SMI-connected switch
+ * @pdev: platform_device to probe on.
+ *
+ * This function should be used as the .probe in a platform_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to SMI-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_smi_probe(struct platform_device *pdev)
{
- const struct realtek_variant *var;
struct device *dev = &pdev->dev;
struct realtek_priv *priv;
- struct regmap_config rc;
- struct device_node *np;
int ret;
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- mutex_init(&priv->map_lock);
-
- rc = realtek_smi_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_smi_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- priv->dev = dev;
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
- priv->setup_interface = realtek_smi_setup_mdio;
- priv->write_reg_noack = realtek_smi_write_reg_noack;
-
- dev_set_drvdata(dev, priv);
- spin_lock_init(&priv->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
+ priv = rtl83xx_probe(dev, &realtek_smi_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
/* Fetch MDIO pins */
priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdc))
+ if (IS_ERR(priv->mdc)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdc);
+ }
+
priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdio))
+ if (IS_ERR(priv->mdio)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdio);
-
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
}
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
- priv->ds->ops = var->ds_ops_smi;
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
+ rtl83xx_remove(priv);
return ret;
}
+
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA);
-static void realtek_smi_remove(struct platform_device *pdev)
+/**
+ * realtek_smi_remove() - Remove the driver of an SMI-connected switch
+ * @pdev: platform_device to be removed.
+ *
+ * This function should be used as the .remove_new in a platform_driver. It
+ * first unregisters the DSA switch and then calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_remove(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
- if (priv->user_mii_bus)
- of_node_put(priv->user_mii_bus->dev.of_node);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, REALTEK_DSA);
-static void realtek_smi_shutdown(struct platform_device *pdev)
+/**
+ * realtek_smi_shutdown() - Shut down the driver of an SMI-connected switch
+ * @pdev: platform_device shutting down.
+ *
+ * This function should be used as the .shutdown in a platform_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_shutdown(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_smi_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = realtek_smi_of_match,
- },
- .probe = realtek_smi_probe,
- .remove_new = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/realtek-smi.h b/drivers/net/dsa/realtek/realtek-smi.h
new file mode 100644
index 000000000000..ea49a2edd3c8
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI)
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return platform_driver_register(drv);
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+ platform_driver_unregister(drv);
+}
+
+int realtek_smi_probe(struct platform_device *pdev);
+void realtek_smi_remove(struct platform_device *pdev);
+void realtek_smi_shutdown(struct platform_device *pdev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+}
+
+static inline int realtek_smi_probe(struct platform_device *pdev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_smi_remove(struct platform_device *pdev)
+{
+}
+
+static inline void realtek_smi_shutdown(struct platform_device *pdev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+#endif /* _REALTEK_SMI_H */
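The stubs above let a chip module register the SMI glue unconditionally: with
CONFIG_NET_DSA_REALTEK_SMI=n, registration collapses to a no-op returning 0 and
probe returns -ENOENT. The rtl8365mb and rtl8366rb hunks below use exactly this
pattern; a condensed sketch, with a hypothetical "example" driver name:

	static struct platform_driver example_smi_driver = {
		.driver = { .name = "example-smi" },
		.probe = realtek_smi_probe,
		.remove_new = realtek_smi_remove,
		.shutdown = realtek_smi_shutdown,
	};

	static int example_init(void)
	{
		/* No #ifdef needed: this is a no-op when SMI support is off. */
		return realtek_smi_driver_register(&example_smi_driver);
	}
	module_init(example_init);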
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index 790488e9c667..e0b1aa01337b 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
+#include <linux/reset.h>
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
@@ -48,6 +49,7 @@ struct rtl8366_vlan_4k {
struct realtek_priv {
struct device *dev;
+ struct reset_control *reset_ctl;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
@@ -58,11 +60,10 @@ struct realtek_priv {
struct mii_bus *bus;
int mdio_addr;
- unsigned int clk_delay;
- u8 cmd_read;
- u8 cmd_write;
+ const struct realtek_variant *variant;
+
spinlock_t lock; /* Locks around command writes */
- struct dsa_switch *ds;
+ struct dsa_switch ds;
struct irq_domain *irqdomain;
bool leds_disabled;
@@ -73,7 +74,6 @@ struct realtek_priv {
struct rtl8366_mib_counter *mib_counters;
const struct realtek_ops *ops;
- int (*setup_interface)(struct dsa_switch *ds);
int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
@@ -91,7 +91,6 @@ struct realtek_ops {
int (*detect)(struct realtek_priv *priv);
int (*reset_chip)(struct realtek_priv *priv);
int (*setup)(struct realtek_priv *priv);
- void (*cleanup)(struct realtek_priv *priv);
int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
@@ -116,8 +115,7 @@ struct realtek_ops {
};
struct realtek_variant {
- const struct dsa_switch_ops *ds_ops_smi;
- const struct dsa_switch_ops *ds_ops_mdio;
+ const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index b072045eb154..12665a8a3412 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -101,6 +101,9 @@
#include <linux/if_vlan.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
@@ -206,10 +209,10 @@
#define RTL8365MB_EXT_PORT_MODE_100FX 13
/* External interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT0,EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
- ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ ((_extint) <= 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
@@ -689,7 +692,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -722,7 +725,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
*data = val & 0xFFFF;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -733,7 +736,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -764,7 +767,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return 0;
}
@@ -825,17 +828,6 @@ static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
return 0;
}
-static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8365mb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
-}
-
static const struct rtl8365mb_extint *
rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
{
@@ -878,6 +870,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(priv, port);
+ struct dsa_switch *ds = &priv->ds;
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
@@ -888,7 +881,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
if (!extint)
return -ENODEV;
- dp = dsa_to_port(priv->ds, port);
+ dp = dsa_to_port(ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -1541,6 +1534,7 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1551,7 +1545,7 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1567,12 +1561,13 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
@@ -1971,7 +1966,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
cpu->mask |= BIT(cpu_dp->index);
if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
@@ -1986,7 +1981,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Forward only to the CPU */
@@ -2003,7 +1998,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Set up per-port private data */
p->priv = priv;
@@ -2014,12 +2009,10 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
if (ret)
goto out_teardown_irq;
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
}
/* Start statistics counter polling */
@@ -2113,7 +2106,7 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
@@ -2134,29 +2127,6 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
.port_max_mtu = rtl8365mb_port_max_mtu,
};
-static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
- .get_tag_protocol = rtl8365mb_get_tag_protocol,
- .change_tag_protocol = rtl8365mb_change_tag_protocol,
- .setup = rtl8365mb_setup,
- .teardown = rtl8365mb_teardown,
- .phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
- .phy_read = rtl8365mb_dsa_phy_read,
- .phy_write = rtl8365mb_dsa_phy_write,
- .port_stp_state_set = rtl8365mb_port_stp_state_set,
- .get_strings = rtl8365mb_get_strings,
- .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
- .get_sset_count = rtl8365mb_get_sset_count,
- .get_eth_phy_stats = rtl8365mb_get_phy_stats,
- .get_eth_mac_stats = rtl8365mb_get_mac_stats,
- .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
- .get_stats64 = rtl8365mb_get_stats64,
- .port_change_mtu = rtl8365mb_port_change_mtu,
- .port_max_mtu = rtl8365mb_port_max_mtu,
-};
-
static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
@@ -2164,16 +2134,66 @@ static const struct realtek_ops rtl8365mb_ops = {
};
const struct realtek_variant rtl8365mb_variant = {
- .ds_ops_smi = &rtl8365mb_switch_ops_smi,
- .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
-EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+static const struct of_device_id rtl8365mb_of_match[] = {
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8365mb_of_match);
+
+static struct platform_driver rtl8365mb_smi_driver = {
+ .driver = {
+ .name = "rtl8365mb-smi",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8365mb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8365mb-mdio",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8365mb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8365mb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8365mb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8365mb_init);
+
+static void __exit rtl8365mb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8365mb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+}
+module_exit(rtl8365mb_exit);
MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 59f98d2c8769..7c6520ba3a26 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -34,7 +34,7 @@ int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, REALTEK_DSA);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
@@ -187,7 +187,7 @@ int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, REALTEK_DSA);
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
@@ -217,7 +217,7 @@ int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, REALTEK_DSA);
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
@@ -243,7 +243,7 @@ int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, REALTEK_DSA);
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
@@ -265,7 +265,7 @@ int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, REALTEK_DSA);
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
@@ -290,7 +290,7 @@ int rtl8366_reset_vlan(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, REALTEK_DSA);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -345,7 +345,7 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, REALTEK_DSA);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
@@ -389,7 +389,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, REALTEK_DSA);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -403,7 +403,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
for (i = 0; i < priv->num_mib_counters; i++)
ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, REALTEK_DSA);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
@@ -417,7 +417,7 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
return priv->num_mib_counters;
}
-EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, REALTEK_DSA);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
@@ -441,4 +441,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, REALTEK_DSA);
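Note the pattern behind the EXPORT_SYMBOL_NS_GPL() conversions in this file:
the second argument places each symbol in the REALTEK_DSA symbol namespace, and
any module calling these helpers must import that namespace or modpost warns
and the loader refuses to resolve the symbol. A minimal sketch of the pairing
(symbol name hypothetical):

	/* In the exporting module: */
	EXPORT_SYMBOL_NS_GPL(example_helper, REALTEK_DSA);

	/* In every consumer module, e.g. the chip drivers below: */
	MODULE_IMPORT_NS(REALTEK_DSA);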
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e3b6a470ca67..e10ae94cf771 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -23,6 +23,9 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -1030,12 +1033,10 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
dev_info(priv->dev, "no interrupt support\n");
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- return -ENODEV;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
}
return 0;
@@ -1650,6 +1651,7 @@ static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
+ struct dsa_switch *ds = &priv->ds;
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
@@ -1674,7 +1676,7 @@ static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
@@ -1718,7 +1720,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
@@ -1746,7 +1748,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
phy, regnum, reg, val);
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -1760,7 +1762,7 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
@@ -1777,22 +1779,11 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
-static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8366rb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
-}
-
static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
@@ -1858,7 +1849,7 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
@@ -1882,32 +1873,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
- .get_tag_protocol = rtl8366_get_tag_protocol,
- .setup = rtl8366rb_setup,
- .phy_read = rtl8366rb_dsa_phy_read,
- .phy_write = rtl8366rb_dsa_phy_write,
- .phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
- .get_strings = rtl8366_get_strings,
- .get_ethtool_stats = rtl8366_get_ethtool_stats,
- .get_sset_count = rtl8366_get_sset_count,
- .port_bridge_join = rtl8366rb_port_bridge_join,
- .port_bridge_leave = rtl8366rb_port_bridge_leave,
- .port_vlan_filtering = rtl8366rb_vlan_filtering,
- .port_vlan_add = rtl8366_vlan_add,
- .port_vlan_del = rtl8366_vlan_del,
- .port_enable = rtl8366rb_port_enable,
- .port_disable = rtl8366rb_port_disable,
- .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
- .port_bridge_flags = rtl8366rb_port_bridge_flags,
- .port_stp_state_set = rtl8366rb_port_stp_state_set,
- .port_fast_age = rtl8366rb_port_fast_age,
- .port_change_mtu = rtl8366rb_change_mtu,
- .port_max_mtu = rtl8366rb_max_mtu,
-};
-
static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
@@ -1925,16 +1890,66 @@ static const struct realtek_ops rtl8366rb_ops = {
};
const struct realtek_variant rtl8366rb_variant = {
- .ds_ops_smi = &rtl8366rb_switch_ops_smi,
- .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
-EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+static const struct of_device_id rtl8366rb_of_match[] = {
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8366rb_of_match);
+
+static struct platform_driver rtl8366rb_smi_driver = {
+ .driver = {
+ .name = "rtl8366rb-smi",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8366rb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8366rb-mdio",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8366rb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8366rb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8366rb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8366rb_init);
+
+static void __exit rtl8366rb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8366rb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+}
+module_exit(rtl8366rb_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
new file mode 100644
index 000000000000..d2e876805393
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+
+#include "realtek.h"
+#include "rtl83xx.h"
+
+/**
+ * rtl83xx_lock() - Locks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function is passed to regmap to be used as the lock function.
+ * It is also used externally to block regmap before executing multiple
+ * operations that must happen in sequence (which will use
+ * realtek_priv.map_nolock instead).
+ *
+ * Context: Can sleep. Acquires priv->map_lock.
+ * Return: Nothing.
+ */
+void rtl83xx_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, REALTEK_DSA);
+
+/**
+ * rtl83xx_unlock() - Unlocks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function unlocks the lock acquired by rtl83xx_lock.
+ *
+ * Context: Releases priv->map_lock.
+ * Return: Nothing.
+ */
+void rtl83xx_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, REALTEK_DSA);
+
+static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int rtl83xx_user_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+/**
+ * rtl83xx_setup_user_mdio() - register the user MII bus
+ * @ds: DSA switch associated with this user_mii_bus
+ *
+ * Registers the MDIO bus for built-in Ethernet PHYs, and associates it with
+ * the mandatory 'mdio' child OF node of the switch.
+ *
+ * Context: Can sleep.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret = 0;
+
+ mdio_np = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ bus->priv = priv;
+ bus->name = "Realtek user MII";
+ bus->read = rtl83xx_user_mdio_read;
+ bus->write = rtl83xx_user_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s:user_mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ ret = devm_of_mdiobus_register(priv->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ goto err_put_node;
+ }
+
+ priv->user_mii_bus = bus;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, REALTEK_DSA);
+
+/**
+ * rtl83xx_probe() - probe a Realtek switch
+ * @dev: the device being probed
+ * @interface_info: specific management interface info.
+ *
+ * This function initializes realtek_priv and reads data from the device tree
+ * node. The switch is hard reset if a reset method is provided.
+ *
+ * Context: Can sleep.
+ * Return: Pointer to the realtek_priv or ERR_PTR() in case of failure.
+ *
+ * The realtek_priv pointer does not need to be freed as it is controlled by
+ * devres.
+ */
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info)
+{
+ const struct realtek_variant *var;
+ struct realtek_priv *priv;
+ struct regmap_config rc = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = interface_info->reg_read,
+ .reg_write = interface_info->reg_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = rtl83xx_lock,
+ .unlock = rtl83xx_unlock,
+ };
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return ERR_PTR(-EINVAL);
+
+ priv = devm_kzalloc(dev, size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->map_lock);
+
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ rc.disable_locking = true;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->variant = var;
+ priv->ops = var->ops;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ spin_lock_init(&priv->lock);
+
+ priv->leds_disabled = of_property_read_bool(dev->of_node,
+ "realtek,disable-leds");
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(priv->reset_ctl)) {
+ ret = PTR_ERR(priv->reset_ctl);
+ dev_err_probe(dev, ret, "failed to get reset control\n");
+ return ERR_CAST(priv->reset_ctl);
+ }
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return ERR_CAST(priv->reset);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ if (priv->reset_ctl || priv->reset) {
+ rtl83xx_reset_assert(priv);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ rtl83xx_reset_deassert(priv);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ return priv;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, REALTEK_DSA);
+
+/**
+ * rtl83xx_register_switch() - detect and register a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function first checks the switch chip ID and then registers a DSA
+ * switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_register_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ int ret;
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to detect switch\n");
+ return ret;
+ }
+
+ ds->priv = priv;
+ ds->dev = priv->dev;
+ ds->ops = priv->variant->ds_ops;
+ ds->num_ports = priv->num_ports;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to register switch\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_unregister_switch() - unregister a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function unregisters a DSA switch.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_unregister_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_unregister_switch(ds);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_shutdown() - shut down a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function shuts down the DSA switch and clears the platform driver data,
+ * to prevent realtek_{smi,mdio}_remove() from running afterwards, which is
+ * possible if the parent bus implements its own .shutdown() as .remove().
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_shutdown(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(priv->dev, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+
+/**
+ * rtl83xx_remove() - Clean up a Realtek switch driver
+ * @priv: realtek_priv pointer
+ *
+ * If a method is provided, this function asserts the hard reset of the switch
+ * in order to avoid leaking traffic when the driver is gone.
+ *
+ * Context: Might sleep if priv->gdev->chip->can_sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_remove(struct realtek_priv *priv)
+{
+ /* leave the device reset asserted */
+ rtl83xx_reset_assert(priv);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+
+void rtl83xx_reset_assert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to assert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, true);
+}
+
+void rtl83xx_reset_deassert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_deassert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to deassert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, false);
+}
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Realtek DSA switches common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/rtl83xx.h b/drivers/net/dsa/realtek/rtl83xx.h
new file mode 100644
index 000000000000..c8a0ff8fd75e
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL83XX_H
+#define _RTL83XX_H
+
+struct realtek_interface_info {
+ int (*reg_read)(void *ctx, u32 reg, u32 *val);
+ int (*reg_write)(void *ctx, u32 reg, u32 val);
+};
+
+void rtl83xx_lock(void *ctx);
+void rtl83xx_unlock(void *ctx);
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds);
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info);
+int rtl83xx_register_switch(struct realtek_priv *priv);
+void rtl83xx_unregister_switch(struct realtek_priv *priv);
+void rtl83xx_shutdown(struct realtek_priv *priv);
+void rtl83xx_remove(struct realtek_priv *priv);
+void rtl83xx_reset_assert(struct realtek_priv *priv);
+void rtl83xx_reset_deassert(struct realtek_priv *priv);
+
+#endif /* _RTL83XX_H */
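struct realtek_interface_info is the seam between an interface glue (SMI or
MDIO) and this common core: the glue supplies raw register accessors, and
rtl83xx_probe() wraps them in the shared locked/lockless regmaps. A minimal
sketch of a hypothetical glue built on these hooks (example_* names are
illustrative only):

	static int example_reg_read(void *ctx, u32 reg, u32 *val)
	{
		/* drive a read cycle on the underlying bus; ctx is realtek_priv */
		*val = 0;
		return 0;
	}

	static int example_reg_write(void *ctx, u32 reg, u32 val)
	{
		/* drive a write cycle on the underlying bus */
		return 0;
	}

	static const struct realtek_interface_info example_info = {
		.reg_read = example_reg_read,
		.reg_write = example_reg_write,
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct realtek_priv *priv;
		int ret;

		priv = rtl83xx_probe(&pdev->dev, &example_info);
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		ret = rtl83xx_register_switch(priv);
		if (ret)
			rtl83xx_remove(priv);

		return ret;
	}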
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 768454aa36d6..d29b5d7af0d7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -67,18 +67,12 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
+ netdev_lockdep_set_classes(dev);
return 0;
}
-static void dummy_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
@@ -90,7 +84,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
- .ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
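For context on the dummy.c hunk above: setting dev->pcpu_stat_type to
NETDEV_PCPU_STAT_LSTATS hands ownership of the per-CPU lstats to the core,
which allocates them at registration and frees them at unregistration, so the
driver's ndo_uninit goes away entirely. The transmit path is untouched; a
driver using this scheme keeps counting through the usual helper, roughly:

	/* Core-owned per-CPU stats: the driver only updates them. */
	dev_lstats_add(dev, skb->len);	/* one packet, skb->len bytes */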
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 5a274b99f299..6a19b5393ed1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -15,9 +15,6 @@ if ETHERNET
config MDIO
tristate
-config SUNGEM_PHY
- tristate
-
source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index d7c274af6d4d..8b4ef5121308 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -464,8 +464,9 @@ static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* bitfield of ADIN1110_MDIOACC register will contain
* the requested register value.
*/
- ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ ret = readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
if (ret < 0)
return ret;
@@ -495,8 +496,9 @@ static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
return ret;
- return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ return readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
}
/* ADIN1110 MAC-PHY contains an ADIN1100 PHY.
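For reference on the adin1110 hunk: readx_poll_timeout_atomic() busy-waits
with udelay() instead of sleeping, which is what makes it safe when the MDIO
accessors may run in atomic context; the hunk also tightens the poll interval
from 10 ms to 100 us within the same 30 ms budget. Its shape, from
<linux/iopoll.h>:

	/*
	 * readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us)
	 *
	 * Repeatedly evaluates val = op(addr) until cond is true or
	 * timeout_us expires, busy-waiting delay_us between reads; unlike
	 * readx_poll_timeout() it never sleeps.
	 */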
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 633b321d7fdd..9e9e4a03f1a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &sq->dma_addr, GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &cq->dma_addr, GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
- &aenq->dma_addr, GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq_caps = 0;
aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ aenq_caps |=
+ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- netdev_err(ena_dev->net_device,
- "AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
}
if (unlikely(!admin_queue->comp_ctx)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
return NULL;
}
@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- netdev_dbg(admin_queue->ena_dev->net_device,
- "Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
struct ena_comp_ctx *comp_ctx;
u16 i;
- admin_queue->comp_ctx =
- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
- netdev_err(ena_dev->net_device,
- "Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- netdev_err(ena_dev->net_device,
- "Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
GFP_KERNEL);
}
@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
u8 comp_status)
{
if (unlikely(comp_status != 0))
- netdev_err(admin_queue->ena_dev->net_device,
- "Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+ comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
- comp_ctx->status);
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_default_cfg->llq_header_location;
} else {
netdev_err(ena_dev->net_device,
- "Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ "Invalid header location control, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl,
- supported_feat, llq_info->desc_stride_ctrl);
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size = 256;
} else {
netdev_err(ena_dev->net_device,
- "Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ llq_default_cfg->llq_num_decs_before_header, supported_feat,
+ llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- netdev_err(ena_dev->net_device,
- "Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(
- admin_queue->completion_timeout));
+ usecs_to_jiffies(admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
+ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device,
"Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- netdev_err(ena_dev->net_device,
- "Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_cq->cdesc_addr.virt_addr) {
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_cq->cdesc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr);
io_cq->cdesc_addr.virt_addr = NULL;
@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- netdev_err(ena_dev->net_device,
- "Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
return -EOPNOTSUPP;
}
@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (unlikely(ret))
netdev_err(ena_dev->net_device,
- "Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
return ret;
}
@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION))
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- rss->hash_key =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_key)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- rss->hash_key, rss->hash_key_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+ rss->hash_key_dma_addr);
rss->hash_key = NULL;
}
@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- rss->hash_ctrl =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_ctrl)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr);
rss->hash_ctrl = NULL;
}
@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- rss->rss_ind_tbl =
- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+ GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
tbl_size = (1ULL << log_size) * sizeof(u16);
- rss->host_rss_ind_tbl =
- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
if (unlikely(!rss->host_rss_ind_tbl))
goto mem_err2;
@@ -1197,8 +1161,7 @@ mem_err2:
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
rss->rss_ind_tbl = NULL;
mem_err1:
rss->tbl_log_size = 0;
@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
}
@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1284,16 +1245,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(uintptr_t)cmd_completion.sq_doorbell_offset);
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
- + cmd_completion.llq_headers_offset);
-
io_sq->desc_addr.pbuf_dev_addr =
(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
cmd_completion.llq_descriptors_offset);
}
- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
- io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1420,8 +1377,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1430,18 +1386,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.cq_interrupt_unmask_register_offset);
- if (cmd_completion.cq_head_db_register_offset)
- io_cq->cq_head_db_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_head_db_register_offset);
-
if (cmd_completion.numa_node_register_offset)
io_cq->numa_node_cfg_reg =
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
- io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1451,8 +1401,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Invalid queue number %d but the max is %d\n", qid,
+ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1492,8 +1441,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ena_delay_exponential_backoff_us(exp++,
- ena_dev->ena_min_poll_delay_us);
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1519,8 +1467,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1588,8 +1535,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1610,8 +1556,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
- width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
return -EINVAL;
}
@@ -1633,19 +1578,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ctrl_ver = ena_com_reg_bar_read32(ena_dev,
ENA_REGS_CONTROLLER_VERSION_OFF);
- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- dev_info(ena_dev->dmadev,
- "ENA controller version: %d.%d.%d implementation version %d\n",
+ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1694,20 +1636,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
- sq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
sq->entries = NULL;
size = ADMIN_CQ_SIZE(admin_queue->q_depth);
if (cq->entries)
- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
- cq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
cq->entries = NULL;
size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
- aenq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
aenq->entries = NULL;
}
@@ -1733,10 +1672,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
spin_lock_init(&mmio_read->lock);
- mmio_read->read_resp =
- dma_alloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -1767,8 +1704,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr);
mmio_read->read_resp = NULL;
}
@@ -1800,8 +1737,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1878,8 +1814,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1905,8 +1840,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
/* header length is limited to 8 bits */
- io_sq->tx_max_header_size =
- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
if (ret)
@@ -1938,8 +1872,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -1983,8 +1916,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version !=
- ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
@@ -2025,18 +1957,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
if (!rc)
- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
- sizeof(get_resp.u.hw_hints));
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->hw_hints, 0x0,
- sizeof(get_feat_ctx->hw_hints));
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
if (!rc)
- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
- sizeof(get_resp.u.llq));
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
else if (rc == -EOPNOTSUPP)
memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
else
@@ -2084,8 +2013,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((READ_ONCE(aenq_common->flags) &
- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
*/
@@ -2094,8 +2022,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- netdev_dbg(ena_dev->net_device,
- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event */
@@ -2124,8 +2051,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2137,15 +2063,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
- (cap == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
return -EINVAL;
}
@@ -2168,8 +2092,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
return rc;
}
@@ -2177,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
return rc;
}
@@ -2215,8 +2137,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to get stats. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
return ret;
}
@@ -2228,8 +2149,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
- netdev_err(ena_dev->net_device,
- "Capability %d isn't supported\n",
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
ENA_ADMIN_ENI_STATS);
return -EOPNOTSUPP;
}
@@ -2266,8 +2186,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2286,8 +2205,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2301,8 +2219,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to get offload capabilities %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2320,8 +2237,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
struct ena_admin_get_feat_resp get_resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
@@ -2334,8 +2250,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- netdev_err(ena_dev->net_device,
- "Func hash %d isn't supported by device, abort\n",
+ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2365,8 +2280,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to set hash function %d. error: %d\n",
+ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return -EINVAL;
}
@@ -2398,16 +2312,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- netdev_err(ena_dev->net_device,
- "Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
if ((func == ENA_ADMIN_TOEPLITZ) && key) {
if (key_len != sizeof(hash_key->key)) {
netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
+ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+ sizeof(hash_key->key));
return -EINVAL;
}
memcpy(hash_key->key, key, key_len);
@@ -2495,8 +2408,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_INPUT)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
@@ -2527,8 +2439,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2605,8 +2516,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
- proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
return -EINVAL;
}
@@ -2658,8 +2568,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
@@ -2699,8 +2608,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2779,9 +2687,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->host_info =
- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2827,8 +2734,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
if (host_attr->debug_area_virt_addr) {
dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
host_attr->debug_area_virt_addr = NULL;
}
}
@@ -2877,8 +2783,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2896,8 +2801,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- netdev_err(ena_dev->net_device,
- "Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2935,14 +2839,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- netdev_dbg(ena_dev->net_device,
- "Feature %d isn't supported\n",
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
netdev_err(ena_dev->net_device,
- "Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@@ -2990,8 +2892,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- netdev_err(ena_dev->net_device,
- "The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 3c5081d9d25d..fea57eb8e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -109,16 +109,13 @@ struct ena_com_io_cq {
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
- /* The completion queue head doorbell register */
- u32 __iomem *cq_head_db_reg;
-
/* numa configuration register (for TPH) */
u32 __iomem *numa_node_cfg_reg;
/* The value to write to the above register to unmask
* the interrupt of this queue
*/
- u32 msix_vector;
+ u32 msix_vector ____cacheline_aligned;
enum queue_direction direction;
@@ -134,7 +131,6 @@ struct ena_com_io_cq {
/* Device queue index */
u16 idx;
u16 head;
- u16 last_head_update;
u8 phase;
u8 cdesc_entry_size_in_bytes;
@@ -158,7 +154,6 @@ struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
u32 __iomem *db_addr;
- u8 __iomem *header_addr;
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f9f886289b97..933e619b3a31 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
io_sq->entries_in_tx_burst_left--;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
+ io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
wmb();
/* The line is completed. Copy it to dev */
- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
+ (llq_info->desc_list_entry_size) / 8);
io_sq->tail++;
@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
header_offset =
llq_info->descs_num_before_header * io_sq->desc_entry_size;
- if (unlikely((header_offset + header_len) >
- llq_info->desc_list_entry_size)) {
+ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return NULL;
}
@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
"l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(header_len > io_sq->tx_max_header_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ "Header size is too large %d max header: %d\n", header_len,
+ io_sq->tx_max_header_size);
return -EINVAL;
}
- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push)) {
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Push header wasn't provided in LLQ mode\n");
return -EINVAL;
@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
}
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
io_sq->next_to_comp += nb_hw_desc;
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
- io_sq->qid, io_sq->next_to_comp);
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
/* Get rx flags from the last pkt */
ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->req_id = req_id;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
- __func__, io_sq->qid, req_id);
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
+ req_id);
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 372b259279ec..72b019758caa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -8,8 +8,6 @@
#include "ena_com.h"
-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
@@ -145,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
}
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Queue: %d num_descs: %d num_entries_needed: %d\n",
- io_sq->qid, num_descs, num_entries_needed);
+ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
+ num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
@@ -157,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 tail = io_sq->tail;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
+ max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
return 0;
}
-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
- u16 unreported_comp, head;
- bool need_update;
-
- if (unlikely(io_cq->cq_head_db_reg)) {
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (unlikely(need_update)) {
- netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
- }
- }
-
- return 0;
-}
-
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
u8 numa_node)
{
@@ -248,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
+ cdesc->req_id);
return -EINVAL;
}
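/*
 * [Editor's sketch, not part of the patch] The deleted
 * ena_com_update_dev_comp_head() above implemented a "report the CQ head
 * only once per quarter of the ring" pattern, which current devices no
 * longer need. A standalone model of the removed predicate (names mirror
 * the deleted ENA ones; nothing here is driver API):
 */
#include <stdbool.h>
#include <stdint.h>

#define COMP_HEAD_THRESH 4 /* stand-in for the removed ENA_COMP_HEAD_THRESH */

/* head is a free-running u16 counter, so the subtraction wraps safely */
static bool head_update_needed(uint16_t head, uint16_t last_reported,
			       uint16_t q_depth)
{
	uint16_t unreported = (uint16_t)(head - last_reported);

	return unreported > q_depth / COMP_HEAD_THRESH;
}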
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 1c0a7828d397..09e7da1a69c9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
- NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static struct ena_aenq_handlers aenq_handlers;
@@ -47,19 +47,44 @@ static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
struct ena_adapter *adapter = netdev_priv(dev);
+ unsigned int time_since_last_napi, threshold;
+ struct ena_ring *tx_ring;
+ int napi_scheduled;
+
+ if (txqueue >= adapter->num_io_queues) {
+ netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
+ goto schedule_reset;
+ }
+
+ threshold = jiffies_to_usecs(dev->watchdog_timeo);
+ tx_ring = &adapter->tx_ring[txqueue];
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
+
+ netdev_err(dev,
+ "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
+ txqueue,
+ threshold,
+ time_since_last_napi,
+ napi_scheduled);
+
+ if (threshold < time_since_last_napi && napi_scheduled) {
+ netdev_err(dev,
+ "napi handler hasn't been called for a long time but is scheduled\n");
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ }
+schedule_reset:
/* Change the state of the device to trigger reset
* Check that we are not in the middle of a reset or that a trigger has already happened
*/
-
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
+ ena_reset_device(adapter, reset_reason);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
-
- netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
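/*
 * [Editor's sketch] The rewritten timeout handler above distinguishes a
 * stalled device from a starved poll loop: if napi is still marked as
 * scheduled yet has not run for longer than the watchdog threshold, the
 * reset reason becomes the new SUSPECTED_POLL_STARVATION. Minimal model
 * of that classification (enum values are illustrative, not the real
 * register codes):
 */
#include <stdbool.h>

enum reset_reason { RESET_OS_NETDEV_WD, RESET_SUSPECTED_POLL_STARVATION };

static enum reset_reason classify_tx_timeout(unsigned int threshold_us,
					     unsigned int since_last_napi_us,
					     bool napi_scheduled)
{
	/* napi was queued but never got CPU time: blame the host, not the HW */
	if (threshold_us < since_last_napi_us && napi_scheduled)
		return RESET_SUSPECTED_POLL_STARVATION;

	return RESET_OS_NETDEV_WD;
}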
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -116,11 +141,9 @@ int ena_xmit_common(struct ena_adapter *adapter,
if (unlikely(rc)) {
netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
- &ring->syncp);
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
if (rc != -ENOMEM)
- ena_reset_device(adapter,
- ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -485,8 +508,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
*/
page = dev_alloc_page();
if (!page) {
- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
return ERR_PTR(-ENOSPC);
}
@@ -523,7 +545,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
/* We handle DMA here */
page = ena_alloc_map_page(rx_ring, &dma);
- if (unlikely(IS_ERR(page)))
+ if (IS_ERR(page))
return PTR_ERR(page);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
@@ -545,8 +567,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info,
unsigned long attrs)
{
- dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL, attrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
@@ -819,8 +841,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
&req_id);
if (rc) {
if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(tx_ring, req_id, NULL,
- false);
+ handle_invalid_req_id(tx_ring, req_id, NULL, false);
break;
}
@@ -856,7 +877,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -1046,8 +1066,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
DMA_FROM_DEVICE);
if (!reuse_rx_buf_page)
- ena_unmap_rx_buff_attrs(rx_ring, rx_info,
- DMA_ATTR_SKIP_CPU_SYNC);
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
page_offset + buf_offset, len, buf_len);
@@ -1303,10 +1322,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
- if (refill_required > refill_threshold) {
- ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ if (refill_required > refill_threshold)
ena_refill_rx_bufs(rx_ring, refill_required);
- }
if (xdp_flags & ENA_XDP_REDIRECT)
xdp_do_flush();
@@ -1320,8 +1337,7 @@ error:
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
@@ -1811,8 +1827,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2134,6 +2149,12 @@ int ena_up(struct ena_adapter *adapter)
*/
ena_init_napi_in_range(adapter, 0, io_queue_count);
+ /* Enabling DIM needs to happen before enabling IRQs since DIM
+ * is run from napi routine
+ */
+ if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
+ ena_com_enable_adaptive_moderation(adapter->ena_dev);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
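/*
 * [Editor's note] Adaptive interrupt moderation (DIM) is enabled before
 * the IRQs are requested because the napi poll consults the DIM state and
 * can run as soon as the first interrupt fires. A toy ordering sketch
 * (every function below is an illustrative stub, not driver code):
 */
#include <stdio.h>

static void init_napi(void)    { puts("napi handlers registered"); }
static void enable_dim(void)   { puts("DIM enabled"); }
static int  request_irqs(void) { puts("IRQs live; napi may run now"); return 0; }

static int bring_up(void)
{
	init_napi();
	enable_dim();		/* must precede IRQs: napi reads DIM state */
	return request_irqs();
}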
@@ -2184,7 +2205,7 @@ void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2197,8 +2218,6 @@ void ena_down(struct ena_adapter *adapter)
/* After this point the napi handler won't enable the tx queue */
ena_napi_disable_in_range(adapter, 0, io_queue_count);
- /* After destroy the queue there won't be any new interrupts */
-
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
@@ -2588,8 +2607,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(rc))
goto error_drop_packet;
- skb_tx_timestamp(skb);
-
next_to_use = tx_ring->next_to_use;
req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
@@ -2653,6 +2670,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ skb_tx_timestamp(skb);
+
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
/* trigger the dma engine. ena_ring_tx_doorbell()
* calls a memory barrier inside it.
@@ -2670,22 +2689,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in-kernel network services that
- * want to loop incoming skb rx to tx in normal user-generated traffic;
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -2764,8 +2767,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- netif_warn(adapter, drv, adapter->netdev,
- "Cannot set host attributes\n");
+ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
else
netif_err(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
@@ -2863,18 +2865,16 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
- .ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
.ndo_xdp_xmit = ena_xdp_xmit,
};
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -2933,6 +2933,18 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+ if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
+ max_tx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
+ max_rx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
/* When forcing large headers, we multiply the entry size by 2, and therefore divide
* the queue size by 2, leaving the amount of memory used by the queues unchanged.
*/
@@ -2963,6 +2975,8 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
adapter->max_rx_ring_size = max_rx_queue_size;
adapter->requested_tx_ring_size = tx_queue_size;
adapter->requested_rx_ring_size = rx_queue_size;
+
+ return 0;
}
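/*
 * [Editor's sketch] ena_calc_io_queue_size() now returns an error instead
 * of silently continuing when the device advertises a ring smaller than
 * the driver minimum. Standalone model of the added guard (the minimum
 * value is assumed purely for illustration):
 */
#include <stdio.h>

#define MIN_RING_SIZE 256 /* assumption; stands in for ENA_MIN_RING_SIZE */

static int validate_ring_size(unsigned int max_queue_size)
{
	if (max_queue_size < MIN_RING_SIZE) {
		fprintf(stderr, "max queue size %u < minimum %u\n",
			max_queue_size, MIN_RING_SIZE);
		return -1; /* stands in for -EINVAL */
	}
	return 0;
}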
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3070,6 +3084,7 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
bool *wd_state)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3159,15 +3174,19 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(dev, "ENA device init failed\n");
+ netdev_err(netdev, "Cannot set queues placement policy, rc = %d\n", rc);
goto err_admin_init;
}
- ena_calc_io_queue_size(adapter, get_feat_ctx);
+ rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
+ if (unlikely(rc))
+ goto err_admin_init;
return 0;
err_admin_init:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
@@ -3226,7 +3245,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
- if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ if (dev_up)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
@@ -3372,14 +3391,18 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
unsigned int time_since_last_napi;
unsigned int missing_tx_comp_to;
bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
+ int napi_scheduled;
u32 missed_tx = 0;
int i, rc = 0;
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
+
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
@@ -3406,25 +3429,45 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
adapter->missing_tx_completion_to);
if (unlikely(is_tx_comp_time_expired)) {
- if (!tx_buf->print_once) {
- time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
- missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
- netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
- tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ time_since_last_napi =
+ jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
+
+ if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
+ /* We suspect napi isn't called because the
+ * bottom half is not run. Require a bigger
+ * timeout for these cases
+ */
+ if (!time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))
+ continue;
+
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
- tx_buf->print_once = 1;
missed_tx++;
+
+ if (tx_buf->print_once)
+ continue;
+
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
+ tx_ring->qid, i, time_since_last_napi, napi_scheduled);
+
+ tx_buf->print_once = 1;
}
}
if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
missed_tx,
- adapter->missing_tx_completion_threshold);
- ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
+ adapter->missing_tx_completion_threshold,
+ missing_tx_comp_to);
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Resetting the device\n");
+
+ ena_reset_device(adapter, reset_reason);
rc = -EIO;
}
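/*
 * [Editor's sketch] The hunk above doubles the missing-completion grace
 * period when napi is scheduled but starved, since the delay is then the
 * host's fault rather than the device's. Wrap-safe model of the check
 * (mirrors time_is_before_jiffies() semantics; names are illustrative):
 */
#include <stdbool.h>

static bool tx_comp_missed(unsigned long now, unsigned long last_activity,
			   unsigned long timeout, bool napi_starved)
{
	unsigned long limit = napi_starved ? 2 * timeout : timeout;

	/* signed difference keeps the comparison valid across wraparound */
	return (long)(now - (last_activity + limit)) > 0;
}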
@@ -3762,8 +3805,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
@@ -4040,8 +4083,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
free_irq_cpu_rmap(netdev->rx_cpu_rmap);
netdev->rx_cpu_rmap = NULL;
}
-#endif /* CONFIG_RFS_ACCEL */
+#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 1e007a41a525..2c3d6a77ea79 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -21,6 +21,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index fc1c4ef73ba3..337c435d3ce9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d done. total pkts: %d\n",
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index ea773cfa0af6..c83a0a80d533 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -82,7 +82,6 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
unsigned long irqflags;
int nq_work = 0;
int aq_work = 0;
- int credits;
/* Don't process AdminQ when it's not up */
if (!pdsc_adminq_inc_if_up(pdsc)) {
@@ -128,11 +127,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
credits:
/* Return the interrupt credits, one for each completion */
- credits = nq_work + aq_work;
- if (credits)
- pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
- credits,
- PDS_CORE_INTR_CRED_REARM);
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ nq_work + aq_work,
+ PDS_CORE_INTR_CRED_REARM);
refcount_dec(&pdsc->adminq_refcnt);
}
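/*
 * [Editor's note] The worker now returns interrupt credits unconditionally
 * -- even zero -- together with the REARM flag, and the ISR below no longer
 * unmasks the vector itself, so rearming happens exactly once per pass.
 * Sketch of the credits word (the flag's bit position is assumed here,
 * illustration only, not the real register layout):
 */
#include <stdint.h>

#define INTR_CRED_REARM (1u << 16) /* assumed bit, illustration only */

static inline uint32_t intr_credits_word(int nq_work, int aq_work)
{
	/* one credit per processed completion; REARM re-enables the IRQ */
	return (uint32_t)(nq_work + aq_work) | INTR_CRED_REARM;
}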
@@ -157,7 +154,6 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
qcq = &pdsc->adminqcq;
queue_work(pdsc->wq, &qcq->work);
- pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
refcount_dec(&pdsc->adminq_refcnt);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index fd1a5149c003..2babea110991 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -180,6 +180,9 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
struct pds_auxiliary_dev *padev;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
padev = pf->vfs[cf->vf_id].padev;
@@ -198,14 +201,27 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
{
struct pds_auxiliary_dev *padev;
- enum pds_core_vif_types vt;
char devname[PDS_DEVNAME_LEN];
+ enum pds_core_vif_types vt;
+ unsigned long mask;
u16 vt_support;
int client_id;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
+ mask = BIT_ULL(PDSC_S_FW_DEAD) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (cf->state & mask) {
+ dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
+ __func__, cf->state);
+ err = -ENXIO;
+ goto out_unlock;
+ }
+
/* We only support vDPA so far, so it is the only one to
* be verified that it is available in the Core device and
* enabled in the devlink param. In the future this might
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 7658a7286767..9662ee72814c 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -129,6 +129,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
if (index < 0)
return index;
qcq->intx = index;
+ qcq->cq.bound_intr = &pdsc->intr_info[index];
return 0;
}
@@ -222,7 +223,6 @@ int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
goto err_out_free_irq;
}
- qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
qcq->cq.num_descs = num_descs;
qcq->cq.desc_size = cq_desc_size;
qcq->cq.tail_idx = 0;
@@ -300,6 +300,17 @@ err_out:
return err;
}
+static void pdsc_core_uninit(struct pdsc *pdsc)
+{
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+}
+
static int pdsc_core_init(struct pdsc *pdsc)
{
union pds_core_dev_comp comp = {};
@@ -310,9 +321,32 @@ static int pdsc_core_init(struct pdsc *pdsc)
struct pds_core_dev_init_data_in cidi;
u32 dbid_count;
u32 dbpage_num;
+ int numdescs;
size_t sz;
int err;
+ /* Scale the descriptor ring length based on number of CPUs and VFs */
+ numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+ numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+ numdescs = roundup_pow_of_two(numdescs);
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ return err;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_uninit;
+
cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
@@ -336,7 +370,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
if (err) {
dev_err(pdsc->dev, "Device init command failed: %pe\n",
ERR_PTR(err));
- return err;
+ goto err_out_uninit;
}
pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
@@ -346,7 +380,8 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
if (!pdsc->kern_dbpage) {
dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_uninit;
}
pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
@@ -359,6 +394,10 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->last_eid = 0;
+ return 0;
+
+err_out_uninit:
+ pdsc_core_uninit(pdsc);
return err;
}
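/*
 * [Editor's sketch] Moving the queue allocations into pdsc_core_init()
 * pairs them with a single idempotent pdsc_core_uninit() unwind label --
 * the standard kernel goto-cleanup idiom. Generic standalone model:
 */
#include <stdlib.h>

struct ctx { void *a, *b; };

static void ctx_uninit(struct ctx *c)
{
	free(c->b); c->b = NULL;	/* NULL-safe, so partial init is fine */
	free(c->a); c->a = NULL;
}

static int ctx_init(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		return -1;

	c->b = malloc(64);
	if (!c->b)
		goto err_uninit;

	return 0;

err_uninit:
	ctx_uninit(c);
	return -1;
}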
@@ -401,38 +440,12 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
int pdsc_setup(struct pdsc *pdsc, bool init)
{
- int numdescs;
int err;
err = pdsc_dev_init(pdsc);
if (err)
return err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
- PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
- numdescs,
- sizeof(union pds_core_adminq_cmd),
- sizeof(union pds_core_adminq_comp),
- 0, &pdsc->adminqcq);
- if (err)
- goto err_out_teardown;
-
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
- PDS_CORE_QCQ_F_NOTIFYQ,
- PDSC_NOTIFYQ_LENGTH,
- sizeof(struct pds_core_notifyq_cmd),
- sizeof(union pds_core_notifyq_comp),
- 0, &pdsc->notifyqcq);
- if (err)
- goto err_out_teardown;
-
- /* NotifyQ rides on the AdminQ interrupt */
- pdsc->notifyqcq.intx = pdsc->adminqcq.intx;
-
/* Set up the Core with the AdminQ and NotifyQ info */
err = pdsc_core_init(pdsc);
if (err)
@@ -458,35 +471,20 @@ err_out_teardown:
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
- int i;
-
if (!pdsc->pdev->is_virtfn)
pdsc_devcmd_reset(pdsc);
if (pdsc->adminqcq.work.func)
cancel_work_sync(&pdsc->adminqcq.work);
- pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
- pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ pdsc_core_uninit(pdsc);
if (removing) {
kfree(pdsc->viftype_status);
pdsc->viftype_status = NULL;
}
- if (pdsc->intr_info) {
- for (i = 0; i < pdsc->nintrs; i++)
- pdsc_intr_free(pdsc, i);
-
- kfree(pdsc->intr_info);
- pdsc->intr_info = NULL;
- pdsc->nintrs = 0;
- }
-
- if (pdsc->kern_dbpage) {
- iounmap(pdsc->kern_dbpage);
- pdsc->kern_dbpage = NULL;
- }
+ pdsc_dev_uninit(pdsc);
- pci_free_irq_vectors(pdsc->pdev);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
@@ -609,8 +607,7 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pdsc_reset_prepare(pdsc->pdev);
- pdsc_reset_done(pdsc->pdev);
+ pci_reset_function(pdsc->pdev);
}
void pdsc_health_thread(struct work_struct *work)
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 110c4b826b22..92d7657dd614 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -282,9 +282,7 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
-
-void pdsc_reset_prepare(struct pci_dev *pdev);
-void pdsc_reset_done(struct pci_dev *pdev);
+void pdsc_dev_uninit(struct pdsc *pdsc);
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 4e8579ca1c8c..6bdd02b7aa6d 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -32,8 +32,8 @@ void pdsc_debugfs_del_dev(struct pdsc *pdsc)
static int identity_show(struct seq_file *seq, void *v)
{
- struct pdsc *pdsc = seq->private;
struct pds_core_dev_identity *ident;
+ struct pdsc *pdsc = seq->private;
int vt;
ident = &pdsc->dev_ident;
@@ -106,10 +106,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
- struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry, *intr_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
- struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
struct pdsc_queue *q = &qcq->q;
struct pdsc_cq *cq = &qcq->cq;
@@ -147,6 +145,8 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
if (IS_ERR_OR_NULL(intr_dentry))
return;
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e65a1632df50..e494e1298dc9 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -316,6 +316,22 @@ static int pdsc_identify(struct pdsc *pdsc)
return 0;
}
+void pdsc_dev_uninit(struct pdsc *pdsc)
+{
+ if (pdsc->intr_info) {
+ int i;
+
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
+ }
+
+ pci_free_irq_vectors(pdsc->pdev);
+}
+
int pdsc_dev_init(struct pdsc *pdsc)
{
unsigned int nintrs;
@@ -341,10 +357,8 @@ int pdsc_dev_init(struct pdsc *pdsc)
/* Get intr_info struct array for tracking */
pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
- if (!pdsc->intr_info) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (!pdsc->intr_info)
+ return -ENOMEM;
err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
if (err != nintrs) {
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 0050c5894563..ab6133e7db42 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -45,6 +45,7 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
if (bars[i].vaddr)
pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ bars[i].vaddr = NULL;
}
}
@@ -468,19 +469,28 @@ static void pdsc_restart_health_thread(struct pdsc *pdsc)
mod_timer(&pdsc->wdtimer, jiffies + 1);
}
-void pdsc_reset_prepare(struct pci_dev *pdev)
+static void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_del(pdsc, pf);
+ }
+
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
}
-void pdsc_reset_done(struct pci_dev *pdev)
+static void pdsc_reset_done(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
struct device *dev = pdsc->dev;
@@ -510,12 +520,43 @@ void pdsc_reset_done(struct pci_dev *pdev)
pdsc_fw_up(pdsc);
pdsc_restart_health_thread(pdsc);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_add(pdsc, pf);
+ }
+}
+
+static pci_ers_result_t pdsc_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ pdsc_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void pdsc_pci_error_resume(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ pci_reset_function_locked(pdev);
}
static const struct pci_error_handlers pdsc_err_handler = {
/* FLR handling */
.reset_prepare = pdsc_reset_prepare,
.reset_done = pdsc_reset_done,
+
+ /* AER handling */
+ .error_detected = pdsc_pci_error_detected,
+ .resume = pdsc_pci_error_resume,
};
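/*
 * [Editor's note] The new AER path reuses the FLR callbacks: on a frozen
 * channel, error_detected() tears the device down via reset_prepare() and
 * asks the PCI core for a reset, while resume() forces a function reset
 * only if firmware never recovered. Model of the detection decision:
 */
#include <stdbool.h>

enum ers_result { ERS_NONE, ERS_NEED_RESET };

static enum ers_result on_pci_error(bool channel_frozen)
{
	/* only a frozen channel warrants the prepare-then-reset sequence */
	return channel_frozen ? ERS_NEED_RESET : ERS_NONE;
}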
static struct pci_driver pdsc_driver = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 18a6c8d99fa0..a2606ee3b0a5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -15,6 +15,7 @@
#include "aq_macsec.h"
#include "aq_main.h"
+#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
static void aq_ethtool_get_regs(struct net_device *ndev,
@@ -681,23 +682,19 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev,
return 0;
}
-static u32 eee_mask_to_ethtool_mask(u32 speed)
+static void eee_mask_to_ethtool_mask(unsigned long *mode, u32 speed)
{
- u32 rate = 0;
-
if (speed & AQ_NIC_RATE_EEE_10G)
- rate |= SUPPORTED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_1G)
- rate |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_100M)
- rate |= SUPPORTED_100baseT_Full;
-
- return rate;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
}
-static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
@@ -713,14 +710,14 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (err < 0)
return err;
- eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+ eee_mask_to_ethtool_mask(eee->supported, supported_rates);
if (aq_nic->aq_nic_cfg.eee_speeds)
- eee->advertised = eee->supported;
+ linkmode_copy(eee->advertised, eee->supported);
- eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+ eee_mask_to_ethtool_mask(eee->lp_advertised, rate);
- eee->eee_enabled = !!eee->advertised;
+ eee->eee_enabled = !linkmode_empty(eee->advertised);
eee->tx_lpi_enabled = eee->eee_enabled;
if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
@@ -729,7 +726,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
return 0;
}
-static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
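/*
 * [Editor's sketch] ethtool_keee replaces the old u32 SUPPORTED_* masks
 * with linkmode bitmaps, so each EEE rate sets a named link-mode bit and
 * "any mode advertised?" becomes an emptiness test instead of !!mask.
 * Userspace model of two of the primitives used above (sizes illustrative):
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define LM_BITS 128
typedef struct { uint64_t w[LM_BITS / 64]; } linkmode_t;

static void lm_set(linkmode_t *m, unsigned int bit)
{
	m->w[bit / 64] |= 1ull << (bit % 64);
}

static bool lm_empty(const linkmode_t *m)
{
	static const linkmode_t zero;	/* static, hence zero-initialized */

	return memcmp(m, &zero, sizeof(zero)) == 0;
}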
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 80245c65cc90..a806dadc4196 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -31,6 +31,20 @@ static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
priv->irq_mask |= mask;
}
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ /* Only supported with internal phys */
+ if (!intf->internal_phy)
+ return;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+}
+
void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
{
struct bcmasp_priv *priv = intf->parent;
@@ -79,6 +93,9 @@ static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
__napi_schedule_irqoff(&intf->tx_napi);
}
}
+
+ if (status & ASP_INTR2_PHY_EVENT(intf->channel))
+ phy_mac_interrupt(intf->ndev->phydev);
}
static irqreturn_t bcmasp_isr(int irq, void *data)
@@ -972,7 +989,26 @@ static void bcmasp_core_init(struct bcmasp_priv *priv)
ASP_INTR2_CLEAR);
}
-static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
{
u32 reg;
@@ -1166,6 +1202,24 @@ static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
}
}
+static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
+{
+ u32 reg, phy_lpi_overwrite;
+
+ reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
+ phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
+ ASP_EDPKT_SPARE_REG_GPHY_LPI;
+
+ if (en)
+ reg |= phy_lpi_overwrite;
+ else
+ reg &= ~phy_lpi_overwrite;
+
+ rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);
+
+ usleep_range(50, 100);
+}
+
static struct bcmasp_hw_info v20_hw_info = {
.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
.umac2fb = UMAC2FB_OFFSET,
@@ -1178,6 +1232,7 @@ static const struct bcmasp_plat_data v20_plat_data = {
.init_wol = bcmasp_init_wol_per_intf,
.enable_wol = bcmasp_enable_wol_per_intf,
.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .core_clock_select = bcmasp_core_clock_select_one,
.hw_info = &v20_hw_info,
};
@@ -1194,17 +1249,39 @@ static const struct bcmasp_plat_data v21_plat_data = {
.init_wol = bcmasp_init_wol_shared,
.enable_wol = bcmasp_enable_wol_shared,
.destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_one,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct bcmasp_plat_data v22_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_many,
.hw_info = &v21_hw_info,
+ .eee_fixup = bcmasp_eee_fixup,
};
+static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
+{
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->core_clock_select = pdata->core_clock_select;
+ priv->eee_fixup = pdata->eee_fixup;
+ priv->hw_info = pdata->hw_info;
+}
+
static const struct of_device_id bcmasp_of_match[] = {
{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ /* sentinel */ },
@@ -1265,16 +1342,13 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!pdata)
return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
- priv->hw_info = pdata->hw_info;
+ bcmasp_set_pdata(priv, pdata);
/* Enable all clocks to ensure successful probing */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
/* Switch to the main clock */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
bcmasp_intr2_mask_set_all(priv);
bcmasp_intr2_clear_all(priv);
@@ -1381,7 +1455,7 @@ static int __maybe_unused bcmasp_suspend(struct device *d)
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
- bcmasp_core_clock_select(priv, true);
+ priv->core_clock_select(priv, true);
clk_disable_unprepare(priv->clk);
@@ -1399,7 +1473,7 @@ static int __maybe_unused bcmasp_resume(struct device *d)
return ret;
/* Switch to the main clock domain */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
/* Re-enable all clocks for re-initialization */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
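The bcmasp changes above route clock selection through a per-SoC function pointer chosen from platform data at probe time, so ASP v2.2 can flip two clock muxes where older revisions flip one, and only v2.2 gets the optional EEE fixup. A minimal user-space sketch of that dispatch pattern follows, with invented names; the optional hook is NULL-checked the same way the driver guards eee_fixup.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct priv;

/* per-compatible platform data, selected once at probe */
struct plat_data {
	const char *compatible;
	void (*core_clock_select)(struct priv *p, bool slow);
	void (*eee_fixup)(struct priv *p, bool en); /* optional, may be NULL */
};

struct priv {
	void (*core_clock_select)(struct priv *p, bool slow);
	void (*eee_fixup)(struct priv *p, bool en);
};

static void clock_select_one(struct priv *p, bool slow)
{
	(void)p;
	printf("one clock mux -> %s\n", slow ? "slow" : "main");
}

static void clock_select_many(struct priv *p, bool slow)
{
	(void)p;
	printf("core+cpu clock muxes -> %s\n", slow ? "slow" : "main");
}

static void eee_fixup(struct priv *p, bool en)
{
	(void)p;
	printf("eee fixup %s\n", en ? "on" : "off");
}

static const struct plat_data match_table[] = {
	{ "brcm,asp-v2.0", clock_select_one,  NULL },
	{ "brcm,asp-v2.1", clock_select_one,  NULL },
	{ "brcm,asp-v2.2", clock_select_many, eee_fixup },
};

/* mirrors bcmasp_set_pdata(): copy the ops into the private struct */
static void set_pdata(struct priv *p, const struct plat_data *pd)
{
	p->core_clock_select = pd->core_clock_select;
	p->eee_fixup = pd->eee_fixup;
}

int main(void)
{
	struct priv p = { 0 };

	for (size_t i = 0; i < sizeof(match_table) / sizeof(match_table[0]); i++)
		if (!strcmp(match_table[i].compatible, "brcm,asp-v2.2"))
			set_pdata(&p, &match_table[i]);

	if (p.core_clock_select)
		p.core_clock_select(&p, false);
	if (p.eee_fixup) /* optional hook, present on v2.2 only */
		p.eee_fixup(&p, true);
	return 0;
}
```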
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index ec90add6b03e..f93cb3da44b0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -19,6 +19,8 @@
#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
#define ASP_INTR2_UMC0_WAKE BIT(22)
#define ASP_INTR2_UMC1_WAKE BIT(28)
+#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \
+ BIT(24) | BIT(25))
#define ASP_WAKEUP_INTR2_OFFSET 0x1200
#define ASP_WAKEUP_INTR2_STATUS 0x0
@@ -33,6 +35,12 @@
#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
#define ASP_WAKEUP_INTR2_FW BIT(4)
+#define ASP_CTRL2_OFFSET 0x2000
+#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0
+#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4
+#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0)
+
#define ASP_TX_ANALYTICS_OFFSET 0x4c000
#define ASP_TX_ANALYTICS_CTRL 0x0
@@ -134,8 +142,11 @@ enum asp_rx_net_filter_block {
#define ASP_EDPKT_RX_PKT_CNT 0x138
#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
#define ASP_EDPKT_HDR_OUT_CNT 0x140
+#define ASP_EDPKT_SPARE_REG 0x174
+#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4)
+#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3)
-#define ASP_CTRL 0x101000
+#define ASP_CTRL_OFFSET 0x101000
#define ASP_CTRL_ASP_SW_INIT 0x04
#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
@@ -306,6 +317,7 @@ struct bcmasp_intf {
struct bcmasp_desc *rx_edpkt_cpu;
dma_addr_t rx_edpkt_dma_addr;
dma_addr_t rx_edpkt_dma_read;
+ dma_addr_t rx_edpkt_dma_valid;
/* RX buffer prefetcher ring */
void *rx_ring_cpu;
@@ -337,7 +349,7 @@ struct bcmasp_intf {
int wol_irq;
unsigned int wol_irq_enabled:1;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,6 +384,8 @@ struct bcmasp_plat_data {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
struct bcmasp_hw_info *hw_info;
};
@@ -390,6 +404,8 @@ struct bcmasp_priv {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
void __iomem *base;
struct bcmasp_hw_info *hw_info;
@@ -530,7 +546,8 @@ BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
-BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -541,6 +558,8 @@ void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en);
+
void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
extern const struct ethtool_ops bcmasp_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index ce6a3d56fb23..484fc2b5626f 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -360,29 +360,26 @@ void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
umac_wl(intf, reg, UMC_EEE_CTRL);
intf->eee.eee_enabled = enable;
- intf->eee.eee_active = enable;
}
-static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
int ret;
if (!dev->phydev)
@@ -399,7 +396,6 @@ static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
}
umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.eee_active = ret >= 0;
intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
bcmasp_eee_enable_set(intf, true);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 6ad1366270f7..dd06b68b33ed 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -382,6 +382,7 @@ static void bcmasp_netif_start(struct net_device *dev)
bcmasp_enable_rx_irq(intf, 1);
bcmasp_enable_tx_irq(intf, 1);
+ bcmasp_enable_phy_irq(intf, 1);
phy_start(dev->phydev);
}
@@ -607,6 +608,7 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
+ bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -658,8 +660,8 @@ static void bcmasp_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_wl(intf, reg, UMC_CMD);
- intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, intf->eee.eee_active);
+ active = phy_init_eee(phydev, 0) >= 0;
+ bcmasp_eee_enable_set(intf, active);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -673,40 +675,78 @@ static void bcmasp_adj_link(struct net_device *dev)
phy_print_status(phydev);
}
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct page *buffer_pg;
- dma_addr_t dma;
- void *p;
- u32 reg;
- int ret;
+ /* Alloc RX */
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
if (!buffer_pg)
return -ENOMEM;
- dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(kdev, dma)) {
- __free_pages(buffer_pg, intf->rx_buf_order);
- return -ENOMEM;
- }
intf->rx_ring_cpu = page_to_virt(buffer_pg);
- intf->rx_ring_dma = dma;
- intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, intf->rx_ring_dma))
+ goto free_rx_buffer;
+
+ intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->rx_edpkt_dma_addr, GFP_KERNEL);
+ if (!intf->rx_edpkt_cpu)
+ goto free_rx_buffer_dma;
+
+ /* Alloc TX */
+ intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->tx_spb_dma_addr, GFP_KERNEL);
+ if (!intf->tx_spb_cpu)
+ goto free_rx_edpkt_dma;
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto free_rx_ring;
- }
- intf->rx_edpkt_cpu = p;
+ if (!intf->tx_cbs)
+ goto free_tx_spb_dma;
- netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+ return 0;
+
+free_tx_spb_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+free_rx_buffer:
+ __free_pages(buffer_pg, intf->rx_buf_order);
+
+ return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* RX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+ /* TX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+ kfree(intf->tx_cbs);
+}
+
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
intf->rx_edpkt_index = 0;
@@ -732,64 +772,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_END);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_VALID);
-
- reg = UMAC2FB_CFG_DEFAULT_EN |
- ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
- reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
- umac2fb_wl(intf, reg, UMAC2FB_CFG);
-
- return 0;
-
-free_rx_ring:
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
- return ret;
+ umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+ UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+ UMAC2FB_CFG);
}
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
- intf->rx_edpkt_dma_addr);
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
- struct device *kdev = &intf->parent->pdev->dev;
- void *p;
- int ret;
-
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
- GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- intf->tx_spb_cpu = p;
+ /* Restart from index 0 */
intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
- intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
- GFP_KERNEL);
- if (!intf->tx_cbs) {
- ret = -ENOMEM;
- goto free_tx_spb;
- }
-
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
- netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
-
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
@@ -805,26 +804,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
- return 0;
-
-free_tx_spb:
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- /* Free descriptors */
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- /* Free cbs */
- kfree(intf->tx_cbs);
}
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
@@ -912,12 +891,10 @@ static void bcmasp_netif_deinit(struct net_device *dev)
/* Disable interrupts */
bcmasp_enable_tx_irq(intf, 0);
bcmasp_enable_rx_irq(intf, 0);
+ bcmasp_enable_phy_irq(intf, 0);
netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-
netif_napi_del(&intf->rx_napi);
- bcmasp_reclaim_free_all_rx(intf);
}
static int bcmasp_stop(struct net_device *dev)
@@ -931,6 +908,8 @@ static int bcmasp_stop(struct net_device *dev)
bcmasp_netif_deinit(dev);
+ bcmasp_reclaim_free_buffers(intf);
+
phy_disconnect(dev->phydev);
/* Disable internal EPHY or external PHY */
@@ -1051,6 +1030,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
goto err_phy_disable;
}
+ if (intf->internal_phy)
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
+
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
} else if (!intf->wolopts) {
@@ -1072,17 +1054,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
intf->old_link = -1;
intf->old_pause = -1;
- ret = bcmasp_init_tx(intf);
- if (ret)
- goto err_phy_disconnect;
-
- /* Turn on asp */
+ bcmasp_init_tx(intf);
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
bcmasp_enable_tx(intf, 1);
- ret = bcmasp_init_rx(intf);
- if (ret)
- goto err_reclaim_tx;
-
+ bcmasp_init_rx(intf);
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
/* Turn on UniMAC TX/RX */
@@ -1096,12 +1073,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
-err_reclaim_tx:
- netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
- if (phydev)
- phy_disconnect(phydev);
err_phy_disable:
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
@@ -1117,13 +1088,24 @@ static int bcmasp_open(struct net_device *dev)
netif_dbg(intf, ifup, dev, "bcmasp open\n");
- ret = clk_prepare_enable(intf->parent->clk);
+ ret = bcmasp_alloc_buffers(intf);
if (ret)
return ret;
- ret = bcmasp_netif_init(dev, true);
+ ret = clk_prepare_enable(intf->parent->clk);
if (ret)
+ goto err_free_mem;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret) {
clk_disable_unprepare(intf->parent->clk);
+ goto err_free_mem;
+ }
+
+ return ret;
+
+err_free_mem:
+ bcmasp_reclaim_free_buffers(intf);
return ret;
}
@@ -1332,6 +1314,9 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, true);
+
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}
@@ -1380,6 +1365,9 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, false);
+
reg = umac_rl(intf, UMC_MPD_CTRL);
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
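The bcmasp_intf.c rework above pulls every allocation that can fail out of netif_init and into a single bcmasp_alloc_buffers() called from ndo_open, before the clocks come up, with a reverse-order goto unwind; init then becomes infallible register programming. A small user-space sketch of that reordering, with simplified names:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct intf { void *rx_buf; void *tx_buf; int clk_on; };

/* everything that can return -ENOMEM happens here, hw still powered down */
static int alloc_buffers(struct intf *i)
{
	i->rx_buf = malloc(4096);
	if (!i->rx_buf)
		return -ENOMEM;
	i->tx_buf = malloc(4096);
	if (!i->tx_buf) {
		free(i->rx_buf);
		return -ENOMEM;
	}
	return 0;
}

static void free_buffers(struct intf *i)
{
	free(i->tx_buf);
	free(i->rx_buf);
}

static int clk_enable(struct intf *i) { i->clk_on = 1; return 0; }
static void clk_disable(struct intf *i) { i->clk_on = 0; }

/* infallible after the split: pure ring/register programming */
static int netif_init(struct intf *i) { (void)i; return 0; }

static int intf_open(struct intf *i)
{
	int ret;

	ret = alloc_buffers(i);
	if (ret)
		return ret;

	ret = clk_enable(i);
	if (ret)
		goto err_free_mem;

	ret = netif_init(i);
	if (ret) {
		clk_disable(i);
		goto err_free_mem;
	}
	return 0;

err_free_mem:
	free_buffers(i);
	return ret;
}

int main(void)
{
	struct intf i = { 0 };
	printf("open -> %d\n", intf_open(&i));
	return 0;
}
```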
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb5580..c9b6acd8c892 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
@@ -3537,7 +3538,7 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_inner_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3569,7 +3570,7 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3612,7 +3613,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+ u8 hlen = skb_network_offset(skb) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -3620,8 +3621,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
- pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ pbd->ip_hlen_w = skb_network_header_len(skb) >> 1;
hlen += pbd->ip_hlen_w;
@@ -3666,8 +3666,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
u8 outerip_off, outerip_len = 0;
/* from outer IP to transport */
- hlen_w = (skb_inner_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ hlen_w = skb_inner_transport_offset(skb) >> 1;
/* transport len */
hlen_w += inner_tcp_hdrlen(skb) >> 1;
@@ -3713,7 +3712,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
0, IPPROTO_TCP, 0));
}
- outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ outerip_off = (skb_network_offset(skb)) >> 1;
*global_data |=
outerip_off |
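The bnx2x_cmn.c hunks above replace open-coded pointer subtraction with the skb offset helpers (skb_transport_offset(), skb_network_offset(), skb_network_header_len()). Both forms compute the same byte offset from the start of the linear data; the helpers just keep the intent and the types obvious. A sketch of the equivalence, with skb replaced by a toy struct:

```c
#include <assert.h>
#include <stdio.h>

struct pkt {
	unsigned char data[128];
	unsigned int network_off;   /* like skb->network_header, as offset */
	unsigned int transport_off; /* like skb->transport_header */
};

static unsigned char *network_header(const struct pkt *p)
{
	return (unsigned char *)p->data + p->network_off;
}

static unsigned char *transport_header(const struct pkt *p)
{
	return (unsigned char *)p->data + p->transport_off;
}

/* the helper form: offset from start of packet data */
static int transport_offset(const struct pkt *p)
{
	return (int)(transport_header(p) - p->data);
}

int main(void)
{
	struct pkt p = { .network_off = 14, .transport_off = 34 };

	/* old style: pointer subtraction repeated at each call site */
	int off_old = (int)(transport_header(&p) - p.data);
	/* new style: one named helper */
	int off_new = transport_offset(&p);
	/* like skb_network_header_len(): IP header length */
	int ip_hlen = (int)(transport_header(&p) - network_header(&p));

	assert(off_old == off_new);
	/* the BD fields want 16-bit-word units, hence the >> 1 above */
	printf("l4 start (words): %d, ip hdr len: %d\n", off_new >> 1, ip_hlen);
	return 0;
}
```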
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 81d232e6d05f..58956ed8f531 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -2081,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Storage only interface"
};
-static u32 bnx2x_eee_to_adv(u32 eee_adv)
+static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv)
{
- u32 modes = 0;
-
if (eee_adv & SHMEM_EEE_100M_ADV)
- modes |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_1G_ADV)
- modes |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_10G_ADV)
- modes |= ADVERTISED_10000baseT_Full;
-
- return modes;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
}
-static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift)
{
u32 eee_adv = 0;
- if (modes & ADVERTISED_100baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_100M_ADV;
- if (modes & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_1G_ADV;
- if (modes & ADVERTISED_10000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_10G_ADV;
return eee_adv << shift;
}
-static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2120,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
eee_cfg = bp->link_vars.eee_status;
- edata->supported =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
- SHMEM_EEE_SUPPORTED_SHIFT);
+ bnx2x_eee_to_linkmode(edata->supported,
+ (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ bnx2x_eee_to_linkmode(edata->advertised,
+ (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
- SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->lp_advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ bnx2x_eee_to_linkmode(edata->lp_advertised,
+ (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
/* SHMEM value is in 16u units --> Convert to 1u units. */
edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
@@ -2141,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2162,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- advertised = bnx2x_adv_to_eee(edata->advertised,
- SHMEM_EEE_ADV_STATUS_SHIFT);
+ advertised = bnx2x_linkmode_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
"Direct manipulation of EEE advertisement is not supported\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513ffe4..ea310057fe3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
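The bnx2x_link.c fixes above all enforce one length-accounting rule for the version-string formatters: check the remaining length (including zero) before writing, and subtract only the visible characters, since the NUL terminator is not consumed from the caller's budget. A sketch of that rule under simplified check values (the real functions require 10 chars and format different contents):

```c
#include <stdio.h>

static int format_ver(unsigned int v, char *str, unsigned short *len)
{
	/* 4 visible chars + NUL, so require room for 5 */
	if (*len < 5)
		return -1;

	str[0] = 'A' + (v & 0xf);
	str[1] = 'A' + ((v >> 4) & 0xf);
	str[2] = 'A' + ((v >> 8) & 0xf);
	str[3] = 'A' + ((v >> 12) & 0xf);
	str[4] = '\0';
	*len -= 4; /* was "-= 5" upstream: over-counted the NUL */
	return 0;
}

int main(void)
{
	char buf[16];
	unsigned short remain = sizeof(buf);

	if (!format_ver(0x4321, buf, &remain))
		printf("ver=%s remain=%u\n", buf, (unsigned)remain);
	return 0;
}
```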
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 39845d556baf..493b724848c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = {
static struct workqueue_struct *bnxt_pf_wq;
+#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+
+const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
+ .ports = {
+ .src = 0,
+ .dst = 0,
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_NONE,
+ .dst = BNXT_IPV6_MASK_NONE,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_ALL,
+ .dst = BNXT_IPV6_MASK_ALL,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v4addrs = {
+ .src = cpu_to_be32(0xffffffff),
+ .dst = cpu_to_be32(0xffffffff),
+ },
+ },
+};
+
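The mask constants added above let bnxt carry explicit key/mask pairs per ntuple filter instead of per-field "match" flag bits: a filter matches when (packet & mask) == (key & mask), so an all-ones mask means "match this field exactly" and an all-zeros mask means "wildcard". A sketch of that semantics with a toy IPv4 tuple:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flow_key  { uint32_t saddr, daddr; uint16_t sport, dport; };
struct flow_mask { uint32_t saddr, daddr; uint16_t sport, dport; };

/* analogous to BNXT_FLOW_IPV4_MASK_ALL: every field matched exactly */
static const struct flow_mask MASK_ALL = {
	.saddr = 0xffffffff, .daddr = 0xffffffff,
	.sport = 0xffff,     .dport = 0xffff,
};

static bool flow_match(const struct flow_key *pkt,
		       const struct flow_key *key,
		       const struct flow_mask *m)
{
	return (pkt->saddr & m->saddr) == (key->saddr & m->saddr) &&
	       (pkt->daddr & m->daddr) == (key->daddr & m->daddr) &&
	       (pkt->sport & m->sport) == (key->sport & m->sport) &&
	       (pkt->dport & m->dport) == (key->dport & m->dport);
}

int main(void)
{
	struct flow_key key = { 0x0a000001, 0x0a000002, 1234, 80 };
	struct flow_key pkt = key;
	struct flow_mask src_only = MASK_ALL;

	src_only.dport = 0; /* zero mask: wildcard the destination port */
	pkt.dport = 443;

	printf("exact: %d, dst-port wildcarded: %d\n",
	       flow_match(&pkt, &key, &MASK_ALL),
	       flow_match(&pkt, &key, &src_only));
	return 0;
}
```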
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
@@ -4168,8 +4211,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
- num_vnics += bp->rx_nr_rings;
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ num_vnics++;
+ else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ num_vnics += bp->rx_nr_rings;
+ }
#endif
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
@@ -4186,6 +4233,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
static void bnxt_init_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int i;
for (i = 0; i < bp->nr_vnics; i++) {
@@ -4199,20 +4247,33 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (!i) {
+ if (i == BNXT_VNIC_DEFAULT) {
u8 *key = (void *)vnic->rss_hash_key;
int k;
+ if (!bp->rss_hash_key_valid &&
+ !bp->rss_hash_key_updated) {
+ get_random_bytes(bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bp->rss_hash_key_updated)
+ continue;
+
+ bp->rss_hash_key_updated = false;
+ bp->rss_hash_key_valid = true;
+
bp->toeplitz_prefix = 0;
- get_random_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
for (k = 0; k < 8; k++) {
bp->toeplitz_prefix <<= 8;
bp->toeplitz_prefix |= key[k];
}
} else {
- memcpy(vnic->rss_hash_key,
- bp->vnic_info[0].rss_hash_key,
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
HW_HASH_KEY_SIZE);
}
}
@@ -4798,6 +4859,44 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
}
}
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ u8 type = fltr->type, flags = fltr->flags;
+
+ INIT_LIST_HEAD(&fltr->list);
+ if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
+ (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
+ list_add_tail(&fltr->list, &bp->usr_fltr_list);
+}
+
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ if (!list_empty(&fltr->list))
+ list_del_init(&fltr->list);
+}
+
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
+ continue;
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+}
+
+static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ hlist_del(&fltr->hash);
+ bnxt_del_one_usr_fltr(bp, fltr);
+ if (fltr->flags) {
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+}
+
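The helpers just added rely on the "on a list iff the node is non-empty" idiom: INIT_LIST_HEAD() makes a node self-linked, so list_empty(&fltr->list) doubles as a cheap "is this filter user-owned?" test in the bulk-free paths below. A self-contained sketch with a minimal doubly-linked list standing in for the kernel's list_head:

```c
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* unlink and re-self-link, so list_empty() is true again afterwards */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct filter { struct list_head list; int user_created; };

int main(void)
{
	struct list_head usr_fltr_list;
	struct filter a = { .user_created = 1 }, b = { .user_created = 0 };

	INIT_LIST_HEAD(&usr_fltr_list);
	INIT_LIST_HEAD(&a.list);
	INIT_LIST_HEAD(&b.list);

	if (a.user_created) /* mirrors bnxt_insert_usr_fltr() */
		list_add_tail(&a.list, &usr_fltr_list);

	/* bulk-free path: skip anything still tracked on the user list */
	printf("a tracked: %d, b tracked: %d\n",
	       !list_empty(&a.list), !list_empty(&b.list));

	list_del_init(&a.list); /* like bnxt_del_one_usr_fltr() */
	printf("a tracked after del: %d\n", !list_empty(&a.list));
	return 0;
}
```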
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
@@ -4813,12 +4912,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
head = &bp->ntp_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bnxt_del_l2_filter(bp, fltr->l2_fltr);
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
if (!all)
@@ -4840,7 +4937,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -4859,14 +4956,10 @@ static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
head = &bp->l2_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- if (fltr->base.flags) {
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- }
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
}
@@ -5039,8 +5132,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
- bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
- BNXT_VNIC_UCAST_FLAG;
+ bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
+ BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
+ bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
+ BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
+
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
@@ -5342,6 +5440,7 @@ void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
if (fltr->base.flags) {
clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
@@ -5480,13 +5579,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
int bit_id;
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_MAX_FLTR, 0);
+ bp->max_fltr, 0);
if (bit_id < 0)
return -ENOMEM;
fltr->base.sw_id = (u16)bit_id;
+ bp->ntp_fltr_count++;
}
head = &bp->l2_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
atomic_set(&fltr->refcnt, 1);
return 0;
@@ -5519,6 +5620,40 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
return fltr;
}
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr) {
+ fltr = ERR_PTR(-EEXIST);
+ goto l2_filter_exit;
+ }
+ fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
+ if (!fltr) {
+ fltr = ERR_PTR(-ENOMEM);
+ goto l2_filter_exit;
+ }
+ fltr->base.flags = flags;
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ if (rc) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr);
+ return ERR_PTR(rc);
+ }
+
+l2_filter_exit:
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return fltr;
+}
+
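bnxt_alloc_new_l2_filter() above follows the classic lookup-or-create shape: take the lock, fail with -EEXIST if the key is already hashed, otherwise allocate and insert before dropping the lock, returning errors as encoded pointers. A user-space sketch of that shape, with the kernel's ERR_PTR/IS_ERR emulated and a single bucket for brevity:

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* kernel-style encoded error pointers, emulated */
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(p)    ((unsigned long)(p) >= (unsigned long)-4095)
#define PTR_ERR(p)   ((long)(p))

struct filter { int key; struct filter *next; };

static struct filter *table; /* one hash bucket stands in for the table */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct filter *lookup(int key)
{
	for (struct filter *f = table; f; f = f->next)
		if (f->key == key)
			return f;
	return NULL;
}

static struct filter *alloc_new_filter(int key)
{
	struct filter *f;

	pthread_mutex_lock(&lock);
	if (lookup(key)) {
		f = ERR_PTR(-EEXIST); /* refuse to create a duplicate */
		goto out;
	}
	f = calloc(1, sizeof(*f));
	if (!f) {
		f = ERR_PTR(-ENOMEM);
		goto out;
	}
	f->key = key;
	f->next = table; /* insert while still holding the lock */
	table = f;
out:
	pthread_mutex_unlock(&lock);
	return f;
}

int main(void)
{
	struct filter *f1 = alloc_new_filter(7);
	struct filter *f2 = alloc_new_filter(7);

	printf("first: %s, second: %ld\n",
	       IS_ERR(f1) ? "err" : "ok", IS_ERR(f2) ? PTR_ERR(f2) : 0L);
	return 0;
}
```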
static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
{
#ifdef CONFIG_BNXT_SRIOV
@@ -5650,15 +5785,38 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
mask[i] = cpu_to_be32(~0);
}
+static void
+bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
+ struct hwrm_cfa_ntuple_filter_alloc_input *req,
+ u16 rxq)
+{
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ struct bnxt_vnic_info *vnic;
+ u32 enables;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req->enables |= cpu_to_le32(enables);
+ req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
+ } else {
+ u32 flags;
+
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req->flags |= cpu_to_le32(flags);
+ req->dst_id = cpu_to_le16(rxq);
+ }
+}
+
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
+ struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
- u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
@@ -5668,16 +5826,16 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
l2_fltr = fltr->l2_fltr;
req->l2_filter_id = l2_fltr->base.filter_id;
-
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->base.rxq);
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ req->flags =
+ cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
+ } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
@@ -5687,25 +5845,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
- }
+ *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
} else {
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5713,14 +5861,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
- }
+ req->src_port = keys->ports.src;
+ req->src_port_mask = masks->ports.src;
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = masks->ports.dst;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -5971,7 +6115,10 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
for (i = 0; i < tbl_size; i++) {
u16 ring_id, j;
- j = bp->rss_indir_tbl[i];
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else
+ j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
ring_id = rxr->rx_ring_struct.fw_ring_id;
@@ -5985,10 +6132,13 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
- else
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+ } else {
bnxt_fill_hw_rss_tbl(bp, vnic);
+ }
if (bp->rss_hash_delta) {
req->hash_type = cpu_to_le32(bp->rss_hash_delta);
@@ -6061,7 +6211,7 @@ exit:
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_rss_qcfg_output *resp;
struct hwrm_vnic_rss_qcfg_input *req;
@@ -6165,6 +6315,7 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
@@ -6194,8 +6345,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req->rss_rule =
- cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
@@ -6292,7 +6442,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == 0)
+ if (vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6351,6 +6501,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
}
if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6918,6 +7076,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps =
le32_to_cpu(resp->alloc_hw_ring_grps);
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
@@ -6973,8 +7132,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
@@ -6983,52 +7141,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cp_p5 ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- }
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ enables |= hwr->grp ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
+ 0;
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
+ req->num_msix = cpu_to_le16(hwr->cp);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
- bnxt_rfs_supported(bp))
- req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -7036,51 +7184,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp_p5 ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7094,25 +7237,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7123,15 +7264,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
@@ -7174,6 +7312,24 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
return cp + ulp_stat;
}
+static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ if (!hwr->grp)
+ return 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
+
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ rss_ctx *= hwr->vnic;
+ return rss_ctx;
+ }
+ if (BNXT_VF(bp))
+ return BNXT_VF_MAX_RSS_CTX;
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
+ return hwr->grp + 1;
+ return 1;
+}
+
/* Check if a default RSS map needs to be set up. This function is only
* used on older firmware that does not require reserving RX rings.
*/
@@ -7189,13 +7345,24 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
}
}
+static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return 2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return rx_rings + 1;
+ }
+ return 1;
+}
+
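bnxt_get_total_vnics() above centralizes the VNIC accounting rule used twice in this file: one default VNIC always; with aRFS enabled, either a single dedicated ntuple VNIC (chips that support it) or one extra VNIC per RX ring (older chips). A direct sketch of that decision, with the capability flags reduced to booleans:

```c
#include <stdbool.h>
#include <stdio.h>

struct caps { bool rfs; bool ntuple_vnic; bool chip_p5_plus; };

static int total_vnics(const struct caps *c, int rx_rings)
{
	if (c->rfs) {
		if (c->ntuple_vnic)
			return 2;            /* default + one ntuple VNIC */
		if (!c->chip_p5_plus)
			return rx_rings + 1; /* default + one per RX ring */
	}
	return 1;                            /* default VNIC only */
}

int main(void)
{
	struct caps legacy = { .rfs = true };
	struct caps p7 = { .rfs = true, .ntuple_vnic = true,
			   .chip_p5_plus = true };

	printf("legacy chip, 8 rings: %d vnics\n", total_vnics(&legacy, 8));
	printf("ntuple-vnic chip, 8 rings: %d vnics\n", total_vnics(&p7, 8));
	return 0;
}
```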
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
int rx = bp->rx_nr_rings, stat;
- int vnic = 1, grp = rx;
+ int vnic, grp = rx;
if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
bp->hwrm_spec_code >= 0x10601)
@@ -7210,9 +7377,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+
+ vnic = bnxt_get_total_vnics(bp, rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
@@ -7227,47 +7394,65 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
+}
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, rc;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -7279,17 +7464,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- tx_cp = bnxt_num_tx_to_cp(bp, tx);
- cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -7306,9 +7491,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
@@ -7317,9 +7502,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -7327,8 +7510,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -7342,15 +7524,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -7368,20 +7547,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -8709,6 +8883,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -8717,12 +8898,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
- pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
- pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
- pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
- pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
- pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
- pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
@@ -8825,6 +9000,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
+
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
+
hwrm_cfa_adv_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
@@ -9689,10 +9872,28 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
return __bnxt_setup_vnic(bp, vnic_id);
}
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+ u16 start_rx_ring_idx, int rx_rings)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ return bnxt_setup_vnic(bp, vnic_id);
+}
+
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
+ bp->rx_nr_rings);
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
@@ -9708,14 +9909,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
- break;
- }
- rc = bnxt_setup_vnic(bp, vnic_id);
- if (rc)
+ if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
break;
}
return rc;
@@ -9756,7 +9950,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int rc = 0;
unsigned int rx_nr_rings = bp->rx_nr_rings;
@@ -9785,7 +9979,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9794,7 +9988,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, 0);
+ rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10621,10 +10815,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
@@ -10766,7 +10960,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->module_status = resp->module_status;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds;
eee->eee_active = 0;
@@ -10775,8 +10969,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_active = 1;
fw_speeds = le16_to_cpu(
resp->link_partner_adv_eee_link_speed_mask);
- eee->lp_advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
}
/* Pull initial EEE config */
@@ -10786,8 +10979,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_enabled = 1;
fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
- eee->advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
if (resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
@@ -10957,7 +11149,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
static void bnxt_hwrm_set_eee(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
if (eee->eee_enabled) {
u16 eee_speeds;
@@ -11087,6 +11279,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
+ hw_resc->resv_rsscos_ctxs = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
@@ -11322,22 +11515,25 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
eee->eee_enabled = 0;
return false;
}
- if (eee->advertised & ~advertising) {
- eee->advertised = advertising & eee->supported;
+ if (linkmode_andnot(tmp, eee->advertised, advertising)) {
+ linkmode_and(eee->advertised, advertising,
+ eee->supported);
return false;
}
}
@@ -11442,6 +11638,42 @@ static int bnxt_reinit_after_abort(struct bnxt *bp)
return rc;
}
+static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ struct bnxt_ntuple_filter *ntp_fltr;
+ struct bnxt_l2_filter *l2_fltr;
+
+ if (list_empty(&fltr->list))
+ return;
+
+ if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
+ ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ ntp_fltr->l2_fltr = l2_fltr;
+ if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
+ fltr->sw_id);
+ }
+ } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
+ l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
+ if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
+ fltr->sw_id);
+ }
+ }
+}
+
+static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
+ bnxt_cfg_one_usr_fltr(bp, usr_fltr);
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
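bnxt_cfg_usr_fltrs() above walks the user-filter list with list_for_each_entry_safe() because a failed re-apply deletes the filter (bnxt_del_ntp_filter() / bnxt_del_l2_filter() unlink it via bnxt_del_one_usr_fltr()), which would invalidate a plain iterator. A self-contained userspace analogue of that delete-while-walking pattern, with hypothetical struct fltr / reapply() names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical filter node; reapply() pretends filter 2 cannot be
 * restored, mirroring a failed bnxt_hwrm_*_filter_alloc().
 */
struct fltr {
	int id;
	struct fltr *next;
};

static int reapply(const struct fltr *f)
{
	return f->id == 2 ? -1 : 0;
}

int main(void)
{
	struct fltr *head = NULL, *f, *next, **pp;

	for (int id = 3; id >= 1; id--) {
		f = malloc(sizeof(*f));
		f->id = id;
		f->next = head;
		head = f;
	}

	/* Save the successor before the body may free the node; this is
	 * the guarantee list_for_each_entry_safe() provides.
	 */
	for (pp = &head; (f = *pp); ) {
		next = f->next;
		if (reapply(f) < 0) {
			printf("dropping filter %d\n", f->id);
			*pp = next;	/* unlink, then free */
			free(f);
		} else {
			pp = &f->next;
		}
	}

	while ((f = head)) {		/* cleanup */
		head = f->next;
		free(f);
	}
	return 0;
}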
@@ -11528,6 +11760,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ bnxt_cfg_usr_fltrs(bp);
return 0;
open_err_irq:
@@ -11969,8 +12202,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp,
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
@@ -12004,7 +12237,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
@@ -12031,7 +12264,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
mask = vnic->rx_mask;
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
@@ -12062,7 +12295,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -12174,21 +12407,32 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
+ hwr.rss_ctx = 1;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ /* 2 VNICS: default + Ntuple */
+ hwr.vnic = 2;
+ hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ hwr.vnic;
+ goto check_reserve_vnic;
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.vnic = 1 + bp->rx_nr_rings;
+check_reserve_vnic:
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- /* RSS contexts not a limiting factor */
- if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
- max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
+ hwr.rss_ctx = hwr.vnic;
+
+ if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -12199,15 +12443,19 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ hwr.rss_ctx = 0;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
}
@@ -12246,14 +12494,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
return features;
}
+static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init, u32 flags, bool update_tpa)
+{
+ bnxt_close_nic(bp, irq_re_init, link_re_init);
+ bp->flags = flags;
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return bnxt_open_nic(bp, irq_re_init, link_re_init);
+}
+
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
+ bool update_tpa = false, update_ntuple = false;
struct bnxt *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
int rc = 0;
bool re_init = false;
- bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
if (features & NETIF_F_GRO_HW)
@@ -12269,6 +12527,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_NTUPLE)
flags |= BNXT_FLAG_RFS;
+ else
+ bnxt_clear_usr_fltrs(bp, true);
changes = flags ^ bp->flags;
if (changes & BNXT_FLAG_TPA) {
@@ -12282,6 +12542,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & ~BNXT_FLAG_TPA)
re_init = true;
+ if (changes & BNXT_FLAG_RFS)
+ update_ntuple = true;
+
if (flags != bp->flags) {
u32 old_flags = bp->flags;
@@ -12292,14 +12555,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
- if (re_init) {
- bnxt_close_nic(bp, false, false);
- bp->flags = flags;
- if (update_tpa)
- bnxt_set_ring_params(bp);
+ if (update_ntuple)
+ return bnxt_reinit_features(bp, true, false, flags, update_tpa);
+
+ if (re_init)
+ return bnxt_reinit_features(bp, false, false, flags, update_tpa);
- return bnxt_open_nic(bp, false, false);
- }
if (update_tpa) {
bp->flags = flags;
rc = bnxt_set_tpa(bp,
@@ -13129,9 +13390,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
- int tx_rings_needed, stats;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics;
if (tcs)
tx_sets = tcs;
@@ -13144,26 +13404,27 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
- BNXT_FLAG_RFS)
- vnics += rx;
+ hwr.vnic = bnxt_get_total_vnics(bp, rx);
- tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
- cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
- if (max_cp < cp)
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
return -ENOMEM;
- stats = cp;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.grp = rx;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.tx + rx;
+ return bnxt_hwrm_check_rings(bp, &hwr);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13766,6 +14027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
eth_hw_addr_set(dev, addr->sa_data);
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13888,7 +14150,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
if (skb)
return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}
@@ -13899,7 +14161,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
int bit_id;
spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return -ENOMEM;
@@ -13911,6 +14173,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
head = &bp->ntp_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
return 0;
@@ -13919,45 +14182,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
+ struct bnxt_flow_masks *masks1 = &f1->fmasks;
+ struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (f1->ntuple_flags != f2->ntuple_flags)
- return false;
-
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
+ masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false;
} else {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- memcmp(&keys1->addrs.v6addrs.src,
- &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src))) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- memcmp(&keys1->addrs.v6addrs.dst,
- &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst))))
+ if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
+ &masks2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
+ &masks2->addrs.v6addrs.dst))
return false;
}
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
- keys1->ports.src != keys2->ports.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
- keys1->ports.dst != keys2->ports.dst))
- return false;
-
- if (keys1->control.flags == keys2->control.flags &&
- f1->l2_fltr == f2->l2_fltr)
- return true;
-
- return false;
+ return keys1->ports.src == keys2->ports.src &&
+ masks1->ports.src == masks2->ports.src &&
+ keys1->ports.dst == keys2->ports.dst &&
+ masks1->ports.dst == masks2->ports.dst &&
+ keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr;
}
struct bnxt_ntuple_filter *
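With ntuple_flags gone, bnxt_fltr_match() treats two filters as duplicates only when every key and every mask agree: the same address with a narrower mask is a different rule that must be allowed to coexist in the hash table. A small sketch of that equality rule over one hypothetical key/mask field:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical one-field rule: matches packets where (addr & mask) == key */
struct rule {
	uint32_t key;
	uint32_t mask;
};

/* Rules are identical only if key AND mask both match; equal keys with
 * different masks select different traffic, as in bnxt_fltr_match().
 */
static bool rule_equal(const struct rule *a, const struct rule *b)
{
	return a->key == b->key && a->mask == b->mask;
}

int main(void)
{
	struct rule r1 = { .key = 0x0a000001, .mask = 0xffffffff };
	struct rule r2 = { .key = 0x0a000001, .mask = 0xffffff00 };

	printf("equal: %d\n", rule_equal(&r1, &r2));	/* 0: distinct rules */
	return 0;
}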
@@ -13988,7 +14245,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u32 flags;
if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
} else {
struct bnxt_l2_key key;
@@ -14022,10 +14279,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
- bp->hwrm_spec_code < 0x10601) {
- rc = -EPROTONOSUPPORT;
- goto err_free;
+ new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
}
flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) &&
@@ -14033,9 +14293,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
-
new_fltr->l2_fltr = l2_fltr;
- new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
@@ -14070,6 +14328,7 @@ void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count--;
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_del_l2_filter(bp, fltr->l2_fltr);
@@ -14264,6 +14523,70 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bridge_setlink = bnxt_bridge_setlink,
};
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_cp_ring_info *cpr;
+ u64 *sw;
+
+ cpr = &bp->bnapi[i]->cp_ring;
+ sw = cpr->stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+ stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_napi *bnapi;
+ u64 *sw;
+
+ bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ sw = bnapi->cp_ring.stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ rx->packets = bp->net_stats_prev.rx_packets;
+ rx->bytes = bp->net_stats_prev.rx_bytes;
+ rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+ tx->packets = bp->net_stats_prev.tx_packets;
+ tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+ .get_queue_stats_rx = bnxt_get_queue_stats_rx,
+ .get_queue_stats_tx = bnxt_get_queue_stats_tx,
+ .get_base_stats = bnxt_get_base_stats,
+};
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
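bnxt_stat_ops above plugs into the per-queue statistics interface (struct netdev_stat_ops, include/net/netdev_queues.h): the core sums get_queue_stats_rx/tx over live queues and adds get_base_stats on top, so bnxt reports its pre-reset totals (net_stats_prev, ring_err_stats_prev) there to keep the exported counters monotonic across ring reconfiguration. A userspace analogue of that base-plus-queues aggregation contract:

#include <stdint.h>
#include <stdio.h>

/* Totals seen by user space are get_base_stats() plus the sum of the
 * per-queue counters, so counters do not go backwards when rings are
 * destroyed and recreated.
 */
struct qstats { uint64_t packets, bytes; };

#define NQ 4

static struct qstats base;	/* history from torn-down queues */
static struct qstats q[NQ];	/* live per-queue counters */

static struct qstats total(void)
{
	struct qstats t = base;

	for (int i = 0; i < NQ; i++) {
		t.packets += q[i].packets;
		t.bytes += q[i].bytes;
	}
	return t;
}

int main(void)
{
	q[0].packets = 10;
	q[0].bytes = 1000;

	/* ring reconfig: fold live counters into base, reset the queues */
	base = total();
	q[0] = (struct qstats){0};

	printf("packets=%llu\n", (unsigned long long)total().packets);
	return 0;
}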
@@ -14669,6 +14992,7 @@ void bnxt_print_device_info(struct bnxt *bp)
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bnxt_hw_resc *hw_resc;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -14710,6 +15034,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_free;
dev->netdev_ops = &bnxt_netdev_ops;
+ dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
@@ -14827,6 +15152,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ hw_resc = &bp->hw_resc;
+ bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNXT_L2_FLTR_MAX_FLTR;
+ /* Older firmware may not report these filters properly */
+ if (bp->max_fltr < BNXT_MAX_FLTR)
+ bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -14879,6 +15210,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_dl;
+ INIT_LIST_HEAD(&bp->usr_fltr_list);
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 47338b48ca20..dd849e715c9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1213,6 +1213,9 @@ struct bnxt_ring_grp_info {
u16 cp_fw_ring_id;
};
+#define BNXT_VNIC_DEFAULT 0
+#define BNXT_VNIC_NTUPLE 1
+
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
#define BNXT_MAX_CTX_PER_VNIC 8
@@ -1252,11 +1255,24 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+#define BNXT_VNIC_NTUPLE_FLAG 0x20
+};
+
+struct bnxt_hw_rings {
+ int tx;
+ int rx;
+ int grp;
+ int cp;
+ int cp_p5;
+ int stat;
+ int vnic;
+ int rss_ctx;
};
struct bnxt_hw_resc {
u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
u16 min_cp_rings;
u16 max_cp_rings;
u16 resv_cp_rings;
@@ -1281,6 +1297,12 @@ struct bnxt_hw_resc {
u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1315,12 +1337,6 @@ struct bnxt_pf_info {
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
- u32 max_encap_records;
- u32 max_decap_records;
- u32 max_tx_em_flows;
- u32 max_tx_wm_flows;
- u32 max_rx_em_flows;
- u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
u8 vf_resv_strategy;
@@ -1334,6 +1350,7 @@ struct bnxt_pf_info {
struct bnxt_filter_base {
struct hlist_node hash;
+ struct list_head list;
__le64 filter_id;
u8 type;
#define BNXT_FLTR_TYPE_NTUPLE 1
@@ -1355,19 +1372,21 @@ struct bnxt_filter_base {
struct rcu_head rcu;
};
+struct bnxt_flow_masks {
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
+
struct bnxt_ntuple_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct flow_keys fkeys;
+ struct bnxt_flow_masks fmasks;
struct bnxt_l2_filter *l2_fltr;
- u32 ntuple_flags;
-#define BNXT_NTUPLE_MATCH_SRC_IP 1
-#define BNXT_NTUPLE_MATCH_DST_IP 2
-#define BNXT_NTUPLE_MATCH_SRC_PORT 4
-#define BNXT_NTUPLE_MATCH_DST_PORT 8
-#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
- BNXT_NTUPLE_MATCH_DST_IP | \
- BNXT_NTUPLE_MATCH_SRC_PORT | \
- BNXT_NTUPLE_MATCH_DST_PORT)
u32 flow_id;
};
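Both filter types now carry the "base filter must be the first member" rule because shared code stores struct bnxt_filter_base pointers in the hash tables and usr_fltr_list, and type-specific code recovers the outer object with container_of(); first-member placement additionally keeps the base and outer addresses identical for direct casts. A runnable sketch of the idiom with hypothetical types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct filter_base {
	int sw_id;
};

struct ntuple_filter {
	struct filter_base base;	/* must stay the first member */
	int flow_id;
};

int main(void)
{
	struct ntuple_filter f = { .base = { .sw_id = 7 }, .flow_id = 42 };
	struct filter_base *b = &f.base;	/* what generic code stores */

	/* type-specific code recovers the outer object */
	struct ntuple_filter *n = container_of(b, struct ntuple_filter, base);

	printf("sw_id=%d flow_id=%d\n", n->base.sw_id, n->flow_id);
	return 0;
}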
@@ -1394,6 +1413,7 @@ struct bnxt_ipv6_tuple {
#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
struct bnxt_l2_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct bnxt_l2_key l2_key;
atomic_t refcnt;
@@ -2217,6 +2237,14 @@ struct bnxt {
#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
+#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4)
+#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
+#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
+#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
u16 max_mtu;
u8 max_tc;
@@ -2301,12 +2329,17 @@ struct bnxt {
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
#define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
+ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2428,6 +2461,7 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ int max_fltr;
#define BNXT_L2_FLTR_MAX_FLTR 1024
#define BNXT_L2_FLTR_HASH_SIZE 32
@@ -2437,12 +2471,14 @@ struct bnxt {
u32 hash_seed;
u64 toeplitz_prefix;
+ struct list_head usr_fltr_list;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
struct mutex link_lock;
struct bnxt_link_info link_info;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
@@ -2641,10 +2677,16 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags);
int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index dc4ca706b0e2..1d240a27455a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -968,6 +968,7 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1058,11 +1059,17 @@ static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
+ u32 count;
+
cmd->data = bp->ntp_fltr_count;
rcu_read_lock();
+ count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
+ cmd->rule_cnt);
cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
- rule_locs, 0, cmd->rule_cnt);
+ rule_locs, count,
+ cmd->rule_cnt);
rcu_read_unlock();
return 0;
@@ -1074,13 +1081,44 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
int rc = -EINVAL;
- if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= bp->max_fltr)
return rc;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE,
+ fs->location);
+ if (fltr_base) {
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_l2_key *l2_key;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ l2_key = &l2_fltr->l2_key;
+ fs->flow_type = ETHER_FLOW;
+ ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
+ eth_broadcast_addr(m_ether->h_dest);
+ if (l2_key->vlan) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ fs->flow_type |= FLOW_EXT;
+ m_ext->vlan_tci = htons(0xfff);
+ h_ext->vlan_tci = htons(l2_key->vlan);
+ }
+ if (fltr_base->flags & BNXT_ACT_RING_DST)
+ fs->ring_cookie = fltr_base->rxq;
+ if (fltr_base->flags & BNXT_ACT_FUNC_DST)
+ fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ rcu_read_unlock();
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
fs->location);
@@ -1091,59 +1129,74 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
fkeys = &fltr->fkeys;
+ fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP)
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
+ else
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V4_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
+ if (fs->flow_type == TCP_V4_FLOW ||
+ fs->flow_type == UDP_V4_FLOW) {
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IPV6_USER_FLOW;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
+ else
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V6_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
+ fmasks->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
+ fmasks->addrs.v6addrs.dst;
+ if (fs->flow_type == TCP_V6_FLOW ||
+ fs->flow_type == UDP_V6_FLOW) {
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
}
}
- fs->ring_cookie = fltr->base.rxq;
+ if (fltr->base.flags & BNXT_ACT_DROP)
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
@@ -1152,17 +1205,78 @@ fltr_err:
return rc;
}
-#define IPV4_ALL_MASK ((__force __be32)~0)
-#define L4_PORT_ALL_MASK ((__force __be16)~0)
+static int bnxt_add_l2_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
+ u16 vnic_id;
+ u8 flags;
+ int rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ return -EOPNOTSUPP;
-static bool ipv6_mask_is_full(__be32 mask[4])
+ if (!is_broadcast_ether_addr(m_ether->h_dest))
+ return -EINVAL;
+ ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
+ key.vlan = 0;
+ if (fs->flow_type & FLOW_EXT) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
+ return -EINVAL;
+ key.vlan = ntohs(h_ext->vlan_tci);
+ }
+
+ if (vf) {
+ flags = BNXT_ACT_FUNC_DST;
+ vnic_id = 0xffff;
+ vf--;
+ } else {
+ flags = BNXT_ACT_RING_DST;
+ vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
+ }
+ fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = vnic_id;
+ fltr->base.rxq = ring;
+ fltr->base.vf_idx = vf;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ fs->location = fltr->base.sw_id;
+ return rc;
+}
+
+static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
+ struct ethtool_usrip4_spec *ip_mask)
{
- return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+ if (ip_mask->l4_4_bytes || ip_mask->tos ||
+ ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
+ ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ return false;
+ return true;
}
-static bool ipv6_mask_is_zero(__be32 mask[4])
+static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
+ struct ethtool_usrip6_spec *ip_mask)
{
- return !(mask[0] | mask[1] | mask[2] | mask[3]);
+ if (ip_mask->l4_4_bytes || ip_mask->tclass ||
+ ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->l4_proto != IPPROTO_RAW &&
+ ip_spec->l4_proto != IPPROTO_ICMPV6))
+ return false;
+ return true;
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
@@ -1172,6 +1286,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
struct bnxt_ntuple_filter *new_fltr, *fltr;
struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_flow_masks *fmasks;
u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
u32 idx;
@@ -1183,17 +1298,42 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
+ if (flow_type == IP_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec))
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_type == IPV6_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec))
+ return -EOPNOTSUPP;
+ }
+
new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
if (!new_fltr)
return -ENOMEM;
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
new_fltr->l2_fltr = l2_fltr;
+ fmasks = &new_fltr->fmasks;
fkeys = &new_fltr->fkeys;
rc = -EOPNOTSUPP;
switch (flow_type) {
+ case IP_USER_FLOW: {
+ struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
+
+ fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ break;
+ }
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
@@ -1203,32 +1343,26 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (flow_type == UDP_V4_FLOW)
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ case IPV6_USER_FLOW: {
+ struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- if (ip_mask->ip4src == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.src = ip_spec->ip4src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (ip_mask->ip4src) {
- goto ntuple_err;
- }
- if (ip_mask->ip4dst == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (ip_mask->ip4dst) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
break;
}
case TCP_V6_FLOW:
@@ -1241,40 +1375,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
- if (ipv6_mask_is_full(ip_mask->ip6src)) {
- fkeys->addrs.v6addrs.src =
- *(struct in6_addr *)&ip_spec->ip6src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
- goto ntuple_err;
- }
- if (ipv6_mask_is_full(ip_mask->ip6dst)) {
- fkeys->addrs.v6addrs.dst =
- *(struct in6_addr *)&ip_spec->ip6dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
break;
}
default:
rc = -EOPNOTSUPP;
goto ntuple_err;
}
- if (!new_fltr->ntuple_flags)
+ if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
goto ntuple_err;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
@@ -1287,8 +1402,11 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
}
rcu_read_unlock();
- new_fltr->base.rxq = ring;
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ new_fltr->base.flags |= BNXT_ACT_DROP;
+ else
+ new_fltr->base.rxq = ring;
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -1321,6 +1439,18 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (fs->location != RX_CLS_LOC_ANY)
return -EINVAL;
+ flow_type = fs->flow_type;
+ if ((flow_type == IP_USER_FLOW ||
+ flow_type == IPV6_USER_FLOW) &&
+ !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
+ return -EOPNOTSUPP;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
+ return bnxt_add_ntuple_cls_rule(bp, fs);
+
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
if (BNXT_VF(bp) && vf)
@@ -1330,12 +1460,8 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (!vf && ring >= bp->rx_nr_rings)
return -EINVAL;
- flow_type = fs->flow_type;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
- return -EINVAL;
- flow_type &= ~FLOW_EXT;
if (flow_type == ETHER_FLOW)
- rc = -EOPNOTSUPP;
+ rc = bnxt_add_l2_cls_rule(bp, fs);
else
rc = bnxt_add_ntuple_cls_rule(bp, fs);
return rc;
@@ -1346,11 +1472,22 @@ static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ u32 id = fs->location;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, id);
+ if (fltr_base) {
+ struct bnxt_l2_filter *l2_fltr;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ rcu_read_unlock();
+ bnxt_hwrm_l2_filter_free(bp, l2_fltr);
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
- BNXT_NTP_FLTR_HASH_SIZE,
- fs->location);
+ BNXT_NTP_FLTR_HASH_SIZE, id);
if (!fltr_base) {
rcu_read_unlock();
return -ENOENT;
@@ -1396,8 +1533,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
@@ -1415,8 +1558,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -1463,6 +1612,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
+ } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
} else if (tuple == 4) {
return -EINVAL;
}
@@ -1521,7 +1688,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
+ cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1596,7 +1763,7 @@ static int bnxt_get_rxfh(struct net_device *dev,
if (!bp->vnic_info)
return 0;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
@@ -1619,8 +1786,10 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key)
- return -EOPNOTSUPP;
+ if (rxfh->key) {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
@@ -1631,7 +1800,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
-
+ bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -1751,31 +1920,21 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+/* TODO: support 25GB, 40GB, 50GB with different cable type */
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
- u32 speed_mask = 0;
+ linkmode_zero(mode);
- /* TODO: support 25GB, 40GB, 50GB with different cable type */
- /* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= ADVERTISED_2500baseX_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseCR4_Full;
-
- if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
- speed_mask |= ADVERTISED_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_TX)
- speed_mask |= ADVERTISED_Asym_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_RX)
- speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- return speed_mask;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}
enum bnxt_media_type {
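The ethtool_eee to ethtool_keee conversion above replaces u32 ADVERTISED_* words with linkmode bitmaps (__ETHTOOL_DECLARE_LINK_MODE_MASK arrays of unsigned long), so speed-mask conversion sets individual bits and subset tests go through linkmode_andnot()/linkmode_and() rather than integer masking. A userspace analogue of those bitmap helpers; the size and bit numbers here are illustrative only:

#include <stddef.h>
#include <stdio.h>

/* Each link mode is one bit in an unsigned long array instead of a
 * flag in a single u32, so the mode space can grow past 32 entries.
 */
#define NBITS	128	/* kernel uses __ETHTOOL_LINK_MODE_MASK_NBITS */
#define NLONGS	(NBITS / (8 * sizeof(unsigned long)))

static void mode_set(unsigned long *m, int bit)
{
	m[bit / (8 * sizeof(unsigned long))] |=
		1UL << (bit % (8 * sizeof(unsigned long)));
}

/* dst = a & ~b; returns nonzero if any bit survives, i.e. a is not a
 * subset of b -- the same test linkmode_andnot() performs in the diff.
 */
static int mode_andnot(unsigned long *dst, const unsigned long *a,
		       const unsigned long *b)
{
	unsigned long any = 0;

	for (size_t i = 0; i < NLONGS; i++) {
		dst[i] = a[i] & ~b[i];
		any |= dst[i];
	}
	return any != 0;
}

int main(void)
{
	unsigned long adv[NLONGS] = {0}, sup[NLONGS] = {0}, tmp[NLONGS];

	mode_set(adv, 32);	/* an advertised mode... */
	mode_set(sup, 32);	/* ...that is also supported */
	mode_set(adv, 77);	/* ...and one that is not */

	printf("advertising outside supported: %d\n",
	       mode_andnot(tmp, adv, sup));	/* prints 1 */
	return 0;
}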
@@ -2643,23 +2802,22 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
return 0;
}
-u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
u16 fw_speed_mask = 0;
- /* only support autoneg at speed 100, 1000, and 10000 */
- if (advertising & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
- }
- if (advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
- }
- if (advertising & ADVERTISED_10000baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
- if (advertising & ADVERTISED_40000baseCR4_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
@@ -3884,12 +4042,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
-static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
struct bnxt *bp = netdev_priv(dev);
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -3899,7 +4058,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
- advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!edata->eee_enabled)
goto eee_ok;
@@ -3919,16 +4078,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
- if (!edata->advertised) {
- edata->advertised = advertising & eee->supported;
- } else if (edata->advertised & ~advertising) {
- netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
- edata->advertised, advertising);
+ if (linkmode_empty(edata->advertised)) {
+ linkmode_and(edata->advertised, advertising, eee->supported);
+ } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
+ netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
rc = -EINVAL;
goto eee_exit;
}
- eee->advertised = edata->advertised;
+ linkmode_copy(eee->advertised, edata->advertised);
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
@@ -3942,7 +4100,7 @@ eee_exit:
return rc;
}
-static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3954,12 +4112,12 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
- edata->advertised = 0;
+ linkmode_zero(edata->advertised);
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
- edata->lp_advertised = 0;
+ linkmode_zero(edata->lp_advertised);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a8ecef8ab82c..e2ee030237d4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -43,12 +43,14 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
+#define BNXT_IP_PROTO_FULL_MASK 0xFF
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
-u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
u32 bnxt_fw_to_ethtool_speed(u16);
-u16 bnxt_get_fw_auto_link_speeds(u32);
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d7ae71287b1..7396e2823e32 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1313,14 +1313,13 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
}
priv->eee.eee_enabled = enable;
- priv->eee.eee_active = enable;
priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
-static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1328,18 +1327,17 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
+ bool active;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1352,9 +1350,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false, false);
} else {
- p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
+ active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
+ bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1985c0ec4da2..7523b60b3c1c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -645,7 +645,7 @@ struct bcmgenet_priv {
struct bcmgenet_mib_counters mib;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define GENET_IO_MACRO(name, offset) \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 97ea76d443ab..9ada89355747 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -30,6 +30,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
+ bool active;
/* speed */
if (phydev->speed == SPEED_1000)
@@ -88,9 +89,9 @@ static void bcmgenet_mac_config(struct net_device *dev)
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
- priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.eee_enabled && active,
priv->eee.tx_lpi_enabled);
}
@@ -475,6 +476,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppd.wait_func = bcmgenet_mii_wait;
ppd.wait_func_data = priv;
ppd.bus_name = "bcmgenet MII bus";
+ /* Pass a reference to our "main" clock which is used for MDIO
+ * transfers
+ */
+ ppd.clk = priv->clk;
/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
* and is 2 * 32-bits word long, 8 bytes total.
@@ -673,7 +678,5 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
- clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
- clk_disable_unprepare(priv->clk);
}
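
[The bcmmii change hands the GENET main clock to the unimac-mdio platform driver via ppd.clk, so the manual clk_prepare_enable()/clk_disable_unprepare() bracket around platform_device_unregister() can be dropped. A sketch of the assumed consumer side; only the pdata field name comes from the diff, the rest is illustrative:]

    #include <linux/clk.h>
    #include <linux/platform_device.h>
    #include <linux/platform_data/mdio-bcm-unimac.h>

    /* Assumed pattern: the MDIO bus driver gates the clock it was
     * handed around bus I/O instead of the MAC driver doing it.
     */
    static int sketch_mdio_io_begin(struct platform_device *pdev)
    {
        struct unimac_mdio_pdata *pdata = dev_get_platdata(&pdev->dev);

        /* clk may be NULL if there is nothing to gate; that is fine. */
        return clk_prepare_enable(pdata->clk);
    }
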
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 04964bbe08cf..eee759054aad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2338,10 +2338,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
u32 val;
- struct ethtool_eee *dest = &tp->eee;
+ struct ethtool_keee *dest = &tp->eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return;
@@ -2362,13 +2362,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
/* Pull lp advertised settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
return;
- dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
/* Pull advertised and eee_enabled settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
return;
dest->eee_enabled = !!val;
- dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
/* Pull tx_lpi_enabled */
val = tr32(TG3_CPMU_EEE_MODE);
@@ -4354,23 +4354,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- val = 0;
- /* Advertise 100-BaseTX EEE ability */
- if (advertise & ADVERTISED_100baseT_Full)
- val |= MDIO_AN_EEE_ADV_100TX;
- /* Advertise 1000-BaseT EEE ability */
- if (advertise & ADVERTISED_1000baseT_Full)
- val |= MDIO_AN_EEE_ADV_1000T;
-
- if (!tp->eee.eee_enabled) {
+ if (!tp->eee.eee_enabled)
val = 0;
- tp->eee.advertised = 0;
- } else {
- tp->eee.advertised = advertise &
- (ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
+ else
+ val = ethtool_adv_to_mmd_eee_adv_t(advertise);
+ mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
if (err)
val = 0;
@@ -4618,7 +4607,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee = {};
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return true;
@@ -4626,13 +4615,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp)
tg3_eee_pull_config(tp, &eee);
if (tp->eee.eee_enabled) {
- if (tp->eee.advertised != eee.advertised ||
+ if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
return false;
} else {
/* EEE is disabled but we're advertising */
- if (eee.advertised)
+ if (!linkmode_empty(eee.advertised))
return false;
}
@@ -14180,7 +14169,7 @@ static int tg3_set_coalesce(struct net_device *dev,
return 0;
}
-static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14189,7 +14178,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (edata->advertised != tp->eee.advertised) {
+ if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
netdev_warn(tp->dev,
"Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
@@ -14202,7 +14191,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EINVAL;
}
- tp->eee = *edata;
+ tp->eee.eee_enabled = edata->eee_enabled;
+ tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
+ tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
tg3_warn_mgmt_link_flap(tp);
@@ -14217,7 +14208,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -15655,10 +15646,13 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
- tp->eee.supported = SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full;
- tp->eee.advertised = ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full;
+ linkmode_zero(tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_copy(tp->eee.advertised, tp->eee.supported);
+
tp->eee.eee_enabled = 1;
tp->eee.tx_lpi_enabled = 1;
tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
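
[tg3 shows the MDIO side of the keee conversion: values read from the EEE MMD registers are folded into linkmode masks with mii_eee_cap1_mod_linkmode_t(), and whole-mask checks use linkmode_equal()/linkmode_empty(). Note also that tg3_set_eee() now copies the three scalar fields individually rather than assigning the whole struct, since the advertised mask stays driver-owned. Condensed sketch; the helpers are the real mdio.h ones, the function around them is illustrative:]

    #include <linux/ethtool.h>
    #include <linux/mdio.h>

    /* Fold an MDIO_AN_EEE_ADV register value into the keee mask. */
    static void sketch_pull_eee_adv(struct ethtool_keee *eee, u16 mmd_adv)
    {
        mii_eee_cap1_mod_linkmode_t(eee->advertised, mmd_adv);
        eee->eee_enabled = !!mmd_adv;
    }
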
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5016475e5005..cf1b2b123c7e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3419,7 +3419,7 @@ struct tg3 {
unsigned int irq_cnt;
struct ethtool_coalesce coal;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
/* firmware info */
const char *fw_needed;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b5ff2e1a9975..49d5808b7d11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -804,20 +804,6 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
}
/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- * @chip_ver: chip version
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
- unsigned int chip_ver)
-{
- return flits_to_desc(calc_tx_flits(skb, chip_ver));
-}
-
-/**
* cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 37bd38d772e8..d266a87297a5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -872,7 +872,7 @@ error:
return NETDEV_TX_OK;
}
-/* dev_base_lock rwlock held, nominally process context */
+/* rcu_read_lock potentially held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *net_stats)
{
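
[The enic comment update reflects that .ndo_get_stats64 callers may now hold rcu_read_lock() instead of the removed dev_base_lock, so the handler must stay non-sleeping. Sketch of a conforming handler; the private struct and counters are hypothetical:]

    #include <linux/netdevice.h>

    struct sketch_priv {            /* hypothetical driver state */
        u64 rx_packets;
        u64 tx_packets;
    };

    /* May run under rcu_read_lock(): lock-free reads only, no sleeping. */
    static void sketch_get_stats(struct net_device *netdev,
                                 struct rtnl_link_stats64 *stats)
    {
        struct sketch_priv *priv = netdev_priv(netdev);

        stats->rx_packets = READ_ONCE(priv->rx_packets);
        stats->tx_packets = READ_ONCE(priv->tx_packets);
    }
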
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index c2c5c589a5e3..44af1d13d931 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -590,5 +590,6 @@ module_pci_driver(pci_driver);
module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+MODULE_DESCRIPTION("Beckhoff CX5020 EtherCAT Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 64eadd320798..4b15af6b7122 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -229,8 +229,10 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
* would delay a working loopback anyway, let's ensure that loopback
* is working immediately by setting link mode directly
*/
- if (!retval && enable)
+ if (!retval && enable) {
+ netif_carrier_on(adapter->netdev);
tsnep_set_link_mode(adapter);
+ }
return retval;
}
@@ -238,7 +240,7 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
- struct ethtool_eee ethtool_eee;
+ struct ethtool_keee ethtool_keee;
int retval;
retval = phy_connect_direct(adapter->netdev, adapter->phydev,
@@ -257,8 +259,8 @@ static int tsnep_phy_open(struct tsnep_adapter *adapter)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* disable EEE autoneg, EEE not supported by TSNEP */
- memset(&ethtool_eee, 0, sizeof(ethtool_eee));
- phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+ memset(&ethtool_keee, 0, sizeof(ethtool_keee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
adapter->phydev->irq = PHY_MAC_INTERRUPT;
phy_start(adapter->phydev);
@@ -1266,6 +1268,14 @@ static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
return desc_refilled;
}
+static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
+{
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+}
+
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
@@ -1627,10 +1637,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ tsnep_xsk_rx_need_wakeup(rx, desc_available);
return done;
}
@@ -1775,14 +1782,8 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
* first polling would be too late as need wakeup signalisation would
* be delayed for an indefinite time
*/
- if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- int desc_available = tsnep_rx_desc_available(rx);
-
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
- }
+ if (xsk_uses_need_wakeup(rx->xsk_pool))
+ tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
}
static bool tsnep_pending(struct tsnep_queue *queue)
@@ -2570,8 +2571,7 @@ static int tsnep_probe(struct platform_device *pdev)
mutex_init(&adapter->rxnfc_lock);
INIT_LIST_HEAD(&adapter->rxnfc_rules);
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
netdev->mem_start = io->start;
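
[Two small tsnep cleanups above: the duplicated XSK need-wakeup toggling is factored into tsnep_xsk_rx_need_wakeup(), and probe switches to the combined devm helper for register mapping. A minimal sketch of the latter, replacing the platform_get_resource() plus devm_ioremap_resource() pair:]

    #include <linux/platform_device.h>

    static int sketch_map_regs(struct platform_device *pdev)
    {
        struct resource *io;
        void __iomem *addr;

        /* One call that both fetches the MEM resource and ioremaps it. */
        addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
        if (IS_ERR(addr))
            return PTR_ERR(addr);

        return 0;
    }
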
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index bfdbdab443ae..9f07f4947b63 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2402,7 +2402,7 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
int err;
if (!priv->phylink) {
@@ -2418,7 +2418,7 @@ static int enetc_phylink_connect(struct net_device *ndev)
}
/* disable EEE autoneg, until ENETC driver supports it */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phylink_ethtool_set_eee(priv->phylink, &edata);
phylink_start(priv->phylink);
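
[enetc, like tsnep above and gianfar below, keeps the established "disable EEE advertisement" idiom, just retyped for keee: an all-zero structure passed to the set_eee path advertises nothing. Minimal form:]

    #include <linux/ethtool.h>
    #include <linux/phylink.h>

    /* An all-zero ethtool_keee advertises nothing, i.e. disables EEE. */
    static void sketch_disable_eee(struct phylink *phylink)
    {
        struct ethtool_keee edata = {};

        phylink_ethtool_set_eee(phylink, &edata);
    }
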
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a8fbcada6b01..a19cb2a786fd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -672,7 +672,7 @@ struct fec_enet_private {
unsigned int itr_clk_rate;
/* tx lpi eee mode */
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 432523b2c789..d7693fdf640d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -85,8 +85,6 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
-/* Pause frame feild and FIFO threshold */
-#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
@@ -240,8 +238,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
/* FEC receive acceleration */
-#define FEC_RACC_IPDIS (1 << 1)
-#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_IPDIS BIT(1)
+#define FEC_RACC_PRODIS BIT(2)
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
@@ -273,8 +271,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_BYTESWP BIT(8)
+/* FEC RCR bits definition */
+#define FEC_RCR_LOOP BIT(0)
+#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_MII BIT(2)
+#define FEC_RCR_PROMISC BIT(3)
+#define FEC_RCR_BC_REJ BIT(4)
+#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RMII BIT(8)
+#define FEC_RCR_10BASET BIT(9)
+/* TX WMARK bits */
+#define FEC_TXWMRK_STRFWD BIT(8)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -1062,7 +1075,7 @@ fec_restart(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
- u32 ecntl = 0x2; /* ETHEREN */
+ u32 ecntl = FEC_ECR_ETHEREN;
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1137,18 +1150,18 @@ fec_restart(struct net_device *ndev)
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- rcntl |= (1 << 8);
+ rcntl |= FEC_RCR_RMII;
else
- rcntl &= ~(1 << 8);
+ rcntl &= ~FEC_RCR_RMII;
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
else if (ndev->phydev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
+ rcntl &= ~FEC_RCR_10BASET;
else
- rcntl |= (1 << 9);
+ rcntl |= FEC_RCR_10BASET;
}
} else {
#ifdef FEC_MIIGSK_ENR
@@ -1181,7 +1194,7 @@ fec_restart(struct net_device *ndev)
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
ndev->phydev && ndev->phydev->pause)) {
- rcntl |= FEC_ENET_FCE;
+ rcntl |= FEC_RCR_FLOWCTL;
/* set FIFO threshold parameter to reduce overrun */
writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
@@ -1192,7 +1205,7 @@ fec_restart(struct net_device *ndev)
/* OPD */
writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
} else {
- rcntl &= ~FEC_ENET_FCE;
+ rcntl &= ~FEC_RCR_FLOWCTL;
}
#endif /* !defined(CONFIG_M5272) */
@@ -1207,13 +1220,13 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
- ecntl |= (1 << 8);
+ ecntl |= FEC_ECR_BYTESWP;
/* enable ENET store and forward mode */
- writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1312,7 +1325,7 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1331,7 +1344,7 @@ fec_stop(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
- writel(1, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
udelay(10);
}
} else {
@@ -1345,12 +1358,11 @@ fec_stop(struct net_device *ndev)
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
}
-
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
@@ -2005,6 +2017,37 @@ static int fec_get_mac(struct net_device *ndev)
/*
* Phy section
*/
+
+/* LPI Sleep Ts count base on tx clk (clk_ref).
+ * The lpi sleep cnt value = X us / (cycle_ns).
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_keee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+
+ if (enable) {
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2044,6 +2087,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -2403,6 +2448,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
else
phy_set_max_speed(phy_dev, 100);
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ phy_support_eee(phy_dev);
+
fep->link = 0;
fep->full_duplex = 0;
@@ -3109,50 +3157,11 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-/* LPI Sleep Ts count base on tx clk (clk_ref).
- * The lpi sleep cnt value = X us / (cycle_ns).
- */
-static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- return us * (fep->clk_ref_rate / 1000) / 1000;
-}
-
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- unsigned int sleep_cycle, wake_cycle;
- int ret = 0;
-
- if (enable) {
- ret = phy_init_eee(ndev->phydev, false);
- if (ret)
- return ret;
-
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
- wake_cycle = sleep_cycle;
- } else {
- sleep_cycle = 0;
- wake_cycle = 0;
- }
-
- p->tx_lpi_enabled = enable;
- p->eee_enabled = enable;
- p->eee_active = enable;
-
- writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
- writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
-
- return 0;
-}
-
static int
-fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3160,20 +3169,16 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->eee_enabled = p->eee_enabled;
- edata->eee_active = p->eee_active;
edata->tx_lpi_timer = p->tx_lpi_timer;
- edata->tx_lpi_enabled = p->tx_lpi_enabled;
return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int
-fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- int ret = 0;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3183,15 +3188,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
p->tx_lpi_timer = edata->tx_lpi_timer;
- if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
- !edata->tx_lpi_timer)
- ret = fec_enet_eee_mode_set(ndev, false);
- else
- ret = fec_enet_eee_mode_set(ndev, true);
-
- if (ret)
- return ret;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
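
[The fec changes bundle two things: the magic ECR/RCR register values gain named BIT() macros, and EEE LPI programming moves out of the ethtool handlers into the link-change path, keyed off phylib's phydev->enable_tx_lpi now that the driver opts in with phy_support_eee(). A sketch of the resulting split of responsibilities; the hardware hook is hypothetical:]

    #include <linux/phy.h>

    static void sketch_hw_set_lpi(struct net_device *ndev, bool enable)
    {
        /* hypothetical: program or clear the LPI sleep/wake timers */
    }

    /* phylib owns the EEE decision; adjust_link just mirrors it. */
    static void sketch_adjust_link(struct net_device *ndev)
    {
        struct phy_device *phydev = ndev->phydev;

        if (phydev->link)
            sketch_hw_set_lpi(ndev, phydev->enable_tx_lpi);
    }
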
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e3dfbd7a4236..a811238c018d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1649,7 +1649,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
linkmode_set_bit_array(phy_10_100_features_array,
ARRAY_SIZE(phy_10_100_features_array),
@@ -1681,7 +1681,7 @@ static int init_phy(struct net_device *dev)
phy_support_asym_pause(phydev);
/* disable EEE autoneg, EEE not supported by eTSEC */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phy_ethtool_set_eee(phydev, &edata);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index b80349154604..4814c96d5fe7 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -51,12 +52,16 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+#define GVE_MAX_RX_BUFFER_SIZE 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
+
#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048
@@ -150,6 +155,11 @@ struct gve_rx_compl_queue_dqo {
u32 mask; /* Mask for indices to the size of the ring */
};
+struct gve_header_buf {
+ u8 *data;
+ dma_addr_t addr;
+};
+
/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
@@ -252,19 +262,26 @@ struct gve_rx_ring {
/* track number of used buffers */
u16 used_buf_states_cnt;
+
+ /* Address info of the buffers for header-split */
+ struct gve_header_buf hdr_bufs;
} dqo;
};
u64 rbytes; /* free-running bytes received */
+ u64 rx_hsplit_bytes; /* free-running header bytes received */
u64 rpackets; /* free-running packets received */
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_hsplit_pkt; /* free-running packets with headers split */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+ /* free-running count of unsplit packets due to header buffer overflow or hdr_len is 0 */
+ u64 rx_hsplit_unsplit_pkt;
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
@@ -622,6 +639,56 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
+/* Parameters for allocating queue page lists */
+struct gve_qpls_alloc_cfg {
+ struct gve_qpl_config *qpl_cfg;
+ struct gve_queue_config *tx_cfg;
+ struct gve_queue_config *rx_cfg;
+
+ u16 num_xdp_queues;
+ bool raw_addressing;
+ bool is_gqi;
+
+ /* Allocated resources are returned here */
+ struct gve_queue_page_list *qpls;
+};
+
+/* Parameters for allocating resources for tx queues */
+struct gve_tx_alloc_rings_cfg {
+ struct gve_queue_config *qcfg;
+
+ /* qpls and qpl_cfg must already be allocated */
+ struct gve_queue_page_list *qpls;
+ struct gve_qpl_config *qpl_cfg;
+
+ u16 ring_size;
+ u16 start_idx;
+ u16 num_rings;
+ bool raw_addressing;
+
+ /* Allocated resources are returned here */
+ struct gve_tx_ring *tx;
+};
+
+/* Parameters for allocating resources for rx queues */
+struct gve_rx_alloc_rings_cfg {
+ /* tx config is also needed to determine QPL ids */
+ struct gve_queue_config *qcfg;
+ struct gve_queue_config *qcfg_tx;
+
+ /* qpls and qpl_cfg must already be allocated */
+ struct gve_queue_page_list *qpls;
+ struct gve_qpl_config *qpl_cfg;
+
+ u16 ring_size;
+ u16 packet_buffer_size;
+ bool raw_addressing;
+ bool enable_header_split;
+
+ /* Allocated resources are returned here */
+ struct gve_rx_ring *rx;
+};
+
/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
* when the entire configure_device_resources command is zeroed out and the
* queue_format is not specified.
@@ -729,13 +796,17 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- int data_buffer_size_dqo;
+ u16 data_buffer_size_dqo;
+ u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
/* Interrupt coalescing settings */
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+
+ u16 header_buf_size; /* device configured, header-split supported if non-zero */
+ bool header_split_enabled; /* True if the header split is enabled by the user */
};
enum gve_service_task_flags_bit {
@@ -917,14 +988,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
priv->queue_format == GVE_DQO_QPL_FORMAT;
}
-/* Returns the number of tx queue page lists
- */
-static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
+/* Returns the number of tx queue page lists */
+static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
+ int num_xdp_queues,
+ bool is_qpl)
{
- if (!gve_is_qpl(priv))
+ if (!is_qpl)
return 0;
-
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return tx_cfg->num_queues + num_xdp_queues;
}
/* Returns the number of XDP tx queue page lists
@@ -937,14 +1008,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
return priv->num_xdp_queues;
}
-/* Returns the number of rx queue page lists
- */
-static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
+/* Returns the number of rx queue page lists */
+static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+ bool is_qpl)
{
- if (!gve_is_qpl(priv))
+ if (!is_qpl)
return 0;
-
- return priv->rx_cfg.num_queues;
+ return rx_cfg->num_queues;
}
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
@@ -957,59 +1027,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
+/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
+static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+{
+ return tx_cfg->max_queues + rx_qid;
+}
+
static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+/* Returns the index into priv->qpls where the first rx queue's QPL resides */
+static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
- return gve_rx_qpl_id(priv, 0);
+ return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls
- */
+/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
+ int tx_qid)
{
- int id = gve_tx_qpl_id(priv, tx_qid);
-
/* QPL already in use */
- if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+ if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
return NULL;
-
- set_bit(id, priv->qpl_cfg.qpl_id_map);
- return &priv->qpls[id];
+ set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
+ return &cfg->qpls[tx_qid];
}
-/* Returns a pointer to the next available rx qpl in the list of qpls
- */
+/* Returns a pointer to the next available rx qpl in the list of qpls */
static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
+ int rx_qid)
{
- int id = gve_rx_qpl_id(priv, rx_qid);
-
+ int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
/* QPL already in use */
- if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+ if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
return NULL;
-
- set_bit(id, priv->qpl_cfg.qpl_id_map);
- return &priv->qpls[id];
+ set_bit(id, cfg->qpl_cfg->qpl_id_map);
+ return &cfg->qpls[id];
}
-/* Unassigns the qpl with the given id
- */
-static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
+/* Unassigns the qpl with the given id */
+static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
{
- clear_bit(id, priv->qpl_cfg.qpl_id_map);
+ clear_bit(id, qpl_cfg->qpl_id_map);
}
-/* Returns the correct dma direction for tx and rx qpls
- */
+/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
{
- if (id < gve_rx_start_qpl_id(priv))
+ if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
@@ -1036,6 +1106,9 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+/* gqi napi handler defined in gve_main.c */
+int gve_napi_poll(struct napi_struct *napi, int budget);
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
@@ -1051,8 +1124,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
@@ -1061,7 +1138,15 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings_gqi(struct gve_priv *priv);
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
+bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
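
[The gve header introduces the *_alloc_rings_cfg structures: every parameter needed to size and allocate queues is snapshotted into a cfg, and the allocated rings come back through the same struct, which lets a reconfiguration allocate new resources up front and only then swap them into priv. Usage sketch, conceptually living in gve_main.c where gve_tx_get_curr_alloc_cfg() is defined; the new ring count is a made-up example value:]

    #include "gve.h"

    static int sketch_grow_tx(struct gve_priv *priv, int new_num_rings)
    {
        struct gve_tx_alloc_rings_cfg cfg = {0};

        gve_tx_get_curr_alloc_cfg(priv, &cfg);  /* snapshot live parameters */
        cfg.num_rings = new_num_rings;

        /* On success the freshly allocated rings are returned in cfg.tx. */
        return gve_tx_alloc_rings_gqi(priv, &cfg);
    }
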
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 12fbd723ecc6..ae12ac38e18b 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -147,6 +148,23 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_jumbo_frames = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_BUFFER_SIZES:
+ if (option_length < sizeof(**dev_op_buffer_sizes) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Buffer Sizes",
+ (int)sizeof(**dev_op_buffer_sizes),
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_buffer_sizes))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Buffer Sizes");
+ *dev_op_buffer_sizes = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -164,7 +182,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -185,7 +204,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl);
+ dev_op_dqo_qpl, dev_op_buffer_sizes);
dev_opt = next_opt;
}
@@ -640,6 +659,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be16(rx_buff_ring_entries);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
+ if (priv->header_split_enabled)
+ cmd.create_rx_queue.header_buffer_size =
+ cpu_to_be16(priv->header_buf_size);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -755,7 +777,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_jumbo_frames
*dev_op_jumbo_frames,
const struct gve_device_option_dqo_qpl
- *dev_op_dqo_qpl)
+ *dev_op_dqo_qpl,
+ const struct gve_device_option_buffer_sizes
+ *dev_op_buffer_sizes)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -779,10 +803,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (priv->rx_pages_per_qpl == 0)
priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
+
+ if (dev_op_buffer_sizes &&
+ (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
+ priv->max_rx_buffer_size =
+ be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
+ priv->header_buf_size =
+ be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
+ dev_info(&priv->pdev->dev,
+ "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
+ priv->max_rx_buffer_size, priv->header_buf_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
@@ -816,7 +852,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames,
- &dev_op_dqo_qpl);
+ &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes);
if (err)
goto free_device_descriptor;
@@ -885,7 +922,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
- dev_op_jumbo_frames, dev_op_dqo_qpl);
+ dev_op_jumbo_frames, dev_op_dqo_qpl,
+ dev_op_buffer_sizes);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
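
[The new GVE_DEV_OPT_ID_BUFFER_SIZES branch follows the existing option-parsing contract: a device-option TLV is only consumed if it is at least the expected length and carries the expected required-features mask, while a longer-than-expected option is tolerated with a warning for forward compatibility. Distilled into a standalone sketch:]

    #include <linux/types.h>

    /* Acceptance rule applied to each admin-queue device option. */
    static bool sketch_option_ok(u16 option_length, u32 req_feat_mask,
                                 size_t expected_len, u32 expected_mask)
    {
        if (option_length < expected_len || req_feat_mask != expected_mask)
            return false;   /* malformed: warn and skip the option */

        /* option_length > expected_len is tolerated with a warning. */
        return true;
    }
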
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5865ccdccbd0..5ac972e45ff8 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -125,6 +125,15 @@ struct gve_device_option_jumbo_frames {
static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+struct gve_device_option_buffer_sizes {
+ /* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
+ __be32 supported_features_mask;
+ __be16 packet_buffer_size;
+ __be16 header_buffer_size;
+};
+
+static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -140,6 +149,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
@@ -149,10 +159,12 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -165,6 +177,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
+ gve_driver_capability_flexible_buffer_size = 5,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -176,7 +189,8 @@ enum gve_driver_capbility {
(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
- GVE_CAP1(gve_driver_capability_alt_miss_compl))
+ GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -260,7 +274,9 @@ struct gve_adminq_create_rx_queue {
__be16 packet_buffer_size;
__be16 rx_buff_ring_size;
u8 enable_rsc;
- u8 padding[5];
+ u8 padding1;
+ __be16 header_buffer_size;
+ u8 padding2[2];
};
static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
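
[As with the other admin-queue structures, the buffer-sizes option is a fixed-layout wire struct: fixed-endian fields plus a static_assert pinning its size to the device ABI, and create_rx_queue gains header_buffer_size carved out of its padding so the overall 56-byte size is preserved. The pattern, with a hypothetical struct name:]

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct sketch_wire_option {         /* hypothetical wire struct */
        __be32 supported_features_mask;
        __be16 packet_buffer_size;
        __be16 header_buffer_size;
    };

    /* Layout drift breaks the build instead of the device conversation. */
    static_assert(sizeof(struct sketch_wire_option) == 8);
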
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index c36b93f0de15..b81584829c40 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_tx_free_rings_dqo(struct gve_priv *priv);
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
@@ -93,4 +101,6 @@ gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
gve_write_irq_doorbell_dqo(priv, block,
gve_setup_itr_interval_dqo(usecs));
}
+
+int gve_napi_poll_dqo(struct napi_struct *napi, int budget);
#endif /* _GVE_DQO_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index e5397aa1e48f..9aebfb843d9d 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -4,7 +4,6 @@
* Copyright (C) 2015-2021 Google, Inc.
*/
-#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
@@ -40,17 +39,18 @@ static u32 gve_get_msglevel(struct net_device *netdev)
* as declared in enum xdp_action inside file uapi/linux/bpf.h .
*/
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
- "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
- "rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
+ "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "rx_hsplit_unsplit_pkt",
"interface_up_cnt", "interface_down_cnt", "reset_cnt",
"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
- "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
- "rx_frag_alloc_cnt[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
+ "rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
+ "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -154,11 +154,13 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
- tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
+ tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
tmp_tx_pkts, tmp_tx_bytes;
- u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
+ rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
+ tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -185,8 +187,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
kfree(rx_qid_to_stats_idx);
return;
}
- for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
- rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
+ for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+ rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+ rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+ ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
@@ -195,18 +199,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
+ tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
+ tmp_rx_hsplit_unsplit_pkt =
+ rx->rx_hsplit_unsplit_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
+ rx_hsplit_pkt += tmp_rx_hsplit_pkt;
rx_bytes += tmp_rx_bytes;
rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
+ rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
@@ -227,6 +236,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = 0;
data[i++] = rx_pkts;
+ data[i++] = rx_hsplit_pkt;
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
@@ -238,6 +248,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = rx_hsplit_unsplit_pkt;
data[i++] = priv->interface_up_cnt;
data[i++] = priv->interface_down_cnt;
data[i++] = priv->reset_cnt;
@@ -277,6 +288,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
+ tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
@@ -284,6 +296,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
+ data[i++] = tmp_rx_hsplit_bytes;
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
@@ -480,6 +493,29 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->tx_max_pending = priv->tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+
+ if (!gve_header_split_supported(priv))
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+ else if (priv->header_split_enabled)
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ else
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+static int gve_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (priv->tx_desc_cnt != cmd->tx_pending ||
+ priv->rx_desc_cnt != cmd->rx_pending) {
+ dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -655,6 +691,7 @@ static int gve_set_coalesce(struct net_device *netdev,
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -667,6 +704,7 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
+ .set_ringparam = gve_set_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
.set_tunable = gve_set_tunable,
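
[Header split is exposed through the standard ethtool ringparam interface: the driver declares ETHTOOL_RING_USE_TCP_DATA_SPLIT, reports state from get_ringparam(), and accepts changes in set_ringparam() while still rejecting actual ring-size changes. From userspace this should be reachable on a reasonably recent ethtool with something like "ethtool -G <dev> tcp-data-split on". Minimal wiring sketch; handler bodies are stubs standing in for the real ones above:]

    #include <linux/ethtool.h>

    static void sketch_get_ringparam(struct net_device *dev,
                                     struct ethtool_ringparam *cmd,
                                     struct kernel_ethtool_ringparam *kcmd,
                                     struct netlink_ext_ack *extack)
    {
        /* report ETHTOOL_TCP_DATA_SPLIT_{UNKNOWN,ENABLED,DISABLED} */
        kcmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
    }

    static int sketch_set_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *cmd,
                                    struct kernel_ethtool_ringparam *kcmd,
                                    struct netlink_ext_ack *extack)
    {
        /* reject ring-size changes, then apply kcmd->tcp_data_split */
        return 0;
    }

    static const struct ethtool_ops sketch_ethtool_ops = {
        .supported_ring_params  = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
        .get_ringparam          = sketch_get_ringparam,
        .set_ringparam          = sketch_set_ringparam,
    };
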
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 619bf63ec935..166bd827a6d7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -22,6 +22,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
+#include "gve_utils.h"
#define GVE_DEFAULT_RX_COPYBREAK (256)
@@ -252,7 +253,7 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
-static int gve_napi_poll(struct napi_struct *napi, int budget)
+int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
__be32 __iomem *irq_doorbell;
@@ -302,7 +303,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block =
container_of(napi, struct gve_notify_block, napi);
@@ -581,19 +582,59 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv);
}
-static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
- int (*gve_poll)(struct napi_struct *, int))
+static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
{
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ int err;
+
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
- netif_napi_add(priv->dev, &block->napi, gve_poll);
+ priv->num_registered_pages -= priv->qpls[i].num_entries;
+ return 0;
}
-static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+static int gve_register_qpl(struct gve_priv *priv, u32 i)
{
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ int num_rx_qpls;
+ int pages;
+ int err;
+
+ /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+ if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot register nonexisting QPL at index %d\n", i);
+ return -EINVAL;
+ }
+
+ pages = priv->qpls[i].num_entries;
+
+ if (pages + priv->num_registered_pages > priv->max_registered_pages) {
+ netif_err(priv, drv, priv->dev,
+ "Reached max number of registered pages %llu > %llu\n",
+ pages + priv->num_registered_pages,
+ priv->max_registered_pages);
+ return -EINVAL;
+ }
+
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
- netif_napi_del(&block->napi);
+ priv->num_registered_pages += pages;
+ return 0;
}
static int gve_register_xdp_qpls(struct gve_priv *priv)
@@ -602,55 +643,41 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
int err;
int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ err = gve_register_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
static int gve_register_qpls(struct gve_priv *priv)
{
+ int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
- start_id = gve_tx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
+ gve_is_qpl(priv));
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+
+ for (i = 0; i < num_tx_qpls; i++) {
+ err = gve_register_qpl(priv, i);
+ if (err)
return err;
- }
}
- start_id = gve_rx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ /* there might be a gap between the tx and rx qpl ids */
+ start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
+ for (i = 0; i < num_rx_qpls; i++) {
+ err = gve_register_qpl(priv, start_id + i);
+ if (err)
return err;
- }
}
+
return 0;
}
@@ -660,48 +687,40 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
int err;
int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ err = gve_unregister_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean */
+ if (err)
return err;
- }
}
return 0;
}
static int gve_unregister_qpls(struct gve_priv *priv)
{
+ int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
- start_id = gve_tx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
+ gve_is_qpl(priv));
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+
+ for (i = 0; i < num_tx_qpls; i++) {
+ err = gve_unregister_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean */
+ if (err)
return err;
- }
}
- start_id = gve_rx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
+ for (i = 0; i < num_rx_qpls; i++) {
+ err = gve_unregister_qpl(priv, start_id + i);
+ /* This failure will trigger a reset - no need to clean */
+ if (err)
return err;
- }
}
return 0;
}
@@ -776,120 +795,124 @@ static int gve_create_rings(struct gve_priv *priv)
return 0;
}
-static void add_napi_init_xdp_sync_stats(struct gve_priv *priv,
- int (*napi_poll)(struct napi_struct *napi,
- int budget))
+static void init_xdp_sync_stats(struct gve_priv *priv)
{
int start_id = gve_xdp_tx_start_queue_id(priv);
int i;
- /* Add xdp tx napi & init sync stats*/
+ /* Init stats */
for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
priv->tx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
}
}
-static void add_napi_init_sync_stats(struct gve_priv *priv,
- int (*napi_poll)(struct napi_struct *napi,
- int budget))
+static void gve_init_sync_stats(struct gve_priv *priv)
{
int i;
- /* Add tx napi & init sync stats*/
- for (i = 0; i < gve_num_tx_queues(priv); i++) {
- int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
-
+ for (i = 0; i < priv->tx_cfg.num_queues; i++)
u64_stats_init(&priv->tx[i].statss);
- priv->tx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
- }
- /* Add rx napi & init sync stats*/
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+ /* Init stats for XDP TX queues */
+ init_xdp_sync_stats(priv);
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
u64_stats_init(&priv->rx[i].statss);
- priv->rx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
+}
+
+static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
+ cfg->qcfg = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->qpls = priv->qpls;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->ring_size = priv->tx_desc_cnt;
+ cfg->start_idx = 0;
+ cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->tx = priv->tx;
+}
+
+static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+{
+ int i;
+
+ if (!priv->tx)
+ return;
+
+ for (i = start_id; i < start_id + num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_tx_stop_ring_gqi(priv, i);
+ else
+ gve_tx_stop_ring_dqo(priv, i);
}
}
-static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
+ int num_rings)
{
- if (gve_is_gqi(priv)) {
- gve_tx_free_rings_gqi(priv, start_id, num_rings);
- } else {
- gve_tx_free_rings_dqo(priv);
+ int i;
+
+ for (i = start_id; i < start_id + num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_tx_start_ring_gqi(priv, i);
+ else
+ gve_tx_start_ring_dqo(priv, i);
}
}
static int gve_alloc_xdp_rings(struct gve_priv *priv)
{
- int start_id;
+ struct gve_tx_alloc_rings_cfg cfg = {0};
int err = 0;
if (!priv->num_xdp_queues)
return 0;
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues);
+ gve_tx_get_curr_alloc_cfg(priv, &cfg);
+ cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
+ cfg.num_rings = priv->num_xdp_queues;
+
+ err = gve_tx_alloc_rings_gqi(priv, &cfg);
if (err)
return err;
- add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
+
+ gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
+ init_xdp_sync_stats(priv);
return 0;
}
-static int gve_alloc_rings(struct gve_priv *priv)
+static int gve_alloc_rings(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
- /* Setup tx rings */
- priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
- GFP_KERNEL);
- if (!priv->tx)
- return -ENOMEM;
-
if (gve_is_gqi(priv))
- err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
+ err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
else
- err = gve_tx_alloc_rings_dqo(priv);
+ err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
if (err)
- goto free_tx;
-
- /* Setup rx rings */
- priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
- GFP_KERNEL);
- if (!priv->rx) {
- err = -ENOMEM;
- goto free_tx_queue;
- }
+ return err;
if (gve_is_gqi(priv))
- err = gve_rx_alloc_rings(priv);
+ err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
else
- err = gve_rx_alloc_rings_dqo(priv);
+ err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
if (err)
- goto free_rx;
-
- if (gve_is_gqi(priv))
- add_napi_init_sync_stats(priv, gve_napi_poll);
- else
- add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
+ goto free_tx;
return 0;
-free_rx:
- kvfree(priv->rx);
- priv->rx = NULL;
-free_tx_queue:
- gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
free_tx:
- kvfree(priv->tx);
- priv->tx = NULL;
+ if (gve_is_gqi(priv))
+ gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
+ else
+ gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
return err;
}
@@ -937,52 +960,30 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_rx_free_rings(struct gve_priv *priv)
-{
- if (gve_is_gqi(priv))
- gve_rx_free_rings_gqi(priv);
- else
- gve_rx_free_rings_dqo(priv);
-}
-
static void gve_free_xdp_rings(struct gve_priv *priv)
{
- int ntfy_idx, start_id;
- int i;
+ struct gve_tx_alloc_rings_cfg cfg = {0};
+
+ gve_tx_get_curr_alloc_cfg(priv, &cfg);
+ cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
+ cfg.num_rings = priv->num_xdp_queues;
- start_id = gve_xdp_tx_start_queue_id(priv);
if (priv->tx) {
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
+ gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
+ gve_tx_free_rings_gqi(priv, &cfg);
}
}
-static void gve_free_rings(struct gve_priv *priv)
+static void gve_free_rings(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
- int num_tx_queues = gve_num_tx_queues(priv);
- int ntfy_idx;
- int i;
-
- if (priv->tx) {
- for (i = 0; i < num_tx_queues; i++) {
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_tx_free_rings(priv, 0, num_tx_queues);
- kvfree(priv->tx);
- priv->tx = NULL;
- }
- if (priv->rx) {
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_rx_free_rings(priv);
- kvfree(priv->rx);
- priv->rx = NULL;
+ if (gve_is_gqi(priv)) {
+ gve_tx_free_rings_gqi(priv, tx_cfg);
+ gve_rx_free_rings_gqi(priv, rx_cfg);
+ } else {
+ gve_tx_free_rings_dqo(priv, tx_cfg);
+ gve_rx_free_rings_dqo(priv, rx_cfg);
}
}
@@ -1004,21 +1005,13 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
- int pages)
+static int gve_alloc_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id, int pages)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int err;
int i;
- if (pages + priv->num_registered_pages > priv->max_registered_pages) {
- netif_err(priv, drv, priv->dev,
- "Reached max number of registered pages %llu > %llu\n",
- pages + priv->num_registered_pages,
- priv->max_registered_pages);
- return -EINVAL;
- }
-
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
@@ -1039,7 +1032,6 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM;
qpl->num_entries++;
}
- priv->num_registered_pages += pages;
return 0;
}
@@ -1053,9 +1045,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
+static void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ int id)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
if (!qpl->pages)
@@ -1072,19 +1065,30 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
- priv->num_registered_pages -= qpl->num_entries;
}
-static int gve_alloc_xdp_qpls(struct gve_priv *priv)
+static void gve_free_n_qpls(struct gve_priv *priv,
+ struct gve_queue_page_list *qpls,
+ int start_id,
+ int num_qpls)
+{
+ int i;
+
+ for (i = start_id; i < start_id + num_qpls; i++)
+ gve_free_queue_page_list(priv, &qpls[i], i);
+}
+
+static int gve_alloc_n_qpls(struct gve_priv *priv,
+ struct gve_queue_page_list *qpls,
+ int page_count,
+ int start_id,
+ int num_qpls)
{
- int start_id;
- int i, j;
int err;
+ int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- priv->tx_pages_per_qpl);
+ for (i = start_id; i < start_id + num_qpls; i++) {
+ err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
if (err)
goto free_qpls;
}
@@ -1092,95 +1096,89 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv)
return 0;
free_qpls:
- for (j = start_id; j <= i; j++)
- gve_free_queue_page_list(priv, j);
+ /* Must include the failing QPL too, as gve_alloc_queue_page_list
+ * fails without cleaning up after itself.
+ */
+ gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
return err;
}
-static int gve_alloc_qpls(struct gve_priv *priv)
+static int gve_alloc_qpls(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
{
- int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
+ int rx_start_id, tx_num_qpls, rx_num_qpls;
+ struct gve_queue_page_list *qpls;
int page_count;
- int start_id;
- int i, j;
int err;
- if (!gve_is_qpl(priv))
+ if (cfg->raw_addressing)
return 0;
- priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
- if (!priv->qpls)
+ qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
+ if (!qpls)
return -ENOMEM;
- start_id = gve_tx_start_qpl_id(priv);
- page_count = priv->tx_pages_per_qpl;
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- page_count);
- if (err)
- goto free_qpls;
+ cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
+ sizeof(unsigned long) * BITS_PER_BYTE;
+ cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!cfg->qpl_cfg->qpl_id_map) {
+ err = -ENOMEM;
+ goto free_qpl_array;
}
- start_id = gve_rx_start_qpl_id(priv);
+ /* Allocate TX QPLs */
+ page_count = priv->tx_pages_per_qpl;
+ tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
+ gve_is_qpl(priv));
+ err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
+ if (err)
+ goto free_qpl_map;
+ /* Allocate RX QPLs */
+ rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
/* For GQI_QPL the number of pages allocated has a 1:1 relationship with
 * the number of descriptors. For DQO, more pages are required than
 * descriptors (because of out-of-order completions).
*/
- page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ?
- priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- page_count);
- if (err)
- goto free_qpls;
- }
-
- priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!priv->qpl_cfg.qpl_id_map) {
- err = -ENOMEM;
- goto free_qpls;
- }
+ page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
+ rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
+ err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
+ if (err)
+ goto free_tx_qpls;
+ cfg->qpls = qpls;
return 0;
-free_qpls:
- for (j = 0; j <= i; j++)
- gve_free_queue_page_list(priv, j);
- kvfree(priv->qpls);
- priv->qpls = NULL;
+free_tx_qpls:
+ gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
+free_qpl_map:
+ kvfree(cfg->qpl_cfg->qpl_id_map);
+ cfg->qpl_cfg->qpl_id_map = NULL;
+free_qpl_array:
+ kvfree(qpls);
return err;
}
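
The qpl_id_map sizing above follows the usual kernel bitmap convention: qpl_map_size records the size in bits, while the allocation is rounded up to whole unsigned longs via BITS_TO_LONGS. A small userspace sketch of the same rounding and of setting and testing one bit, with illustrative values only:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int max_queues = 32;	/* one bit per possible QPL id */
	size_t map_longs = BITS_TO_LONGS(max_queues);
	unsigned long *qpl_id_map = calloc(map_longs, sizeof(unsigned long));

	if (!qpl_id_map)
		return 1;

	/* qpl_map_size above is this same quantity expressed in bits. */
	printf("map: %zu longs, %zu bits\n", map_longs,
	       map_longs * sizeof(unsigned long) * CHAR_BIT);

	/* Mark QPL id 5 as assigned, then test the bit. */
	qpl_id_map[5 / BITS_PER_LONG] |= 1UL << (5 % BITS_PER_LONG);
	printf("qpl 5 assigned: %lu\n",
	       (qpl_id_map[5 / BITS_PER_LONG] >> (5 % BITS_PER_LONG)) & 1UL);

	free(qpl_id_map);
	return 0;
}
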
-static void gve_free_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int i;
-
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++)
- gve_free_queue_page_list(priv, i);
-}
-
-static void gve_free_qpls(struct gve_priv *priv)
+static void gve_free_qpls(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
{
- int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
+ struct gve_queue_page_list *qpls = cfg->qpls;
int i;
- if (!priv->qpls)
+ if (!qpls)
return;
- kvfree(priv->qpl_cfg.qpl_id_map);
- priv->qpl_cfg.qpl_id_map = NULL;
+ kvfree(cfg->qpl_cfg->qpl_id_map);
+ cfg->qpl_cfg->qpl_id_map = NULL;
for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, i);
+ gve_free_queue_page_list(priv, &qpls[i], i);
- kvfree(priv->qpls);
- priv->qpls = NULL;
+ kvfree(qpls);
+ cfg->qpls = NULL;
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1278,58 +1276,178 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
static void gve_drain_page_cache(struct gve_priv *priv)
{
- struct page_frag_cache *nc;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- nc = &priv->rx[i].page_cache;
- if (nc->va) {
- __page_frag_cache_drain(virt_to_page(nc->va),
- nc->pagecnt_bias);
- nc->va = NULL;
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ page_frag_cache_drain(&priv->rx[i].page_cache);
+}
+
+static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
+{
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->is_gqi = gve_is_gqi(priv);
+ cfg->num_xdp_queues = priv->num_xdp_queues;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->tx_cfg = &priv->tx_cfg;
+ cfg->rx_cfg = &priv->rx_cfg;
+ cfg->qpls = priv->qpls;
+}
+
+static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
+{
+ cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_tx = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->enable_header_split = priv->header_split_enabled;
+ cfg->qpls = priv->qpls;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->ring_size = priv->rx_desc_cnt;
+ cfg->packet_buffer_size = gve_is_gqi(priv) ?
+ GVE_DEFAULT_RX_BUFFER_SIZE :
+ priv->data_buffer_size_dqo;
+ cfg->rx = priv->rx;
+}
+
+static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
+ gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
+ gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
+}
+
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+{
+ int i;
+
+ for (i = 0; i < num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
}
-static int gve_open(struct net_device *dev)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- struct gve_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (!priv->rx)
+ return;
+
+ for (i = 0; i < num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
+ }
+}
+
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+ gve_free_qpls(priv, qpls_alloc_cfg);
+}
+
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ int err;
+
+ err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
+ return err;
+ }
+ tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+ rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+ err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
+ goto free_qpls;
+ }
+
+ return 0;
+
+free_qpls:
+ gve_free_qpls(priv, qpls_alloc_cfg);
+ return err;
+}
+
+static void gve_queues_mem_remove(struct gve_priv *priv)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ priv->qpls = NULL;
+ priv->tx = NULL;
+ priv->rx = NULL;
+}
+
+/* The passed-in queue memory is stored in priv and the queues are made live.
+ * No memory is allocated. Passed-in memory is freed on error.
+ */
+static int gve_queues_start(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ struct net_device *dev = priv->dev;
int err;
+ /* Record new resources into priv */
+ priv->qpls = qpls_alloc_cfg->qpls;
+ priv->tx = tx_alloc_cfg->tx;
+ priv->rx = rx_alloc_cfg->rx;
+
+ /* Record new configs into priv */
+ priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
+ priv->tx_cfg = *tx_alloc_cfg->qcfg;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
+ priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+
if (priv->xdp_prog)
priv->num_xdp_queues = priv->rx_cfg.num_queues;
else
priv->num_xdp_queues = 0;
- err = gve_alloc_qpls(priv);
- if (err)
- return err;
-
- err = gve_alloc_rings(priv);
- if (err)
- goto free_qpls;
+ gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = gve_reg_xdp_info(priv, dev);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = gve_register_qpls(priv);
if (err)
goto reset;
- if (!gve_is_gqi(priv)) {
- /* Hard code this for now. This may be tuned in the future for
- * performance.
- */
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
- }
+ priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
+ priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+
err = gve_create_rings(priv);
if (err)
goto reset;
@@ -1346,32 +1464,53 @@ static int gve_open(struct net_device *dev)
priv->interface_up_cnt++;
return 0;
-free_rings:
- gve_free_rings(priv);
-free_qpls:
- gve_free_qpls(priv);
- return err;
-
reset:
- /* This must have been called from a reset due to the rtnl lock
- * so just return at this point.
- */
if (gve_get_reset_in_progress(priv))
- return err;
- /* Otherwise reset before returning */
+ goto stop_and_free_rings;
gve_reset_and_teardown(priv, true);
/* if this fails there is nothing we can do so just ignore the return */
gve_reset_recovery(priv, false);
/* return the original error */
return err;
+stop_and_free_rings:
+ gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+ gve_queues_mem_remove(priv);
+ return err;
}
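
The comment on gve_queues_start states the ownership rule the new open path relies on: memory is allocated into the cfg structs first, and starting the queues consumes them, freeing the memory itself on failure. A compact userspace sketch of that handoff, using stand-in types rather than the real gve structs:

#include <stdio.h>
#include <stdlib.h>

struct rings_cfg {
	void *rings;	/* stand-in for the tx/rx/qpls pointers */
};

static int queues_mem_alloc(struct rings_cfg *cfg)
{
	cfg->rings = malloc(64);
	return cfg->rings ? 0 : -1;
}

/* Consumes the cfg: on failure the passed-in memory is freed here, so
 * the caller must not free it after this returns, success or not.
 */
static int queues_start(struct rings_cfg *cfg, int simulate_err)
{
	if (simulate_err) {
		free(cfg->rings);
		cfg->rings = NULL;
		return -1;
	}
	printf("queues live\n");	/* ownership now sits in "priv" */
	return 0;
}

int main(void)
{
	struct rings_cfg cfg = { 0 };

	if (queues_mem_alloc(&cfg))
		return 1;
	if (queues_start(&cfg, 0))
		return 1;	/* no cleanup here by design */
	free(cfg.rings);	/* the "close" path frees live queues */
	return 0;
}
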
-static int gve_close(struct net_device *dev)
+static int gve_open(struct net_device *dev)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- netif_carrier_off(dev);
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ /* No need to free on error: ownership of resources is lost after
+ * calling gve_queues_start.
+ */
+ err = gve_queues_start(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int gve_queues_stop(struct gve_priv *priv)
+{
+ int err;
+
+ netif_carrier_off(priv->dev);
if (gve_get_device_rings_ok(priv)) {
gve_turndown(priv);
gve_drain_page_cache(priv);
@@ -1386,8 +1525,10 @@ static int gve_close(struct net_device *dev)
del_timer_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv);
- gve_free_rings(priv);
- gve_free_qpls(priv);
+
+ gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+
priv->interface_down_cnt++;
return 0;
@@ -1402,10 +1543,26 @@ err:
return gve_reset_recovery(priv, false);
}
+static int gve_close(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = gve_queues_stop(priv);
+ if (err)
+ return err;
+
+ gve_queues_mem_remove(priv);
+ return 0;
+}
+
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
+ int qpl_start_id;
int err;
+ qpl_start_id = gve_xdp_tx_start_queue_id(priv);
+
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@@ -1416,18 +1573,22 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
- gve_free_xdp_qpls(priv);
+
+ gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
+ int start_id;
int err;
- priv->num_xdp_queues = priv->tx_cfg.num_queues;
+ priv->num_xdp_queues = priv->rx_cfg.num_queues;
- err = gve_alloc_xdp_qpls(priv);
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
+ start_id, gve_num_xdp_qpls(priv));
if (err)
goto err;
@@ -1452,7 +1613,7 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
free_xdp_qpls:
- gve_free_xdp_qpls(priv);
+ gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@@ -1702,42 +1863,87 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int gve_adjust_config(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ int err;
+
+ /* Allocate resources for the new configuration */
+ err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to alloc new queues");
+ return err;
+ }
+
+ /* Teardown the device and free existing resources */
+ err = gve_close(priv->dev);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to close old queues");
+ gve_queues_mem_free(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ return err;
+ }
+
+ /* Bring the device back up again with the new resources. */
+ err = gve_queues_start(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
+ /* No need to free on error: ownership of resources is lost after
+ * calling gve_queues_start.
+ */
+ gve_turndown(priv);
+ return err;
+ }
+
+ return 0;
+}
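
gve_adjust_config encodes an allocate-before-teardown strategy: the new resources are allocated while the old queues still carry traffic, so an allocation failure leaves the running device untouched; only a failure to start the new queues after the old ones are closed is unrecoverable. A stub-based sketch of that control flow; the helper names are placeholders, not driver functions:

#include <stdio.h>

static int mem_alloc(void) { return 0; }
static void mem_free(void) { }
static int dev_close(void) { return 0; }
static int queues_start(void) { return 0; }
static void turndown(void) { }

static int adjust_config(void)
{
	if (mem_alloc())
		return -1;		/* old queues still running */
	if (dev_close()) {
		mem_free();		/* roll back the new allocation */
		return -1;
	}
	if (queues_start()) {
		turndown();		/* point of no return: old queues gone */
		return -1;
	}
	return 0;
}

int main(void)
{
	return adjust_config();
}
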
+
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ struct gve_qpl_config new_qpl_cfg;
int err;
- if (netif_carrier_ok(priv->dev)) {
- /* To make this process as simple as possible we teardown the
- * device, set the new configuration, and then bring the device
- * up again.
- */
- err = gve_close(priv->dev);
- /* we have already tried to reset in close,
- * just fail at this point
- */
- if (err)
- return err;
- priv->tx_cfg = new_tx_config;
- priv->rx_cfg = new_rx_config;
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_open(priv->dev);
- if (err)
- goto err;
+ /* qpl_cfg is not read-only: it contains a map that gets updated as
+ * rings are allocated, which is why we cannot use the one in priv,
+ * whose entries have not been released yet.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
+ /* Relay the new config from ethtool */
+ qpls_alloc_cfg.tx_cfg = &new_tx_config;
+ tx_alloc_cfg.qcfg = &new_tx_config;
+ rx_alloc_cfg.qcfg_tx = &new_tx_config;
+ qpls_alloc_cfg.rx_cfg = &new_rx_config;
+ rx_alloc_cfg.qcfg = &new_rx_config;
+ tx_alloc_cfg.num_rings = new_tx_config.num_queues;
- return 0;
+ if (netif_carrier_ok(priv->dev)) {
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
}
/* Set the config for the next up. */
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
return 0;
-err:
- netif_err(priv, drv, priv->dev,
- "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
- gve_turndown(priv);
- return err;
}
static void gve_turndown(struct gve_priv *priv)
@@ -1853,40 +2059,91 @@ out:
priv->tx_timeo_cnt++;
}
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+{
+ if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
+ return GVE_MAX_RX_BUFFER_SIZE;
+ else
+ return GVE_DEFAULT_RX_BUFFER_SIZE;
+}
+
+/* Header split is not yet supported on non-DQO_RDA queue formats, even
+ * if the device advertises it.
+ */
+bool gve_header_split_supported(const struct gve_priv *priv)
+{
+ return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+}
+
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ bool enable_hdr_split;
+ int err = 0;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ return 0;
+
+ if (!gve_header_split_supported(priv)) {
+ dev_err(&priv->pdev->dev, "Header-split not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ enable_hdr_split = true;
+ else
+ enable_hdr_split = false;
+
+ if (enable_hdr_split == priv->header_split_enabled)
+ return 0;
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ rx_alloc_cfg.enable_header_split = enable_hdr_split;
+ rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+
+ if (netif_running(priv->dev))
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
+}
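
gve_set_hsplit_config only has to flip enable_header_split and the matching packet buffer size in the rx config and then reuse gve_adjust_config. The size choice mirrors gve_get_pkt_buf_size above; a userspace sketch, assuming 2048/4096 bytes for the driver's default/max buffer size constants:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_RX_BUFFER_SIZE 2048	/* assumed GVE_DEFAULT_RX_BUFFER_SIZE */
#define MAX_RX_BUFFER_SIZE     4096	/* assumed GVE_MAX_RX_BUFFER_SIZE */

/* Header split moves headers out of the data buffer, so the payload
 * buffer may grow to the device maximum when split is enabled.
 */
static unsigned int pkt_buf_size(unsigned int dev_max, bool hsplit)
{
	if (hsplit && dev_max >= MAX_RX_BUFFER_SIZE)
		return MAX_RX_BUFFER_SIZE;
	return DEFAULT_RX_BUFFER_SIZE;
}

int main(void)
{
	printf("split off: %u\n", pkt_buf_size(4096, false));
	printf("split on:  %u\n", pkt_buf_size(4096, true));
	return 0;
}
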
+
static int gve_set_features(struct net_device *netdev,
netdev_features_t features)
{
const netdev_features_t orig_features = netdev->features;
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
+ struct gve_qpl_config new_qpl_cfg;
int err;
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ /* qpl_cfg is not read-only: it contains a map that gets updated as
+ * rings are allocated, which is why we cannot use the one in priv,
+ * whose entries have not been released yet.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
- /* To make this process as simple as possible we
- * teardown the device, set the new configuration,
- * and then bring the device up again.
- */
- err = gve_close(netdev);
- /* We have already tried to reset in close, just fail
- * at this point.
- */
- if (err)
- goto err;
-
- err = gve_open(netdev);
- if (err)
- goto err;
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err) {
+ /* Revert the change on error. */
+ netdev->features = orig_features;
+ return err;
+ }
}
}
return 0;
-err:
- /* Reverts the change on error. */
- netdev->features = orig_features;
- netif_err(priv, drv, netdev,
- "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
- return err;
}
static const struct net_device_ops gve_netdev_ops = {
@@ -2051,6 +2308,8 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
+ priv->num_registered_pages = 0;
+
if (skip_describe_device)
goto setup_device;
@@ -2080,7 +2339,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
- priv->num_registered_pages = 0;
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
/* gvnic has one Notification Block per MSI-x vector, except for the
* management vector
@@ -2297,6 +2555,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 76615d47e055..20f5a9e7fae9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -23,7 +23,9 @@ static void gve_rx_free_buffer(struct device *dev,
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
-static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
+static void gve_rx_unfill_pages(struct gve_priv *priv,
+ struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
u32 slots = rx->mask + 1;
int i;
@@ -36,7 +38,7 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
@@ -49,16 +51,26 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
rx->data.page_info = NULL;
}
-static void gve_rx_free_ring(struct gve_priv *priv, int idx)
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ if (!gve_rx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_rx_remove_from_block(priv, idx);
+}
+
+static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
+ int idx = rx->q_num;
size_t bytes;
- gve_rx_remove_from_block(priv, idx);
-
- bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
rx->desc.desc_ring = NULL;
@@ -66,7 +78,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
- gve_rx_unfill_pages(priv, rx);
+ gve_rx_unfill_pages(priv, rx, cfg);
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
@@ -93,7 +105,8 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
struct gve_rx_slot_page_info *page_info,
- union gve_rx_data_slot *data_slot)
+ union gve_rx_data_slot *data_slot,
+ struct gve_rx_ring *rx)
{
struct page *page;
dma_addr_t dma;
@@ -101,14 +114,19 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
GFP_ATOMIC);
- if (err)
+ if (err) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return err;
+ }
gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
return 0;
}
-static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
+static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct gve_priv *priv = rx->gve;
u32 slots;
@@ -127,7 +145,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM;
if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->data.qpl) {
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -143,8 +161,9 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
&rx->data.data_ring[i].qpl_offset);
continue;
}
- err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
- &rx->data.data_ring[i]);
+ err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
+ &rx->data.page_info[i],
+ &rx->data.data_ring[i], rx);
if (err)
goto alloc_err_rda;
}
@@ -185,7 +204,7 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL;
return err;
@@ -207,13 +226,23 @@ static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
ctx->drop_pkt = false;
}
-static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ gve_rx_add_to_block(priv, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll);
+}
+
+static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
+ u32 slots = priv->rx_data_slot_cnt;
int filled_pages;
size_t bytes;
- u32 slots;
int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
@@ -223,9 +252,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->gve = priv;
rx->q_num = idx;
- slots = priv->rx_data_slot_cnt;
rx->mask = slots - 1;
- rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
+ rx->data.raw_addressing = cfg->raw_addressing;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -246,7 +274,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_slots;
}
- filled_pages = gve_prefill_rx_pages(rx);
+ filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
goto abort_with_copy_pool;
@@ -269,7 +297,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
(unsigned long)rx->data.data_bus);
/* alloc rx desc ring */
- bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL);
if (!rx->desc.desc_ring) {
@@ -277,15 +305,11 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_q_resources;
}
rx->cnt = 0;
- rx->db_threshold = priv->rx_desc_cnt / 2;
+ rx->db_threshold = slots / 2;
rx->desc.seqno = 1;
- /* Allocating half-page buffers allows page-flipping which is faster
- * than copying or allocating new pages.
- */
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
- gve_rx_add_to_block(priv, idx);
return 0;
@@ -294,7 +318,7 @@ abort_with_q_resources:
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
abort_filled:
- gve_rx_unfill_pages(priv, rx);
+ gve_rx_unfill_pages(priv, rx, cfg);
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -306,36 +330,58 @@ abort_with_slots:
return err;
}
-int gve_rx_alloc_rings(struct gve_priv *priv)
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx;
int err = 0;
- int i;
+ int i, j;
+
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_rx_alloc_ring(priv, i);
+ rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n",
i, err);
- break;
+ goto cleanup;
}
}
- /* Unallocate if there was an error */
- if (err) {
- int j;
- for (j = 0; j < i; j++)
- gve_rx_free_ring(priv, j);
- }
+ cfg->rx = rx;
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++)
+ gve_rx_free_ring_gqi(priv, &rx[j], cfg);
+ kvfree(rx);
return err;
}
-void gve_rx_free_rings_gqi(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx = cfg->rx;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_free_ring(priv, i);
+ if (!rx)
+ return;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++)
+ gve_rx_free_ring_gqi(priv, &rx[i], cfg);
+
+ kvfree(rx);
+ cfg->rx = NULL;
}
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
@@ -896,10 +942,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
if (gve_rx_alloc_buffer(priv, dev, page_info,
- data_slot)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
+ data_slot, rx)) {
break;
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f281e42a7ef9..8e8071308aeb 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -199,20 +199,42 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return 0;
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ if (rx->dqo.hdr_bufs.data) {
+ dma_free_coherent(hdev, priv->header_buf_size * buf_count,
+ rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
+ rx->dqo.hdr_bufs.data = NULL;
+ }
+}
+
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ if (!gve_rx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_rx_remove_from_block(priv, idx);
+}
+
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
+ int idx = rx->q_num;
size_t size;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
buffer_queue_slots = rx->dqo.bufq.mask + 1;
- gve_rx_remove_from_block(priv, idx);
-
if (rx->q_resources) {
dma_free_coherent(hdev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus);
@@ -226,7 +248,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
if (rx->dqo.qpl) {
- gve_unassign_qpl(priv, rx->dqo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
rx->dqo.qpl = NULL;
}
@@ -248,20 +270,44 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
kvfree(rx->dqo.buf_states);
rx->dqo.buf_states = NULL;
+ gve_rx_free_hdr_bufs(priv, rx);
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
+ &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
+ if (!rx->dqo.hdr_bufs.data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ gve_rx_add_to_block(priv, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
size_t size;
int i;
- const u32 buffer_queue_slots =
- priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt;
- const u32 completion_queue_slots = priv->rx_desc_cnt;
+ const u32 buffer_queue_slots = cfg->raw_addressing ?
+ priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -274,7 +320,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
- rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+ rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
priv->rx_pages_per_qpl;
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -283,6 +329,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.buf_states)
return -ENOMEM;
+ /* Allocate header buffers for header-split */
+ if (cfg->enable_header_split)
+ if (gve_rx_alloc_hdr_bufs(priv, rx))
+ goto err;
+
/* Set up linked list of buffer IDs */
for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
rx->dqo.buf_states[i].next = i + 1;
@@ -308,8 +359,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.bufq.desc_ring)
goto err;
- if (priv->queue_format != GVE_DQO_RDA_FORMAT) {
- rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ if (!cfg->raw_addressing) {
+ rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -320,12 +371,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->q_resources)
goto err;
- gve_rx_add_to_block(priv, idx);
-
return 0;
err:
- gve_rx_free_ring_dqo(priv, idx);
+ gve_rx_free_ring_dqo(priv, rx, cfg);
return -ENOMEM;
}
@@ -337,13 +386,26 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- int err = 0;
+ struct gve_rx_ring *rx;
+ int err;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_rx_alloc_ring_dqo(priv, i);
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n",
@@ -352,21 +414,30 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
}
}
+ cfg->rx = rx;
return 0;
err:
for (i--; i >= 0; i--)
- gve_rx_free_ring_dqo(priv, i);
-
+ gve_rx_free_ring_dqo(priv, &rx[i], cfg);
+ kvfree(rx);
return err;
}
-void gve_rx_free_rings_dqo(struct gve_priv *priv)
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx = cfg->rx;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_free_ring_dqo(priv, i);
+ if (!rx)
+ return;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++)
+ gve_rx_free_ring_dqo(priv, &rx[i], cfg);
+
+ kvfree(rx);
+ cfg->rx = NULL;
}
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
@@ -404,6 +475,10 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
buf_state->page_info.page_offset);
+ if (rx->dqo.hdr_bufs.data)
+ desc->header_buf_addr =
+ cpu_to_le64(rx->dqo.hdr_bufs.addr +
+ priv->header_buf_size * bufq->tail);
bufq->tail = (bufq->tail + 1) & bufq->mask;
complq->num_free_slots--;
@@ -419,7 +494,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const int data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = priv->data_buffer_size_dqo;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -606,13 +681,16 @@ static int gve_rx_append_frags(struct napi_struct *napi,
*/
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *compl_desc,
- int queue_idx)
+ u32 desc_idx, int queue_idx)
{
const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+ const bool hbo = compl_desc->header_buffer_overflow;
const bool eop = compl_desc->end_of_packet != 0;
+ const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
u16 buf_len;
+ u16 hdr_len;
if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
@@ -633,12 +711,35 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
buf_len = compl_desc->packet_len;
+ hdr_len = compl_desc->header_len;
/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
prefetch(buf_state->page_info.page);
+ /* Copy the header into the skb in the case of header split */
+ if (hsplit) {
+ int unsplit = 0;
+
+ if (hdr_len && !hbo) {
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ rx->dqo.hdr_bufs.data +
+ desc_idx * priv->header_buf_size,
+ hdr_len);
+ if (unlikely(!rx->ctx.skb_head))
+ goto error;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+ } else {
+ unsplit = 1;
+ }
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_hsplit_pkt++;
+ rx->rx_hsplit_unsplit_pkt += unsplit;
+ rx->rx_hsplit_bytes += hdr_len;
+ u64_stats_update_end(&rx->statss);
+ }
+
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
buf_state->page_info.page_offset,
@@ -781,7 +882,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* Do not read data until we own the descriptor */
dma_rmb();
- err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
gve_rx_free_skb(rx);
u64_stats_update_begin(&rx->statss);
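
The header-split copy in gve_rx_dqo assumes one contiguous header-buffer region with fixed-size slots indexed by the completion descriptor index (desc_idx * header_buf_size), the same offset gve_rx_post_buffers_dqo programs into header_buf_addr. A userspace sketch of that slot addressing, with illustrative sizes and a fake header byte:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	enum { HDR_BUF_SIZE = 128, BUF_COUNT = 4 };	/* illustrative */
	uint8_t hdr_bufs[HDR_BUF_SIZE * BUF_COUNT];
	uint32_t desc_idx = 2;

	memset(hdr_bufs, 0, sizeof(hdr_bufs));

	/* Device side: write a (fake) header into the slot programmed
	 * as hdr_bufs.addr + desc_idx * header_buf_size.
	 */
	memcpy(hdr_bufs + (size_t)desc_idx * HDR_BUF_SIZE, "\x45\x00", 2);

	/* Completion side: the rx path copies from the same offset. */
	printf("first header byte of slot %u: 0x%02x\n",
	       (unsigned int)desc_idx,
	       (unsigned int)hdr_bufs[(size_t)desc_idx * HDR_BUF_SIZE]);
	return 0;
}
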
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 07ba124780df..4b9853adc113 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -196,29 +196,36 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake);
-static void gve_tx_free_ring(struct gve_priv *priv, int idx)
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ if (!gve_tx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_remove_from_block(priv, idx);
+}
+
+static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
struct device *hdev = &priv->pdev->dev;
+ int idx = tx->q_num;
size_t bytes;
u32 slots;
- gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- if (tx->q_num < priv->tx_cfg.num_queues) {
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
- netdev_tx_reset_queue(tx->netdev_txq);
- } else {
- gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
- }
-
dma_free_coherent(hdev, sizeof(*tx->q_resources),
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
if (!tx->raw_addressing) {
gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
tx->tx_fifo.qpl = NULL;
}
@@ -232,11 +239,23 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
-static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ gve_tx_add_to_block(priv, idx);
+
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll);
+}
+
+static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg,
+ struct gve_tx_ring *tx,
+ int idx)
+{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->tx_desc_cnt;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -245,23 +264,23 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
spin_lock_init(&tx->xdp_lock);
tx->q_num = idx;
- tx->mask = slots - 1;
+ tx->mask = cfg->ring_size - 1;
/* alloc metadata */
- tx->info = vcalloc(slots, sizeof(*tx->info));
+ tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
if (!tx->info)
return -ENOMEM;
/* alloc tx queue */
- bytes = sizeof(*tx->desc) * slots;
+ bytes = sizeof(*tx->desc) * cfg->ring_size;
tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
if (!tx->desc)
goto abort_with_info;
- tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
- tx->dev = &priv->pdev->dev;
+ tx->raw_addressing = cfg->raw_addressing;
+ tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
/* map Tx FIFO */
@@ -277,12 +296,6 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto abort_with_fifo;
- netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
- (unsigned long)tx->bus);
- if (idx < priv->tx_cfg.num_queues)
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
- gve_tx_add_to_block(priv, idx);
-
return 0;
abort_with_fifo:
@@ -290,7 +303,7 @@ abort_with_fifo:
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
if (!tx->raw_addressing)
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -300,36 +313,73 @@ abort_with_info:
return -ENOMEM;
}
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int err = 0;
- int i;
+ int i, j;
+
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc more than the max num of Tx rings\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx == 0) {
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ } else if (!tx) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc tx rings from a nonzero start idx without tx array\n");
+ return -EINVAL;
+ }
- for (i = start_id; i < start_id + num_rings; i++) {
- err = gve_tx_alloc_ring(priv, i);
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n",
i, err);
- break;
+ goto cleanup;
}
}
- /* Unallocate if there was an error */
- if (err) {
- int j;
- for (j = start_id; j < i; j++)
- gve_tx_free_ring(priv, j);
- }
+ cfg->tx = tx;
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring_gqi(priv, &tx[j], cfg);
+ if (cfg->start_idx == 0)
+ kvfree(tx);
return err;
}
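
The start_idx checks above define the contract for partial TX allocation: start_idx == 0 allocates a fresh array sized for max_queues, while a nonzero start_idx (the XDP case) must be handed the existing array and only fills its tail. A userspace sketch of that contract with simplified types:

#include <stdio.h>
#include <stdlib.h>

struct tx_ring {
	int q_num;
	int live;
};

static int alloc_rings(struct tx_ring **tx, int max_queues,
		       int start_idx, int num_rings)
{
	int i;

	if (start_idx + num_rings > max_queues)
		return -1;
	if (start_idx == 0) {
		*tx = calloc(max_queues, sizeof(**tx));
		if (!*tx)
			return -1;
	} else if (!*tx) {
		return -1;	/* nonzero start needs the existing array */
	}
	for (i = start_idx; i < start_idx + num_rings; i++) {
		(*tx)[i].q_num = i;
		(*tx)[i].live = 1;
	}
	return 0;
}

int main(void)
{
	struct tx_ring *tx = NULL;

	if (alloc_rings(&tx, 8, 0, 4))	/* regular TX rings */
		return 1;
	if (alloc_rings(&tx, 8, 4, 2))	/* XDP rings appended at the tail */
		return 1;
	printf("ring 5 live: %d\n", tx[5].live);
	free(tx);
	return 0;
}
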
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int i;
- for (i = start_id; i < start_id + num_rings; i++)
- gve_tx_free_ring(priv, i);
+ if (!tx)
+ return;
+
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ gve_tx_free_ring_gqi(priv, &tx[i], cfg);
+
+ if (cfg->start_idx == 0) {
+ kvfree(tx);
+ cfg->tx = NULL;
+ }
}
/* gve_tx_avail - Calculates the number of slots available in the ring
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f59c4710f118..bc34b6cd3a3e 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -188,13 +188,27 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
}
}
-static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
- struct device *hdev = &priv->pdev->dev;
- size_t bytes;
+ if (!gve_tx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
+}
+
+static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int idx = tx->q_num;
+ size_t bytes;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -223,7 +237,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(priv, tx->dqo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
tx->dqo.qpl = NULL;
}
@@ -253,9 +267,22 @@ static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
return 0;
}
-static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ gve_tx_add_to_block(priv, idx);
+
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
+}
+
+static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg,
+ struct gve_tx_ring *tx,
+ int idx)
+{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
size_t bytes;
@@ -263,12 +290,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
- tx->dev = &priv->pdev->dev;
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ tx->dev = hdev;
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
- tx->mask = priv->tx_desc_cnt - 1;
+ tx->mask = cfg->ring_size - 1;
tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
priv->options_dqo_rda.tx_comp_ring_entries - 1 :
tx->mask;
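
The "power of 2" requirement exists so that mask = ring_size - 1 turns index wrap-around into a single AND, the pattern used throughout the ring code (e.g. bufq->tail = (bufq->tail + 1) & bufq->mask). A tiny sketch with an illustrative ring size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t ring_size = 8;	/* must be a power of two */
	const uint32_t mask = ring_size - 1;
	uint32_t tail = 6;
	int i;

	for (i = 0; i < 4; i++) {
		printf("slot %u\n", (unsigned int)tail);
		tail = (tail + 1) & mask;	/* 6, 7, then wraps to 0, 1 */
	}
	return 0;
}
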
@@ -327,8 +353,8 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto err;
- if (gve_is_qpl(priv)) {
- tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);
+ if (!cfg->raw_addressing) {
+ tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->dqo.qpl)
goto err;
@@ -336,22 +362,45 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
goto err;
}
- gve_tx_add_to_block(priv, idx);
-
return 0;
err:
- gve_tx_free_ring_dqo(priv, idx);
+ gve_tx_free_ring_dqo(priv, tx, cfg);
return -ENOMEM;
}
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int err = 0;
- int i;
+ int i, j;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_tx_alloc_ring_dqo(priv, i);
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc more than the max num of Tx rings\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx == 0) {
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ } else if (!tx) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc tx rings from a nonzero start idx without tx array\n");
+ return -EINVAL;
+ }
+
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n",
@@ -360,27 +409,32 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
}
}
+ cfg->tx = tx;
return 0;
err:
- for (i--; i >= 0; i--)
- gve_tx_free_ring_dqo(priv, i);
-
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring_dqo(priv, &tx[j], cfg);
+ if (cfg->start_idx == 0)
+ kvfree(tx);
return err;
}
-void gve_tx_free_rings_dqo(struct gve_priv *priv)
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- struct gve_tx_ring *tx = &priv->tx[i];
+ if (!tx)
+ return;
- gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
- gve_tx_clean_pending_packets(tx);
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- gve_tx_free_ring_dqo(priv, i);
+ if (cfg->start_idx == 0) {
+ kvfree(tx);
+ cfg->tx = NULL;
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 26e08d753270..2349750075a5 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -8,6 +8,14 @@
#include "gve_adminq.h"
#include "gve_utils.h"
+bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+ return block->tx != NULL;
+}
+
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
@@ -30,6 +38,14 @@ void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
queue_idx);
}
+bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+ return block->rx != NULL;
+}
+
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
@@ -48,11 +64,9 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
rx->ntfy_id = ntfy_idx;
}
-struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len)
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len)
{
- void *va = page_info->page_address + page_info->page_offset +
- page_info->pad;
struct sk_buff *skb;
skb = napi_alloc_skb(napi, len);
@@ -60,12 +74,21 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
return NULL;
__skb_put(skb, len);
- skb_copy_to_linear_data_offset(skb, 0, va, len);
+ skb_copy_to_linear_data_offset(skb, 0, data, len);
skb->protocol = eth_type_trans(skb, dev);
return skb;
}
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info, u16 len)
+{
+ void *va = page_info->page_address + page_info->page_offset +
+ page_info->pad;
+
+ return gve_rx_copy_data(dev, napi, va, len);
+}
+
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
page_info->pagecnt_bias--;
@@ -81,3 +104,18 @@ void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
page_ref_add(page_info->page, INT_MAX - pagecount);
}
}
+
+void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int))
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
+}
+
+void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_del(&block->napi);
+}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 324fd98a6112..bf2e9a0adb36 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -11,17 +11,25 @@
#include "gve.h"
+bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len);
+
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int));
+void gve_remove_napi(struct gve_priv *priv, int ntfy_idx);
#endif /* _GVE_UTILS_H */
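
The gve_rx_copy() split above is a wrapper/core refactor: gve_rx_copy_data() does the work from a raw buffer pointer, and gve_rx_copy() merely derives that pointer from the page descriptor, so paths that already hold a raw pointer can reuse the core. A standalone sketch of the same split, with hypothetical types standing in for the skb machinery:

#include <string.h>

struct page_info {
	char *page_address;
	unsigned int page_offset;
	unsigned int pad;
};

/* Core: copies from a raw pointer (stands in for skb alloc + copy) */
static void copy_data(char *dst, const char *data, unsigned int len)
{
	memcpy(dst, data, len);
}

/* Wrapper: only derives the pointer, then delegates to the core */
static void copy_from_page(char *dst, const struct page_info *pi,
			   unsigned int len)
{
	copy_data(dst, pi->page_address + pi->page_offset + pi->pad, len);
}

int main(void)
{
	char page[64] = "payload", dst[16];
	struct page_info pi = { .page_address = page, .page_offset = 0, .pad = 0 };

	copy_from_page(dst, &pi, 8);
	return dst[0] != 'p';
}
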
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 8a1027ad340d..d4293f76d69d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -12,7 +12,9 @@
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
-static struct class *hnae_class;
+static const struct class hnae_class = {
+ .name = "hnae",
+};
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
@@ -111,7 +113,7 @@ static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
+ dev = class_find_device(&hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -415,7 +417,7 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->owner = owner;
hdev->id = (int)atomic_inc_return(&id);
hdev->cls_dev.parent = hdev->dev;
- hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.class = &hnae_class;
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
@@ -448,13 +450,12 @@ EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
- hnae_class = class_create("hnae");
- return PTR_ERR_OR_ZERO(hnae_class);
+ return class_register(&hnae_class);
}
static void __exit hnae_exit(void)
{
- class_destroy(hnae_class);
+ class_unregister(&hnae_class);
}
subsys_initcall(hnae_init);
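
The hnae conversion above follows the tree-wide move from a heap-allocated class handle (class_create()) to a statically defined const struct class registered by reference. A minimal module sketch of the pattern, assuming the post-6.4 driver-core API where class_register() takes a const struct class *:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device/class.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/* Registers /sys/class/example; nothing is dynamically allocated */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
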
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d7e175a9cb49..f19f1e1d1f9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -388,6 +388,7 @@ struct hnae3_dev_specs {
u16 mc_mac_size;
u32 mac_stats_num;
u8 tnl_num;
+ u8 hilink_version;
};
struct hnae3_client_ops {
@@ -819,6 +820,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool mqprio_destroy;
bool dcb_ets_active;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index d92ad6082d8e..652d71326231 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -351,7 +351,7 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
- {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+ {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
};
u32 i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 533c19d25e4f..552396518e08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -55,7 +55,7 @@
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
+#define HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT 1000000
enum hclge_opcode_type {
/* Generic commands */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 3b6dbf158b98..f72dc0cee30e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
if (hns3_nic_resetting(ndev))
return -EBUSY;
- if (h->kinfo.dcb_ops->ieee_setapp)
+ if (h->kinfo.dcb_ops->ieee_delapp)
return h->kinfo.dcb_ops->ieee_delapp(h, app);
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c083d1d10767..807eb3bbb11c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1097,6 +1097,8 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
*pos += scnprintf(buf + *pos, len - *pos,
"TX timeout threshold: %d seconds\n",
dev->watchdog_timeo / HZ);
+ *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
+ dev_specs->hilink_version);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index f1695c889d3a..19668a8d22f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2473,9 +2473,9 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
if (skb->encapsulation)
- len = skb_inner_transport_header(skb) - skb->data;
+ len = skb_inner_transport_offset(skb);
else
- len = skb_transport_header(skb) - skb->data;
+ len = skb_transport_offset(skb);
/* Assume L4 is 60 bytes as TCP is the only protocol with a
* flexible value, and its max len is 60 bytes.
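
The hns3_features_check() change above replaces open-coded header arithmetic with existing skbuff helpers; the identities below (from linux/skbuff.h) show why the behavior is unchanged:

/* Equivalences relied on by the conversion:
 *   skb_transport_offset(skb)       == skb_transport_header(skb) - skb->data
 *   skb_inner_transport_offset(skb) == skb_inner_transport_header(skb) - skb->data
 */
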
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4d15eb73b972..9bb708fa42f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -828,7 +828,8 @@ struct hclge_dev_specs_1_cmd {
__le16 mc_mac_size;
u8 rsv1[6];
u8 tnl_num;
- u8 rsv2[5];
+ u8 hilink_version;
+ u8 rsv2[4];
};
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index b98301e205f7..eabbacb1c714 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -619,6 +619,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
return ret;
}
+ kinfo->tc_info.mqprio_destroy = !tc;
+
ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5ea9e59569ef..b4afb66efe5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -645,8 +645,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 1;
- handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ if (hdev->ae_dev->dev_specs.hilink_version !=
+ HCLGE_HILINK_H60) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ }
+
count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
count += 1;
@@ -884,7 +888,7 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
- {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
@@ -940,7 +944,7 @@ static void hclge_update_fec_support(struct hclge_mac *mac)
mac->supported);
}
-static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
@@ -948,10 +952,12 @@ static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
@@ -959,11 +965,13 @@ static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT,
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT,
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT,
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
@@ -971,10 +979,12 @@ static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
@@ -983,7 +993,9 @@ static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};
static void hclge_convert_setting_sr(u16 speed_ability,
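
Dropping the explicit bounds on the hclge_*_link_mode_bmap tables above lets the 200G R4/R4_EXT split add rows without touching each declaration; iteration already goes through ARRAY_SIZE(), which tracks the initializer. A standalone sketch of the idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct link_mode_map { unsigned int support_bit, link_mode_bit; };

static const struct link_mode_map sr_map[] = {	/* no hard-coded [8] */
	{ 1u << 0, 100 },
	{ 1u << 1, 101 },
	/* new entries extend the table without touching its declaration */
};

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(sr_map); i++)
		printf("bit %u -> mode %u\n", sr_map[i].support_bit,
		       sr_map[i].link_mode_bit);
	return 0;
}
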
@@ -1154,7 +1166,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
static u32 hclge_get_max_speed(u16 speed_ability)
{
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_200G_BITS)
return HCLGE_MAC_SPEED_200G;
if (speed_ability & HCLGE_SUPPORT_100G_BITS)
@@ -1350,6 +1362,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
ae_dev->dev_specs.tnl_num = req1->tnl_num;
+ ae_dev->dev_specs.hilink_version = req1->hilink_version;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -2890,7 +2903,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
int ret;
hdev->support_sfp_query = true;
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
@@ -12092,6 +12108,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_tc_config(hdev);
+
ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 51979cf71262..e821dd2f1528 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -191,9 +191,10 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
-#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_200G_R4_EXT_BIT BIT(8)
#define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
#define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
+#define HCLGE_SUPPORT_200G_R4_BIT BIT(11)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -201,6 +202,8 @@ enum HLCGE_PORT_TYPE {
(HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
#define HCLGE_SUPPORT_100G_BITS \
(HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
+#define HCLGE_SUPPORT_200G_BITS \
+ (HCLGE_SUPPORT_200G_R4_EXT_BIT | HCLGE_SUPPORT_200G_R4_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@@ -253,6 +256,12 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
+/* hilink version */
+enum hclge_hilink_version {
+ HCLGE_HILINK_H32 = 0,
+ HCLGE_HILINK_H60 = 1,
+};
+
#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 4b0d07ca2505..d4a0e0be7a72 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1123,10 +1123,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
+ req->mbx_src_vfid > hdev->num_req_vfs)) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %u\n",
- req->msg.code);
+ "dropped invalid mailbox message, code = %u, vfid = %u\n",
+ req->msg.code, req->mbx_src_vfid);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 80a2a0073d97..507d7ce26d83 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -108,7 +108,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u64 ns = nsec;
u32 sec_h;
- if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
return;
/* Since the BD does not have enough space for the higher 16 bits of
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index c58c31221762..00c3f2548bf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -2143,3 +2143,19 @@ int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
return ret;
}
+
+void hclge_reset_tc_config(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_knic_private_info *kinfo;
+
+ kinfo = &vport->nic.kinfo;
+
+ if (!kinfo->tc_info.mqprio_destroy)
+ return;
+
+ /* clear tc info, including mqprio_destroy and mqprio_active */
+ memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info));
+ hclge_tm_schd_info_update(hdev, 0);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 53eec6df5194..0985916629d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -277,4 +277,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
+void hclge_reset_tc_config(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 5e27470c6b1e..f2d4669c81cf 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
#ifdef DEBUG
printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
- printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
printk("%s: check, whether you set the right interrupt number!\n",dev->name);
#endif
sun3_82586_close(dev);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index d55638ad8704..639fbb12bd35 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -368,6 +368,15 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+
+config IGC_LEDS
+ def_bool LEDS_TRIGGER_NETDEV
+ depends on IGC && LEDS_CLASS
+ depends on LEDS_CLASS=y || IGC=m
+ help
+ Optional support for controlling the NIC LEDs with the netdev
+ LED trigger.
+
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 01f0f12035ca..3fcb8daaa243 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -171,8 +171,8 @@ static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
-module_param(eeprom_bad_csum_allow, int, 0);
-module_param(use_io, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0444);
+module_param(use_io, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fc0f98ea6133..dc553c51d79a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2186,7 +2186,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
-static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2223,16 +2223,16 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
if (ret_val)
goto release;
- edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->supported, phy_data);
/* EEE Advertised */
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
if (ret_val)
goto release;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
@@ -2262,11 +2262,13 @@ release:
return ret_val;
}
-static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
ret_val = e1000e_get_eee(netdev, &eee_curr);
@@ -2283,12 +2285,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
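
The ethtool_eee to ethtool_keee conversion above swaps u32 advertisement masks for link-mode bitmaps, so "advertises something unsupported" becomes set algebra. A fragment-level sketch of that check (not a complete function), assuming the linux/linkmode.h bitmap API used by the driver:

/* Build the supported set, then reject advertised bits outside it. */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(extra) = {};

linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);

/* linkmode_andnot() returns true if advertised minus supported is non-empty */
if (linkmode_andnot(extra, edata->advertised, supported))
	return -EINVAL;
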
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index af5d9d97a0d6..cc8c531ec3df 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6688,14 +6688,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
-
- if (retval)
- return retval;
+ if (retval)
+ return retval;
+ }
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9b701615c7c6..ba24f3fa92c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -687,6 +687,54 @@ struct i40e_pf {
};
/**
+ * __i40e_pf_next_vsi - get next valid VSI
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return the next non-NULL VSI pointer in the pf->vsi array,
+ * updating the idx position. Returns NULL if no VSI is found.
+ **/
+static __always_inline struct i40e_vsi *
+__i40e_pf_next_vsi(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < pf->num_alloc_vsi) {
+ if (pf->vsi[*idx])
+ return pf->vsi[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_vsi(_pf, _i, _vsi) \
+ for (_i = 0, _vsi = __i40e_pf_next_vsi(_pf, &_i); \
+ _vsi; \
+ _i++, _vsi = __i40e_pf_next_vsi(_pf, &_i))
+
+/**
+ * __i40e_pf_next_veb - get next valid VEB
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return the next non-NULL VEB pointer in the pf->veb array,
+ * updating the idx position. Returns NULL if no VEB is found.
+ **/
+static __always_inline struct i40e_veb *
+__i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < I40E_MAX_VEB) {
+ if (pf->veb[*idx])
+ return pf->veb[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_veb(_pf, _i, _veb) \
+ for (_i = 0, _veb = __i40e_pf_next_veb(_pf, &_i); \
+ _veb; \
+ _i++, _veb = __i40e_pf_next_veb(_pf, &_i))
+
+/**
* i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
@@ -735,7 +783,6 @@ struct i40e_new_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
u16 stats_idx; /* index of VEB stats */
@@ -1120,14 +1167,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- struct i40e_vsi *vsi = pf->vsi[i];
-
- if (vsi && vsi->type == type)
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->type == type)
return vsi;
- }
return NULL;
}
@@ -1309,4 +1354,40 @@ static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+/**
+ * i40e_pf_get_vsi_by_seid - find VSI by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VSI
+ **/
+static inline struct i40e_vsi *
+i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_vsi *vsi;
+ int i;
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->seid == seid)
+ return vsi;
+
+ return NULL;
+}
+
+/**
+ * i40e_pf_get_veb_by_seid - find VEB by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VEB
+ **/
+static inline struct i40e_veb *
+i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == seid)
+ return veb;
+
+ return NULL;
+}
+
#endif /* _I40E_H_ */
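
The i40e_pf_for_each_vsi()/i40e_pf_for_each_veb() macros introduced above hide the NULL-slot skipping that every call site used to open-code. A standalone sketch of the same skip-NULL iterator pattern: a helper advances the index to the next occupied slot, and a for-each macro keeps callers free of NULL checks:

#include <stdio.h>
#include <stddef.h>

#define MAX_SLOTS 8

struct item { int id; };

/* Advance *idx to the next occupied slot; NULL when the array is done */
static struct item *next_item(struct item **arr, int *idx)
{
	while (*idx < MAX_SLOTS) {
		if (arr[*idx])
			return arr[*idx];
		(*idx)++;
	}
	return NULL;
}

#define for_each_item(_arr, _i, _it)				\
	for (_i = 0, _it = next_item(_arr, &_i);		\
	     _it;						\
	     _i++, _it = next_item(_arr, &_i))

int main(void)
{
	struct item a = { 1 }, b = { 2 };
	struct item *slots[MAX_SLOTS] = { [1] = &a, [5] = &b };
	struct item *it;
	int i;

	for_each_item(slots, i, it)
		printf("slot %d -> id %d\n", i, it->id);
	return 0;
}
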
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 306758428aef..b32071ee84af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -148,8 +148,6 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
u32 reg_idx;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
@@ -576,8 +574,6 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this client */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b96a92187ab3..8aa43aefe84c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -947,16 +947,16 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
+ struct i40e_vsi *vsi;
int v, err;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] && pf->vsi[v]->netdev) {
- err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ if (vsi->netdev) {
+ err = i40e_dcbnl_vsi_del_app(vsi, app);
dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- pf->vsi[v]->seid, err, app->selector,
+ vsi->seid, err, app->selector,
app->protocolid, app->priority);
}
- }
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef70ddbe9c2f..f9ba45f596c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -24,31 +24,13 @@ enum ring_type {
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
- int i;
-
- if (seid < 0)
+ if (seid < 0) {
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- else
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
- return pf->vsi[i];
-
- return NULL;
-}
-/**
- * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf: the PF structure to search for the veb
- * @seid: seid of the veb it is searching for
- **/
-static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
-{
- int i;
+ return NULL;
+ }
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == seid)
- return pf->veb[i];
- return NULL;
+ return i40e_pf_get_vsi_by_seid(pf, seid);
}
/**************************************************************
@@ -653,12 +635,11 @@ out:
**/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
- i, pf->vsi[i]->seid);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}
/**
@@ -696,15 +677,14 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
struct i40e_veb *veb;
- veb = i40e_dbg_find_veb(pf, seid);
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
if (!veb) {
dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
- veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid,
+ "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
+ veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -718,11 +698,8 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
struct i40e_veb *veb;
int i;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- veb = pf->veb[i];
- if (veb)
- i40e_dbg_dump_veb_seid(pf, veb->seid);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
}
/**
@@ -851,10 +828,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
struct i40e_veb *veb;
- int uplink_seid, i;
+ u8 enabled_tc = 0x1;
+ int uplink_seid;
cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
- if (cnt != 2) {
+ if (cnt == 0) {
+ uplink_seid = 0;
+ vsi_seid = 0;
+ } else if (cnt != 2) {
dev_info(&pf->pdev->dev,
"add relay: bad command string, cnt=%d\n",
cnt);
@@ -866,33 +847,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add relay: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
- break;
- if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
- uplink_seid != pf->mac_seid) {
+ if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
dev_info(&pf->pdev->dev,
"add relay: relay uplink %d not found\n",
uplink_seid);
goto command_write_done;
+ } else if (uplink_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ enabled_tc = vsi->tc_config.enabled_tc;
+ } else if (vsi_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI must be 0 for floating relay\n");
+ goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
- vsi->tc_config.enabled_tc);
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
dev_info(&pf->pdev->dev, "add relay failed\n");
} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ struct i40e_veb *veb;
int i;
+
cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev,
@@ -906,9 +890,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* find the veb */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == veb_seid)
break;
+
if (i >= I40E_MAX_VEB) {
dev_info(&pf->pdev->dev,
"del relay: relay %d not found\n", veb_seid);
@@ -916,7 +901,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
- i40e_veb_release(pf->veb[i]);
+ i40e_veb_release(veb);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
unsigned int v;
int ret;
@@ -1251,8 +1236,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (cnt == 0) {
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- i40e_vsi_reset_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_reset_stats(vsi);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c841779713f6..42e7e6cdaa6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5644,7 +5644,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
@@ -5664,16 +5664,12 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
- edata->supported = SUPPORTED_Autoneg;
- edata->lp_advertised = edata->supported;
-
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
- edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
@@ -5682,7 +5678,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static int i40e_is_eee_param_supported(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5691,7 +5687,6 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
u32 value;
const char *name;
} param[] = {
- {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
{edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
@@ -5709,7 +5704,7 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
return 0;
}
-static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 89a3401d20ab..f86578857e8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -310,11 +310,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->id == id))
- return pf->vsi[i];
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->id == id)
+ return vsi;
return NULL;
}
@@ -552,24 +553,19 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
+ struct i40e_veb *veb;
int i;
memset(&pf->stats, 0, sizeof(pf->stats));
memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
pf->stat_offsets_loaded = false;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i]) {
- memset(&pf->veb[i]->stats, 0,
- sizeof(pf->veb[i]->stats));
- memset(&pf->veb[i]->stats_offsets, 0,
- sizeof(pf->veb[i]->stats_offsets));
- memset(&pf->veb[i]->tc_stats, 0,
- sizeof(pf->veb[i]->tc_stats));
- memset(&pf->veb[i]->tc_stats_offsets, 0,
- sizeof(pf->veb[i]->tc_stats_offsets));
- pf->veb[i]->stat_offsets_loaded = false;
- }
+ i40e_pf_for_each_veb(pf, i, veb) {
+ memset(&veb->stats, 0, sizeof(veb->stats));
+ memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
+ memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
+ memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
+ veb->stat_offsets_loaded = false;
}
pf->hw_csum_rx_error = 0;
}
@@ -2879,6 +2875,7 @@ err_no_memory_locked:
**/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
if (!pf)
@@ -2890,11 +2887,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
}
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
- !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
- int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
+ int ret = i40e_sync_vsi_filters(vsi);
if (ret) {
/* come back and try again later */
@@ -5166,6 +5162,7 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
**/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
@@ -5175,9 +5172,10 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
I40E_IWARP_IRQ_PILE_ID);
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- i40e_vsi_free_q_vectors(pf->vsi[i]);
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_free_q_vectors(vsi);
+
i40e_reset_interrupt_capability(pf);
}
@@ -5274,12 +5272,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_quiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_quiesce_vsi(vsi);
}
/**
@@ -5288,12 +5285,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
**/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_unquiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_unquiesce_vsi(vsi);
}
/**
@@ -5354,14 +5350,13 @@ wait_rx:
**/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v, ret = 0;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
- if (ret)
- break;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ ret = i40e_vsi_wait_queues_disabled(vsi);
+ if (ret)
+ break;
}
return ret;
@@ -6778,32 +6773,29 @@ out:
**/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
u8 tc_map = 0;
int ret;
- u8 v;
+ int v;
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
return;
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
- ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_veb_config_tc(veb, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
- pf->veb[v]->seid);
+ veb->seid);
/* Will try to configure as many components */
}
}
/* Update each VSI */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v])
- continue;
-
+ i40e_pf_for_each_vsi(pf, v, vsi) {
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
@@ -6812,17 +6804,17 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
- ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ ret = i40e_vsi_config_tc(vsi, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VSI seid=%d\n",
- pf->vsi[v]->seid);
+ vsi->seid);
/* Will try to configure as many components */
} else {
/* Re-configure VSI vectors based on updated TC map */
- i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
- if (pf->vsi[v]->netdev)
- i40e_dcbnl_set_all(pf->vsi[v]);
+ i40e_vsi_map_rings_to_vectors(vsi);
+ if (vsi->netdev)
+ i40e_dcbnl_set_all(vsi);
}
}
}
@@ -9257,7 +9249,9 @@ int i40e_close(struct net_device *netdev)
**/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
+ struct i40e_vsi *vsi;
u32 val;
+ int i;
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9313,29 +9307,20 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is enabled\n");
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that requested a re-init */
- dev_info(&pf->pdev->dev,
- "VSI reinit requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
+ dev_info(&pf->pdev->dev, "VSI reinit requested\n");
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
vsi->state))
- i40e_vsi_reinit_locked(pf->vsi[v]);
+ i40e_vsi_reinit_locked(vsi);
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
@@ -9888,6 +9873,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
**/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
int i;
@@ -9895,15 +9881,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
return;
pf = veb->pf;
- /* depth first... */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
- i40e_veb_link_event(pf->veb[i], link_up);
-
- /* ... now the local VSIs */
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
- i40e_vsi_link_event(pf->vsi[i], link_up);
+ /* Send link event to contained VSIs */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == veb->seid)
+ i40e_vsi_link_event(vsi, link_up);
}
/**
@@ -9995,6 +9976,8 @@ static void i40e_link_event(struct i40e_pf *pf)
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* if interface is down do nothing */
@@ -10015,15 +9998,14 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- i40e_update_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->netdev)
+ i40e_update_stats(vsi);
if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_update_veb_stats(veb);
}
i40e_ptp_rx_hang(pf);
@@ -10368,89 +10350,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
}
/**
- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
* @veb: pointer to the VEB instance
*
- * This is a recursive function that first builds the attached VSIs then
- * recurses in to build the next layer of VEB. We track the connections
- * through our own index numbers because the seid's from the HW could
- * change across the reset.
+ * This function builds the attached VSIs. We track the connections
+ * through our own index numbers because the SEIDs from the HW could
+ * change across the reset.
**/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
struct i40e_vsi *ctl_vsi = NULL;
struct i40e_pf *pf = veb->pf;
- int v, veb_idx;
- int ret;
+ struct i40e_vsi *vsi;
+ int v, ret;
- /* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
- if (pf->vsi[v] &&
- pf->vsi[v]->veb_idx == veb->idx &&
- pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
- ctl_vsi = pf->vsi[v];
- break;
- }
- }
- if (!ctl_vsi) {
- dev_info(&pf->pdev->dev,
- "missing owner VSI for veb_idx %d\n", veb->idx);
- ret = -ENOENT;
- goto end_reconstitute;
+ /* Since we do not maintain a PV (port virtualizer) switch element,
+ * there can be only one non-floating VEB that has an uplink to the
+ * MAC SEID, and its control VSI is the main one.
+ */
+ if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid uplink SEID for VEB %d\n", veb->idx);
+ return -ENOENT;
}
- if (ctl_vsi != pf->vsi[pf->lan_vsi])
- ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- ret = i40e_add_vsi(ctl_vsi);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "rebuild of veb_idx %d owner VSI failed: %d\n",
- veb->idx, ret);
- goto end_reconstitute;
+
+ if (veb->uplink_seid == pf->mac_seid) {
+ /* Check that the LAN VSI has VEB owning flag set */
+ ctl_vsi = pf->vsi[pf->lan_vsi];
+
+ if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
+ !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
+ dev_err(&pf->pdev->dev,
+ "Invalid control VSI for VEB %d\n", veb->idx);
+ return -ENOENT;
+ }
+
+ /* Add the control VSI to switch */
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Rebuild of owner VSI for VEB %d failed: %d\n",
+ veb->idx, ret);
+ return ret;
+ }
+
+ i40e_vsi_reset_stats(ctl_vsi);
}
- i40e_vsi_reset_stats(ctl_vsi);
/* create the VEB in the switch and move the VSI onto the VEB */
ret = i40e_add_veb(veb, ctl_vsi);
if (ret)
- goto end_reconstitute;
+ return ret;
- if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
- veb->bridge_mode = BRIDGE_MODE_VEB;
- else
- veb->bridge_mode = BRIDGE_MODE_VEPA;
- i40e_config_bridge_mode(veb);
+ if (veb->uplink_seid) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ i40e_config_bridge_mode(veb);
+ }
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if (vsi == ctl_vsi)
continue;
- if (pf->vsi[v]->veb_idx == veb->idx) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
+ if (vsi->veb_idx == veb->idx) {
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of vsi_idx %d failed: %d\n",
v, ret);
- goto end_reconstitute;
+ return ret;
}
i40e_vsi_reset_stats(vsi);
}
}
- /* create any VEBs attached to this VEB - RECURSION */
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
- pf->veb[veb_idx]->uplink_seid = veb->seid;
- ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
- if (ret)
- break;
- }
- }
-
-end_reconstitute:
return ret;
}
@@ -10718,6 +10695,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
int ret = 0;
u32 v;
@@ -10732,11 +10710,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- i40e_clean_xps_state(pf->vsi[v]);
- pf->vsi[v]->seid = 0;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ i40e_clean_xps_state(vsi);
+ vsi->seid = 0;
}
i40e_shutdown_adminq(&pf->hw);
@@ -10850,6 +10826,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ struct i40e_veb *veb;
int ret;
u32 val;
int v;
@@ -10991,35 +10968,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
- /* find the one VEB connected to the MAC, and find orphans */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
-
- if (pf->veb[v]->uplink_seid == pf->mac_seid ||
- pf->veb[v]->uplink_seid == 0) {
- ret = i40e_reconstitute_veb(pf->veb[v]);
- if (!ret)
- continue;
+ /* Rebuild VEBs */
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_reconstitute_veb(veb);
+ if (!ret)
+ continue;
- /* If Main VEB failed, we're in deep doodoo,
- * so give up rebuilding the switch and set up
- * for minimal rebuild of PF VSI.
- * If orphan failed, we'll report the error
- * but try to keep going.
- */
- if (pf->veb[v]->uplink_seid == pf->mac_seid) {
- dev_info(&pf->pdev->dev,
- "rebuild of switch failed: %d, will try to set up simple PF connection\n",
- ret);
- vsi->uplink_seid = pf->mac_seid;
- break;
- } else if (pf->veb[v]->uplink_seid == 0) {
- dev_info(&pf->pdev->dev,
- "rebuild of orphan VEB failed: %d\n",
- ret);
- }
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (veb->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ vsi->uplink_seid = pf->mac_seid;
+ break;
+ } else if (veb->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
}
}
}
@@ -12098,6 +12069,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
*/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int err, i;
/* We cleared the MSI and MSI-X flags when disabling the old interrupt
@@ -12114,13 +12086,12 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
/* Now that we've re-acquired IRQs, we need to remap the vectors and
* rings together again.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
- if (err)
- goto err_unwind;
- i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ err = i40e_vsi_alloc_q_vectors(vsi);
+ if (err)
+ goto err_unwind;
+
+ i40e_vsi_map_rings_to_vectors(vsi);
}
err = i40e_setup_misc_vector(pf);
@@ -13122,19 +13093,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
struct nlattr *attr, *br_spec;
- int i, rem;
+ struct i40e_veb *veb;
+ int rem;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
@@ -13199,19 +13167,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
- int i;
+ struct i40e_veb *veb;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb)
return 0;
@@ -13245,12 +13208,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
@@ -14145,7 +14108,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
- struct i40e_veb *veb = NULL;
+ struct i40e_veb *veb;
struct i40e_pf *pf;
u16 uplink_seid;
int i, n, bkt;
@@ -14209,29 +14172,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
/* If this was the last thing on the VEB, except for the
* controlling VSI, remove the VEB, which puts the controlling
- * VSI onto the next level down in the switch.
+ * VSI onto the uplink port.
*
* Well, okay, there's one more exception here: don't remove
- * the orphan VEBs yet. We'll wait for an explicit remove request
+ * the floating VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] &&
- pf->vsi[i]->uplink_seid == uplink_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- n++; /* count the VSIs */
- }
- }
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == uplink_seid)
- n++; /* count the VEBs */
- if (pf->veb[i]->seid == uplink_seid)
- veb = pf->veb[i];
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
+ if (veb && veb->uplink_seid) {
+ n = 0;
+
+ /* Count non-controlling VSIs present on the VEB */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == uplink_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ n++;
+
+ /* If there is no VSI except the control one then release
+ * the VEB and put the control VSI onto the VEB uplink.
+ */
+ if (!n)
+ i40e_veb_release(veb);
}
- if (n == 0 && veb && veb->uplink_seid != 0)
- i40e_veb_release(veb);
return 0;
}
@@ -14389,8 +14351,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
u16 alloc_queue_pairs;
- int ret, i;
int v_idx;
+ int ret;
/* The requested uplink_seid must be either
* - the PF's port seid
@@ -14405,21 +14367,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
*
* Find which uplink_seid we were given and create a new VEB if needed
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
- veb = pf->veb[i];
- break;
- }
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
if (!veb && uplink_seid != pf->mac_seid) {
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
uplink_seid);
@@ -14448,10 +14398,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
i40e_config_bridge_mode(veb);
}
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb) {
dev_info(&pf->pdev->dev, "couldn't add VEB\n");
return NULL;
@@ -14681,29 +14628,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
struct i40e_pf *pf = branch->pf;
u16 branch_seid = branch->seid;
u16 veb_idx = branch->idx;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* release any VEBs on this VEB - RECURSION */
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == branch->seid)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == branch->seid)
+ i40e_switch_branch_release(veb);
/* Release the VSIs on this VEB, but not the owner VSI.
*
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!pf->vsi[i])
- continue;
- if (pf->vsi[i]->uplink_seid == branch_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- i40e_vsi_release(pf->vsi[i]);
- }
- }
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == branch_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ i40e_vsi_release(vsi);
/* There's one corner case where the VEB might not have been
* removed, so double check it here and remove it if needed.
@@ -14741,38 +14683,35 @@ static void i40e_veb_clear(struct i40e_veb *veb)
**/
void i40e_veb_release(struct i40e_veb *veb)
{
- struct i40e_vsi *vsi = NULL;
+ struct i40e_vsi *vsi, *vsi_it;
struct i40e_pf *pf;
int i, n = 0;
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ i40e_pf_for_each_vsi(pf, i, vsi_it)
+ if (vsi_it->uplink_seid == veb->seid) {
+ if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER)
+ vsi = vsi_it;
n++;
- vsi = pf->vsi[i];
}
- }
- if (n != 1) {
+
+ /* A floating VEB has to be empty and a regular one must have a
+ * single owner VSI.
+ */
+ if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) {
dev_info(&pf->pdev->dev,
"can't remove VEB %d with %d VSIs left\n",
veb->seid, n);
return;
}
- /* move the remaining VSI to uplink veb */
- vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ /* For regular VEB move the owner VSI to uplink port */
if (veb->uplink_seid) {
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
vsi->uplink_seid = veb->uplink_seid;
- if (veb->uplink_seid == pf->mac_seid)
- vsi->veb_idx = I40E_NO_VEB;
- else
- vsi->veb_idx = veb->veb_idx;
- } else {
- /* floating VEB */
- vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ vsi->veb_idx = I40E_NO_VEB;
}
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
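The reworked guard encodes a single invariant: a regular VEB (nonzero uplink_seid) may only be removed once exactly its owner VSI remains, while a floating VEB must be completely empty. Restated compactly (sketch):

        bool removable = veb->uplink_seid ? (n == 1) : (n == 0);

        if (!removable)
                return;         /* "can't remove VEB %d with %d VSIs left" */

Here n counts every VSI whose uplink_seid is the VEB's seid, owner included, which is why the regular case expects 1 rather than 0.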
@@ -14790,8 +14729,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
- ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, false,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0,
+ veb->enabled_tc, vsi ? false : true,
&veb->seid, enable_stats, NULL);
/* get a VEB from the hardware */
@@ -14823,9 +14762,11 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
return -ENOENT;
}
- vsi->uplink_seid = veb->seid;
- vsi->veb_idx = veb->idx;
- vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ if (vsi) {
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ }
return 0;
}
@@ -14850,8 +14791,9 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
u16 uplink_seid, u16 vsi_seid,
u8 enabled_tc)
{
- struct i40e_veb *veb, *uplink_veb = NULL;
- int vsi_idx, veb_idx;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb;
+ int veb_idx;
int ret;
/* if one seid is 0, the other must be 0 to create a floating relay */
@@ -14864,26 +14806,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
- if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
- break;
- if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
- dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
- vsi_seid);
- return NULL;
- }
-
- if (uplink_seid && uplink_seid != pf->mac_seid) {
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] &&
- pf->veb[veb_idx]->seid == uplink_seid) {
- uplink_veb = pf->veb[veb_idx];
- break;
- }
- }
- if (!uplink_veb) {
- dev_info(&pf->pdev->dev,
- "uplink seid %d not found\n", uplink_seid);
+ if (vsi_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
return NULL;
}
}
@@ -14895,14 +14822,14 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
veb = pf->veb[veb_idx];
veb->flags = flags;
veb->uplink_seid = uplink_seid;
- veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
/* create the VEB in the switch */
- ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ ret = i40e_add_veb(veb, vsi);
if (ret)
goto err_veb;
- if (vsi_idx == pf->lan_vsi)
+
+ if (vsi && vsi->idx == pf->lan_vsi)
pf->lan_veb = veb->idx;
return veb;
@@ -14930,6 +14857,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
u8 element_type = ele->element_type;
u16 seid = le16_to_cpu(ele->seid);
+ struct i40e_veb *veb;
if (printconfig)
dev_info(&pf->pdev->dev,
@@ -14948,13 +14876,10 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
int v;
/* find existing or else empty VEB */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
- pf->lan_veb = v;
- break;
- }
- }
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
+ if (veb) {
+ pf->lan_veb = veb->idx;
+ } else {
v = i40e_veb_mem_alloc(pf);
if (v < 0)
break;
@@ -14967,7 +14892,6 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->veb[pf->lan_veb]->seid = seid;
pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
pf->veb[pf->lan_veb]->pf = pf;
- pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -15630,6 +15554,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
#endif /* CONFIG_I40E_DCB */
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
struct i40e_hw *hw;
u16 wol_nvm_bits;
@@ -15640,7 +15565,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
- u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15990,12 +15914,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
/* if FDIR VSI was set up, start it now */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_open(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_open(vsi);
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -16241,6 +16162,8 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int ret_code;
int i;
@@ -16298,24 +16221,19 @@ static void i40e_remove(struct pci_dev *pdev)
/* If there is a switch structure or any orphans, remove them.
* This will leave only the PF's VSI remaining.
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
-
- if (pf->veb[i]->uplink_seid == pf->mac_seid ||
- pf->veb[i]->uplink_seid == 0)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == pf->mac_seid ||
+ veb->uplink_seid == 0)
+ i40e_switch_branch_release(veb);
/* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- for (i = pf->num_alloc_vsi; i--;)
- if (pf->vsi[i]) {
- i40e_vsi_close(pf->vsi[i]);
- i40e_vsi_release(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ i40e_vsi_close(vsi);
+ i40e_vsi_release(vsi);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
@@ -16352,18 +16270,17 @@ unmap:
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
rtnl_lock();
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
- i40e_vsi_clear_rings(pf->vsi[i]);
- i40e_vsi_clear(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_clear(vsi);
+ pf->vsi[i] = NULL;
}
rtnl_unlock();
- for (i = 0; i < I40E_MAX_VEB; i++) {
- kfree(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb) {
+ kfree(veb);
pf->veb[i] = NULL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b34c71770887..83a34e98bdc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -491,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
u32 v_idx, reg_idx, reg;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
/* Figure out the queue after CEQ and make that the
@@ -562,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
/* Validate vector id belongs to this vf */
if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 335fd13e86f7..ef2440f3abf8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2170,19 +2170,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
-
- if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
- iavf_del_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
iavf_del_cloud_filter(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
- iavf_add_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
iavf_add_fdir_filter(adapter);
return IAVF_SUCCESS;
@@ -4423,12 +4414,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 367b613d92c0..365c03d1c462 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -493,7 +493,6 @@ enum ice_pf_flags {
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
- ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -606,6 +605,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 hw_rx_eipe_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
@@ -896,6 +896,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
}
void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
@@ -983,6 +984,8 @@ void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+int ice_init_dev(struct ice_pf *pf);
+void ice_deinit_dev(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..7cee365cc7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
+#include <net/rps.h>
/**
* ice_is_arfs_active - helper to check if aRFS is active
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c979192e44d1..d2fd315556a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -536,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -631,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -826,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
*/
-int
+static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
@@ -897,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
return 0;
}
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf->num_txqs = 1;
+
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ int err = 0;
+ u16 q_idx;
+
+ qg_buf->num_txqs = 1;
+
+ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+ int ret;
+ int i;
+
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
+
+ return 0;
+}
+
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
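With the single-queue and whole-VSI variants now exported from ice_base.c (and the per-ring ice_vsi_cfg_rxq()/ice_vsi_cfg_txq() made static), a caller sequence looks roughly like this (hypothetical usage, error handling trimmed):

        int err;
        u16 q_idx = 0;

        /* bring up every LAN queue of a VSI */
        err = ice_vsi_cfg_lan_txqs(vsi);
        if (!err)
                err = ice_vsi_cfg_rxqs(vsi);

        /* or touch just one queue pair, e.g. around an AF_XDP pool swap */
        err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
        if (!err)
                err = ice_vsi_cfg_single_rxq(vsi, q_idx);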
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 17321ba75602..b711bc921928 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,8 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
- struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 10c32cd80fff..4d8111aeb0ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -154,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
case ICE_DEV_ID_E830_BACKPLANE:
case ICE_DEV_ID_E830_QSFP56:
case ICE_DEV_ID_E830_SFP:
@@ -170,6 +176,18 @@ static int ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_generic_mac - check if device's mac_type is generic
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if mac_type is generic (with SBQ support), false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
+}
+
+/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
@@ -241,6 +259,25 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c - Check if a device is E825C family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -965,9 +1002,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ void *mac_buf __free(kfree);
u16 mac_buf_len;
- void *mac_buf;
int status;
/* Set MAC type based on DeviceID */
@@ -1045,7 +1082,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
status = -ENOMEM;
goto err_unroll_sched;
@@ -1055,7 +1092,6 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL);
- devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
@@ -1082,18 +1118,15 @@ int ice_init_hw(struct ice_hw *hw)
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
- mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
- sizeof(struct ice_aqc_manage_mac_read_resp),
- GFP_KERNEL);
- mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
-
+ mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
if (!mac_buf) {
status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
- devm_kfree(ice_hw_to_dev(hw), mac_buf);
if (status)
goto err_unroll_fltr_mgmt_struct;
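These hunks convert short-lived allocations from devm_* to plain kzalloc()/kcalloc() with scope-based cleanup: a pointer annotated __free(kfree) (from <linux/cleanup.h>) is handed to kfree() automatically when it leaves scope, so the explicit devm_kfree() unwinds disappear. A minimal sketch of the pattern; the function name is hypothetical, and declaring and initializing in one statement is the safest form, since every return after the declaration triggers the cleanup:

        #include <linux/cleanup.h>
        #include <linux/slab.h>

        static int read_phy_caps_example(void)
        {
                struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) =
                        kzalloc(sizeof(*pcaps), GFP_KERNEL);

                if (!pcaps)
                        return -ENOMEM;

                /* ... fill and consume pcaps ... */
                return 0;       /* kfree(pcaps) runs automatically here */
        }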
@@ -1362,9 +1395,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index)
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
@@ -3240,19 +3272,14 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_hw *hw;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
-
- devm_kfree(ice_hw_to_dev(hw), pcaps);
}
return status;
@@ -3393,8 +3420,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_hw *hw;
int status;
@@ -3404,7 +3431,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
*aq_failures = 0;
hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -3456,7 +3483,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
}
out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
return status;
}
@@ -3535,7 +3561,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_hw *hw;
int status;
@@ -3604,8 +3630,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
}
out:
- kfree(pcaps);
-
return status;
}
@@ -4325,13 +4349,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_byte - write a byte to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -4342,14 +4366,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = (u8)(BIT(ce_info->width) - 1);
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
src_byte = *from;
- src_byte &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_byte <<= shift_width;
+ src_byte &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4364,13 +4385,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_word - write a word to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -4382,17 +4403,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = BIT(ce_info->width) - 1;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_word = *(u16 *)from;
- src_word &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_word <<= shift_width;
+ src_word &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4407,13 +4425,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_dword - write a dword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -4425,25 +4443,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 32 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 5 bits so the shift will do nothing
- */
- if (ce_info->width < 32)
- mask = BIT(ce_info->width) - 1;
- else
- mask = (u32)~0;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_dword = *(u32 *)from;
- src_dword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_dword <<= shift_width;
+ src_dword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4458,13 +4465,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_qword - write a qword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -4476,25 +4483,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 64 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 6 bits so the shift will do nothing
- */
- if (ce_info->width < 64)
- mask = BIT_ULL(ce_info->width) - 1;
- else
- mask = (u64)~0;
+ mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_qword = *(u64 *)from;
- src_qword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_qword <<= shift_width;
+ src_qword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
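All four pack helpers now build the mask pre-shifted with GENMASK() and apply it after aligning the source, retiring the special cases the old code needed for full-width fields (where the shift in BIT(width) - 1 would be a full-type-width shift, which x86 silently masks, as the removed comments note). Worked through for an 11-bit field whose lsb falls 3 bits into its byte group:

        shift_width = 3;                        /* ce_info->lsb % 8 */
        mask = GENMASK(11 - 1 + 3, 3);          /* bits 13..3 -> 0x3ff8 */

        src_word <<= shift_width;               /* align the value first */
        src_word &= mask;                       /* then trim it to the field */

Mask-then-shift (old) and shift-then-mask against a pre-shifted mask (new) select the same bits; GENMASK_ULL(63, 0) is well defined where BIT_ULL(64) - 1 is not.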
@@ -4513,11 +4509,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
- * @ce_info: a description of the structure to be transformed
+ * @ce_info: List of Rx context elements
*/
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -4533,16 +4528,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
switch (ce_info[f].size_of) {
case sizeof(u8):
- ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u16):
- ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u32):
- ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u64):
- ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 3e933f75e948..ffb22c7ce28b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -53,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index);
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -72,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -112,6 +110,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
@@ -251,6 +250,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e7d2474c431c..ffe660f34992 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -666,7 +666,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw)
/* The device sideband queue is only supported on devices with the
* generic MAC type.
*/
- return hw->mac_type == ICE_MAC_GENERIC;
+ return ice_is_generic_mac(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 8b7504a9df31..7532d11ad7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1825,6 +1825,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
seg_id = SEGMENT_TYPE_ICE_E830;
break;
case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
default:
seg_id = SEGMENT_TYPE_ICE_E810;
break;
@@ -1845,6 +1846,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
case ICE_MAC_E830:
sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
+ break;
case ICE_MAC_GENERIC:
default:
sign_type = SEGMENT_SIGN_TYPE_RSA2K;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index c2bfba6b9ead..d252d98218d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -64,9 +64,6 @@ static const char * const ice_fwlog_level_string[] = {
"verbose",
};
-/* the order in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
static const char * const ice_fwlog_log_size[] = {
"128K",
"256K",
@@ -648,6 +645,16 @@ err_create_module_files:
}
/**
+ * ice_debugfs_pf_deinit - cleanup PF's debugfs
+ * @pf: pointer to the PF struct
+ */
+void ice_debugfs_pf_deinit(struct ice_pf *pf)
+{
+ debugfs_remove_recursive(pf->ice_debugfs_pf);
+ pf->ice_debugfs_pf = NULL;
+}
+
+/**
* ice_debugfs_init - create root directory for debugfs entries
*/
void ice_debugfs_init(void)
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a2d384dbfc76..9dfae9bce758 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -71,5 +71,13 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579c
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579d
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579e
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579f
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 65be56f2af9e..b516e42b41f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -445,6 +445,20 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
}
/**
+ * ice_devlink_reinit_down - unload given PF
+ * @pf: pointer to the PF struct
+ */
+static void ice_devlink_reinit_down(struct ice_pf *pf)
+{
+ /* No need to take devl_lock, it's already taken by devlink API */
+ ice_unload(pf);
+ rtnl_lock();
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ rtnl_unlock();
+ ice_deinit_dev(pf);
+}
+
+/**
* ice_devlink_reload_down - prepare for reload
* @devlink: pointer to the devlink instance to reload
* @netns_change: if true, the network namespace is changing
@@ -477,7 +491,7 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
"Remove all VFs before doing reinit\n");
return -EOPNOTSUPP;
}
- ice_unload(pf);
+ ice_devlink_reinit_down(pf);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
return ice_devlink_reload_empr_start(pf, extack);
@@ -1270,6 +1284,45 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
}
/**
+ * ice_devlink_reinit_up - do reinit of the given PF
+ * @pf: pointer to the PF struct
+ */
+static int ice_devlink_reinit_up(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_cfg_params params;
+ int err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ rtnl_lock();
+ err = ice_vsi_cfg(vsi, &params);
+ rtnl_unlock();
+ if (err)
+ goto err_vsi_cfg;
+
+ /* No need to take devl_lock, it's already taken by devlink API */
+ err = ice_load(pf);
+ if (err)
+ goto err_load;
+
+ return 0;
+
+err_load:
+ rtnl_lock();
+ ice_vsi_decfg(vsi);
+ rtnl_unlock();
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
* ice_devlink_reload_up - do reload up after reinit
* @devlink: pointer to the devlink instance reloading
* @action: the action requested
@@ -1289,7 +1342,7 @@ ice_devlink_reload_up(struct devlink *devlink,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return ice_load(pf);
+ return ice_devlink_reinit_up(pf);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return ice_devlink_reload_empr_finish(pf, extack);
@@ -1569,6 +1622,7 @@ static const struct devlink_port_ops ice_devlink_port_ops = {
* @pf: the PF to create a devlink port for
*
* Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
*
* Return: zero on success or an error code on failure.
*/
@@ -1581,6 +1635,8 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
struct device *dev;
int err;
+ devlink = priv_to_devlink(pf);
+
dev = ice_pf_to_dev(pf);
devlink_port = &pf->devlink_port;
@@ -1601,10 +1657,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
- err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
pf->hw.pf_id, err);
@@ -1619,10 +1674,11 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
* @pf: the PF to cleanup
*
* Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
*/
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_port_unregister(&pf->devlink_port);
+ devl_port_unregister(&pf->devlink_port);
}
/**
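The switch from devlink_port_register_with_ops() to the devl_port_register_with_ops()/devl_port_unregister() pair drops the internal locking; the devl_* forms assume the caller already holds the instance lock, which is why both kernel-docs now say the functions have to be called under devl_lock. A caller outside a devlink callback would take the lock itself (sketch):

        devl_lock(priv_to_devlink(pf));
        err = ice_devlink_create_pf_port(pf);
        devl_unlock(priv_to_devlink(pf));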
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index bd9b1fed74ab..e92be6f130a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -527,6 +527,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
* @status: on success holds dpll's lock status
+ * @status_error: status error value
* @extack: error reporting
*
* Dpll subsystem callback, provides dpll's lock status.
@@ -539,6 +540,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
static int
ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack)
{
struct ice_dpll *d = dpll_priv;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a19b06f18e40..255a9c8151b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,6 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+ ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -801,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
+ data = kzalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -944,11 +945,9 @@ static u64 ice_loopback_test(struct net_device *netdev)
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- struct device *dev;
- u8 *tx_frame;
+ u8 *tx_frame __free(kfree);
int i;
- dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -993,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
for (i = 0; i < num_frames; i++) {
if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 8;
- goto lbtest_free_frame;
+ goto remove_mac_filters;
}
}
@@ -1003,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
else if (valid_frames != num_frames)
ret = 10;
-lbtest_free_frame:
- devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
@@ -2486,6 +2483,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
+ break;
case TCP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
break;
@@ -2495,6 +2510,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
@@ -2518,6 +2551,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST)
@@ -2526,6 +2565,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST)
@@ -2564,6 +2609,33 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
}
}
+ if (nfc->data & RXH_GTP_TEID) {
+ switch (nfc->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
+
return hfld;
}
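The new RXH_GTP_TEID bit lets user space request hashing on the GTP tunnel endpoint ID, and the switch above resolves it to the TEID field matching the encapsulation level. A hedged sketch of the kind of request this path parses (standard ethtool_rxnfc structure; the flow type and data bit are the ones added in this series):

        struct ethtool_rxnfc nfc = {
                .cmd       = ETHTOOL_SRXFH,
                .flow_type = GTPU_EH_V4_FLOW,
                .data      = RXH_IP_SRC | RXH_IP_DST | RXH_GTP_TEID,
        };
        /* ice_parse_hash_flds() then sets ICE_FLOW_HASH_FLD_IPV4_SA/_DA
         * plus ICE_FLOW_HASH_FLD_GTPU_EH_TEID for this flow type.
         */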
@@ -2676,6 +2748,13 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
nfc->data |= (u64)RXH_L4_B_2_3;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
+ nfc->data |= (u64)RXH_GTP_TEID;
}
/**
@@ -3360,7 +3439,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
struct ice_pf *pf = ice_netdev_to_pf(dev);
/* only report timestamping if PTP is enabled */
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index ff82915ab497..2fd2e0cb483d 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -37,13 +37,13 @@
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
-#define ICE_FLOW_HASH_GTP_TEID \
+#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
-#define ICE_FLOW_HASH_GTP_IPV4_TEID \
- (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
-#define ICE_FLOW_HASH_GTP_IPV6_TEID \
- (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID)
#define ICE_FLOW_HASH_GTP_U_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
@@ -66,6 +66,20 @@
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)
+#define ICE_FLOW_HASH_GTP_U_UP \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
+#define ICE_FLOW_HASH_GTP_U_DWN \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN)
+#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN)
+
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
@@ -242,6 +256,13 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
index 92b5dac481cd..4fd15387a7e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -188,6 +188,8 @@ void ice_fwlog_deinit(struct ice_hw *hw)
if (hw->bus.func)
return;
+ ice_debugfs_pf_deinit(hw->back);
+
/* make sure FW logging is disabled to not put the FW in a weird state
* for the next driver load
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fc23dbe302b4..ee3f0d3e3f6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1618,6 +1618,25 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4t with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4e with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4u with input set IPv4 src/dst */
+ { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4d with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
+
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
@@ -1632,6 +1651,24 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
{ICE_FLOW_SEG_HDR_ESP,
ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6t with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6e with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6u with input set IPv6 src/dst */
+ { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6d with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
};
/**
@@ -1672,27 +1709,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1795,114 +1811,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
- if (q_idx >= vsi->num_rxq)
- return -EINVAL;
-
- return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
- if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
- /* set up individual rings */
- ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- int err = 0;
- u16 q_idx;
-
- qg_buf->num_txqs = 1;
-
- for (q_idx = 0; q_idx < count; q_idx++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
- int ret;
- int i;
-
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
- if (ret)
- return ret;
-
- ice_for_each_rxq(vsi, i)
- ice_tx_xsk_pool(vsi, i);
-
- return 0;
-}
-
/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
@@ -2849,61 +2757,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
- int i;
-
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
-
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
-
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
-
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
-
- /* disable each interrupt */
- ice_for_each_q_vector(vsi, i) {
- if (!vsi->q_vectors[i])
- continue;
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- }
-
- ice_flush(hw);
-
- /* don't call synchronize_irq() for VF's from the host */
- if (vsi->type == ICE_VSI_VF)
- return;
-
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(vsi->q_vectors[i]->irq.virq);
-}
-
-/**
* __ice_queue_set_napi - Set the napi instance for the queue
* @dev: device to which NAPI and queue belong
* @queue_index: Index of queue
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index bfcfc582a4c0..9cd23afe5f15 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -72,8 +64,6 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -120,8 +110,6 @@ void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool ena_ts);
-void ice_vsi_dis_irq(struct ice_vsi *vsi);
-
void ice_vsi_free_irq(struct ice_vsi *vsi);
void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index df6a68ab747e..33a164fa325a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -613,7 +613,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_prepare_for_reset(pf);
+ ice_ptp_prepare_for_reset(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
@@ -1649,8 +1649,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- /* Nothing to do here if sideband queue is not supported */
- if (!ice_is_sbq_supported(hw)) {
+ /* if mac_type is not generic, sideband is not supported
+ * and there's nothing to do here
+ */
+ if (!ice_is_generic_mac(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
@@ -4572,90 +4574,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-static int ice_start_eth(struct ice_vsi *vsi)
-{
- int err;
-
- err = ice_init_mac_fltr(vsi->back);
- if (err)
- return err;
-
- err = ice_vsi_open(vsi);
- if (err)
- ice_fltr_remove_all(vsi);
-
- return err;
-}
-
-static void ice_stop_eth(struct ice_vsi *vsi)
-{
- ice_fltr_remove_all(vsi);
- ice_vsi_close(vsi);
-}
-
-static int ice_init_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- int err;
-
- if (!vsi)
- return -EINVAL;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- err = ice_cfg_netdev(vsi);
- if (err)
- return err;
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- err = ice_init_mac_fltr(pf);
- if (err)
- goto err_init_mac_fltr;
-
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create_pf_port;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
- err = ice_register_netdev(vsi);
- if (err)
- goto err_register_netdev;
-
- err = ice_tc_indir_block_register(vsi);
- if (err)
- goto err_tc_indir_block_register;
-
- ice_napi_add(vsi);
-
- return 0;
-
-err_tc_indir_block_register:
- ice_unregister_netdev(vsi);
-err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create_pf_port:
-err_init_mac_fltr:
- ice_decfg_netdev(vsi);
- return err;
-}
-
-static void ice_deinit_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
- if (!vsi)
- return;
-
- ice_vsi_close(vsi);
- ice_unregister_netdev(vsi);
- ice_devlink_destroy_pf_port(pf);
- ice_tc_indir_block_unregister(vsi);
- ice_decfg_netdev(vsi);
-}
-
/**
* ice_wait_for_fw - wait for full FW readiness
* @hw: pointer to the hardware structure
@@ -4681,7 +4599,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
-static int ice_init_dev(struct ice_pf *pf)
+int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -4774,7 +4692,7 @@ err_init_pf:
return err;
}
-static void ice_deinit_dev(struct ice_pf *pf)
+void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
ice_deinit_pf(pf);
@@ -5079,31 +4997,47 @@ static void ice_deinit(struct ice_pf *pf)
/**
* ice_load - load pf by init hw and starting VSI
* @pf: pointer to the pf instance
+ *
+ * This function must be called with devl_lock held.
*/
int ice_load(struct ice_pf *pf)
{
- struct ice_vsi_cfg_params params = {};
struct ice_vsi *vsi;
int err;
- err = ice_init_dev(pf);
+ devl_assert_locked(priv_to_devlink(pf));
+
+ vsi = ice_get_main_vsi(pf);
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
return err;
- vsi = ice_get_main_vsi(pf);
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
+ err = ice_register_netdev(vsi);
if (err)
- goto err_vsi_cfg;
+ goto err_register_netdev;
- err = ice_start_eth(ice_get_main_vsi(pf));
+ err = ice_tc_indir_block_register(vsi);
if (err)
- goto err_start_eth;
- rtnl_unlock();
+ goto err_tc_indir_block_register;
+
+ ice_napi_add(vsi);
err = ice_init_rdma(pf);
if (err)
@@ -5117,29 +5051,35 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
- ice_vsi_close(ice_get_main_vsi(pf));
- rtnl_lock();
-err_start_eth:
- ice_vsi_decfg(ice_get_main_vsi(pf));
-err_vsi_cfg:
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
return err;
}
/**
* ice_unload - unload pf by stopping VSI and deinit hw
* @pf: pointer to the pf instance
+ *
+ * This function must be called with devl_lock held.
*/
void ice_unload(struct ice_pf *pf)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ devl_assert_locked(priv_to_devlink(pf));
+
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- rtnl_lock();
- ice_stop_eth(ice_get_main_vsi(pf));
- ice_vsi_decfg(ice_get_main_vsi(pf));
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_decfg_netdev(vsi);
}
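With ice_load()/ice_unload() now asserting devl_lock, every caller follows the same take-lock/call/unlock pattern visible in the probe and remove hunks below. A minimal illustrative helper (the function name is hypothetical; the APIs are the ones used in this patch):

	static int example_devlink_reload(struct ice_pf *pf)
	{
		struct devlink *devlink = priv_to_devlink(pf);
		int err;

		devl_lock(devlink);
		ice_unload(pf);		/* asserts devl_lock internally */
		err = ice_load(pf);	/* ditto */
		devl_unlock(devlink);

		return err;
	}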
/**
@@ -5237,27 +5177,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
goto err_init;
- err = ice_init_eth(pf);
- if (err)
- goto err_init_eth;
-
- err = ice_init_rdma(pf);
+ devl_lock(priv_to_devlink(pf));
+ err = ice_load(pf);
+ devl_unlock(priv_to_devlink(pf));
if (err)
- goto err_init_rdma;
+ goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
- ice_init_features(pf);
-
return 0;
err_init_devlink:
- ice_deinit_rdma(pf);
-err_init_rdma:
- ice_deinit_eth(pf);
-err_init_eth:
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+err_load:
ice_deinit(pf);
err_init:
pci_disable_device(pdev);
@@ -5340,8 +5276,6 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
- ice_debugfs_exit();
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5355,12 +5289,14 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_deinit_features(pf);
+
ice_deinit_devlink(pf);
- ice_deinit_rdma(pf);
- ice_deinit_eth(pf);
- ice_deinit(pf);
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+
+ ice_deinit(pf);
ice_vsi_release_all(pf);
ice_setup_mc_magic_wake(pf);
@@ -5753,6 +5689,10 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
@@ -5842,6 +5782,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ ice_debugfs_exit();
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
@@ -6737,6 +6678,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
+ struct ice_pf *pf = vsi->back;
u64 pkts, bytes;
int i;
@@ -6782,21 +6724,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
- /* clear prev counters after reset */
- if (vsi_stats->tx_packets < stats_prev->tx_packets ||
- vsi_stats->rx_packets < stats_prev->rx_packets) {
- stats_prev->tx_packets = 0;
- stats_prev->tx_bytes = 0;
- stats_prev->rx_packets = 0;
- stats_prev->rx_bytes = 0;
+ /* Update netdev counters, but keep in mind that values could start at
+ * a random value after PF reset. Since we increase the reported stat by
+ * the Cur - Prev delta, we need to be sure that Prev is valid. If it's
+ * not, skip this round.
+ */
+ if (likely(pf->stat_prev_loaded)) {
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
}
- /* update netdev counters */
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
-
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
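The rewritten block above replaces "zero prev on counter wrap" with "skip the delta until prev is known valid". The core idiom, as a small self-contained sketch (illustrative helper name):

	/* Fold cur into a monotonically growing total, but only once the
	 * previous snapshot is trustworthy; refresh the snapshot either way.
	 */
	static void example_accumulate(u64 *total, u64 *prev, u64 cur,
				       bool prev_valid)
	{
		if (prev_valid)
			*total += cur - *prev;
		*prev = cur;
	}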
@@ -7061,6 +7000,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each Rx queue; Tx queues are
+ * handled in ice_vsi_stop_tx_ring()
+ */
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
+
+ ice_flush(hw);
+
+ /* don't call synchronize_irq() for VF's from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*
@@ -7549,7 +7532,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_reset(pf);
+ ice_ptp_rebuild(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 3b6605c8585e..c11eba07283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -601,17 +601,13 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
/* Read the low 32 bit value */
raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
- /* For PHYs which don't implement a proper timestamp ready bitmap,
- * verify that the timestamp value is different from the last cached
- * timestamp. If it is not, skip this for now assuming it hasn't yet
- * been captured by hardware.
+ /* Devices using this interface always verify that the timestamp
+ * differs from the last cached timestamp value.
+ */
- if (!drop_ts && tx->verify_cached &&
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
return;
- if (tx->verify_cached && raw_tstamp)
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@@ -701,9 +697,11 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
hw = &pf->hw;
/* Read the Tx ready status first */
- err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
- if (err)
- return;
+ if (tx->has_ready_bitmap) {
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return;
+ }
/* Drop packets if the link went down */
link_up = ptp_port->link_up;
@@ -731,7 +729,8 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* If we do not, the hardware logic for generating a new
* interrupt can get stuck on some devices.
*/
- if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (tx->has_ready_bitmap &&
+ !(tstamp_ready & BIT_ULL(phy_idx))) {
if (drop_ts)
goto skip_ts_read;
@@ -751,7 +750,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* from the last cached timestamp. If it is not, skip this for
* now assuming it hasn't yet been captured by hardware.
*/
- if (!drop_ts && tx->verify_cached &&
+ if (!drop_ts && !tx->has_ready_bitmap &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
@@ -761,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
skip_ts_read:
spin_lock_irqsave(&tx->lock, flags);
- if (tx->verify_cached && raw_tstamp)
+ if (!tx->has_ready_bitmap && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
@@ -965,6 +964,22 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
+ * @pf: Board private structure
+ *
+ * Called by the clock owner to flush all the Tx timestamp trackers associated
+ * with the clock.
+ */
+static void
+ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
+}
+
+/**
* ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
* @pf: Board private structure
* @tx: Tx tracking structure to release
@@ -1014,7 +1029,7 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
tx->block = port / ICE_PORTS_PER_QUAD;
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
- tx->verify_cached = 0;
+ tx->has_ready_bitmap = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1037,7 +1052,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->verify_cached = 1;
+ tx->has_ready_bitmap = 0;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1430,7 +1445,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
@@ -1456,14 +1471,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
}
/**
- * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
* @pf: PF private structure
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
* Utility function to enable or disable Tx timestamp interrupt and threshold
*/
-static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
struct ice_hw *hw = &pf->hw;
int err = 0;
@@ -2162,7 +2177,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
config = &pf->ptp.tstamp_config;
@@ -2232,7 +2247,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -2616,7 +2631,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
err = ice_ptp_update_cached_phctime(pf);
@@ -2629,36 +2644,72 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
- * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ if (ptp->state != ICE_PTP_READY)
+ return;
+
+ ptp->state = ICE_PTP_RESETTING;
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_disable_timestamp_mode(pf);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+
+ if (reset_type == ICE_RESET_PFR)
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
+ * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
* @pf: Board private structure
+ *
+ * Companion function for ice_ptp_rebuild() which handles tasks that only the
+ * PTP clock owner instance should perform.
*/
-void ice_ptp_reset(struct ice_pf *pf)
+static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
u64 time_diff;
-
- if (test_bit(ICE_PFR_REQ, pf->state) ||
- !ice_pf_src_tmr_owned(pf))
- goto pfr;
+ int err;
err = ice_ptp_init_phc(hw);
if (err)
- goto err;
+ return err;
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
- goto err;
+ return err;
}
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Write the initial Time value to PHY and LAN using the cached PHC
@@ -2674,38 +2725,54 @@ void ice_ptp_reset(struct ice_pf *pf)
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ /* Flush software tracking of any outstanding timestamps since we're
+ * about to flush the PHY timestamp block.
+ */
+ ice_ptp_flush_all_tx_tracker(pf);
+
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
- goto err;
- }
+ return err;
-pfr:
- /* Init Tx structures */
- if (ice_is_e810(&pf->hw)) {
- err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
- } else {
- kthread_init_delayed_work(&ptp->port.ov_work,
- ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
- ptp->port.port_num);
+ ice_ptp_restart_all_phy(pf);
}
- if (err)
+
+ return 0;
+}
+
+/**
+ * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ int err;
+
+ if (ptp->state == ICE_PTP_READY) {
+ ice_ptp_prepare_for_reset(pf, reset_type);
+ } else if (ptp->state != ICE_PTP_RESETTING) {
+ err = -EINVAL;
+ dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
goto err;
+ }
- set_bit(ICE_FLAG_PTP, pf->flags);
+ if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
+ err = ice_ptp_rebuild_owner(pf);
+ if (err)
+ goto err;
+ }
- /* Restart the PHY timestamping block */
- if (!test_bit(ICE_PFR_REQ, pf->state) &&
- ice_pf_src_tmr_owned(pf))
- ice_ptp_restart_all_phy(pf);
+ ptp->state = ICE_PTP_READY;
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2714,6 +2781,7 @@ pfr:
return;
err:
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
@@ -2923,39 +2991,6 @@ int ice_ptp_clock_index(struct ice_pf *pf)
}
/**
- * ice_ptp_prepare_for_reset - Prepare PTP for reset
- * @pf: Board private structure
- */
-void ice_ptp_prepare_for_reset(struct ice_pf *pf)
-{
- struct ice_ptp *ptp = &pf->ptp;
- u8 src_tmr;
-
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_disable_timestamp_mode(pf);
-
- kthread_cancel_delayed_work_sync(&ptp->work);
-
- if (test_bit(ICE_PFR_REQ, pf->state))
- return;
-
- ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
-
- /* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
-
- src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
-
- /* Disable source clock */
- wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
-
- /* Acquire PHC and system timer to restore after reset */
- ptp->reset_time = ktime_get_real_ns();
-}
-
-/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -2967,7 +3002,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
+ int err;
err = ice_ptp_init_phc(hw);
if (err) {
@@ -3002,7 +3037,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
goto err_exit;
}
@@ -3195,6 +3230,8 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ptp->state = ICE_PTP_INITIALIZING;
+
ice_ptp_init_phy_model(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3219,12 +3256,13 @@ void ice_ptp_init(struct ice_pf *pf)
/* Configure initial Tx interrupt settings */
ice_ptp_cfg_tx_interrupt(pf);
- set_bit(ICE_FLAG_PTP, pf->flags);
- err = ice_ptp_init_work(pf, ptp);
+ err = ice_ptp_create_auxbus_device(pf);
if (err)
goto err;
- err = ice_ptp_create_auxbus_device(pf);
+ ptp->state = ICE_PTP_READY;
+
+ err = ice_ptp_init_work(pf, ptp);
if (err)
goto err;
@@ -3237,7 +3275,7 @@ err:
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- clear_bit(ICE_FLAG_PTP, pf->flags);
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3250,9 +3288,11 @@ err:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
+ pf->ptp.state = ICE_PTP_UNINIT;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_disable_timestamp_mode(pf);
@@ -3260,8 +3300,6 @@ void ice_ptp_release(struct ice_pf *pf)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
@@ -3271,6 +3309,9 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
+ if (ice_pf_src_tmr_owned(pf))
+ ice_ptp_unregister_auxbus_driver(pf);
+
if (!pf->ptp.clock)
return;
@@ -3280,7 +3321,5 @@ void ice_ptp_release(struct ice_pf *pf)
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
- ice_ptp_unregister_auxbus_driver(pf);
-
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 087dd32d8762..3af20025043a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -100,7 +100,7 @@ struct ice_perout_channel {
* the last timestamp we read for a given index. If the current timestamp
* value is the same as the cached value, we assume a new timestamp hasn't
* been captured. This avoids reporting stale timestamps to the stack. This is
- * only done if the verify_cached flag is set in ice_ptp_tx structure.
+ * only done if the has_ready_bitmap flag is not set in the ice_ptp_tx structure.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
@@ -130,7 +130,9 @@ enum ice_tx_tstamp_work {
* @init: if true, the tracker is initialized;
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
- * @verify_cached: if true, verify new timestamp differs from last read value
+ * @has_ready_bitmap: if true, the hardware has a valid Tx timestamp ready
+ * bitmap register. If false, fall back to verifying new
+ * timestamp values against the previously cached copy.
* @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
@@ -143,7 +145,7 @@ struct ice_ptp_tx {
u8 len;
u8 init : 1;
u8 calibrating : 1;
- u8 verify_cached : 1;
+ u8 has_ready_bitmap : 1;
s8 last_ll_ts_idx_read;
};
@@ -203,8 +205,17 @@ struct ice_ptp_port_owner {
#define GLTSYN_TGT_H_IDX_MAX 4
+enum ice_ptp_state {
+ ICE_PTP_UNINIT = 0,
+ ICE_PTP_INITIALIZING,
+ ICE_PTP_READY,
+ ICE_PTP_RESETTING,
+ ICE_PTP_ERROR,
+};
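Taken together, the hunks in this file give the new enum a simple lifecycle. A hedged summary, derived from ice_ptp_init(), ice_ptp_prepare_for_reset(), ice_ptp_rebuild() and ice_ptp_release() above, plus an illustrative readiness check (helper name hypothetical):

	/*
	 * ICE_PTP_UNINIT -> ICE_PTP_INITIALIZING -> ICE_PTP_READY  (ice_ptp_init)
	 * ICE_PTP_READY  -> ICE_PTP_RESETTING    -> ICE_PTP_READY  (prepare_for_reset / rebuild)
	 * any state      -> ICE_PTP_ERROR                          (init/rebuild failure)
	 * ICE_PTP_READY  -> ICE_PTP_UNINIT                         (ice_ptp_release)
	 */
	static bool example_ptp_ready(const struct ice_pf *pf)
	{
		return pf->ptp.state == ICE_PTP_READY;
	}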
+
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
* @ports_owner: data for the auxiliary driver owner
@@ -227,6 +238,7 @@ struct ice_ptp_port_owner {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
struct ice_ptp_port_owner ports_owner;
@@ -304,8 +316,9 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
-void ice_ptp_reset(struct ice_pf *pf);
-void ice_ptp_prepare_for_reset(struct ice_pf *pf);
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
@@ -345,8 +358,15 @@ ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
return 0;
}
-static inline void ice_ptp_reset(struct ice_pf *pf) { }
-static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_rebuild(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
+
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b0f78c2f2790..a958fcf3e6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -240,7 +240,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
return vsi;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 839e5da24ad5..f8f1d2bdc1be 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -143,8 +143,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
+ ring->vsi->back->hw_rx_eipe_error++;
+ return;
+ }
+
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
goto checksum_fail;
if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
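The reordered checks above stop treating an outer (tunnel) IP checksum error as a generic failure. In shape (the *_err names are shorthand for the XSUM_EIPE_S / XSUM_IPE_S status bits; control flow condensed):

	if (ipv4 && outer_ip_csum_err) {
		ring->vsi->back->hw_rx_eipe_error++;	/* count it ... */
		return;					/* ... but leave skb without a csum verdict */
	}
	if (ipv4 && inner_ip_csum_err)
		goto checksum_fail;			/* still a hard failure */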
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a508e917ce5f..9ff92dba5823 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -132,6 +132,7 @@ enum ice_mac_type {
ICE_MAC_E810,
ICE_MAC_E830,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K_E825,
};
/* Media Types */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 2ffdae9a82df..21d26e19338a 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -280,12 +280,6 @@ int ice_vf_reconfig_vsi(struct ice_vf *vf)
return err;
}
- /* Update the lan_vsi_num field since it might have been changed. The
- * PF lan_vsi_idx number remains the same so we don't need to change
- * that.
- */
- vf->lan_vsi_num = vsi->vsi_num;
-
return 0;
}
@@ -315,7 +309,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
* vf->lan_vsi_idx
*/
vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
return 0;
}
@@ -1315,13 +1308,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
* @vf: VF to remove access to VSI for
*/
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 0cc9034065c5..fec16919ec19 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -109,11 +109,6 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 6f2328a049bf..1ff9818b4c84 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -499,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -545,27 +545,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
* ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
+ * @vsi: VSI to check queue ID against
* @qid: VSI relative queue ID
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
+ return qid < vsi->alloc_txq;
}
/**
@@ -1323,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
*/
q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1345,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1450,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1476,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
} else if (q_map) {
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1532,7 +1525,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_rx++;
@@ -1546,7 +1539,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_tx++;
@@ -1703,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->txq.headwb_enabled ||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 60dfbe05980a..3a4115869153 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index f001553e1a1a..8e4ff3af86c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
return -EINVAL;
- if (vsi_id != vf->lan_vsi_num)
- return -EINVAL;
-
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2eecd0f39aa6..1857220d27fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -218,42 +218,28 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
int err;
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, size);
- qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
if (err)
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
- err = ice_vsi_cfg_rxq(rx_ring);
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
if (err)
return err;
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 0acc125decb3..e7a036538246 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
/**
* enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
- __IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
+ IDPF_VC_CORE_INIT,
IDPF_FLAGS_NBITS,
};
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
- STATE(IDPF_VC_CREATE_VPORT) \
- STATE(IDPF_VC_CREATE_VPORT_ERR) \
- STATE(IDPF_VC_ENA_VPORT) \
- STATE(IDPF_VC_ENA_VPORT_ERR) \
- STATE(IDPF_VC_DIS_VPORT) \
- STATE(IDPF_VC_DIS_VPORT_ERR) \
- STATE(IDPF_VC_DESTROY_VPORT) \
- STATE(IDPF_VC_DESTROY_VPORT_ERR) \
- STATE(IDPF_VC_CONFIG_TXQ) \
- STATE(IDPF_VC_CONFIG_TXQ_ERR) \
- STATE(IDPF_VC_CONFIG_RXQ) \
- STATE(IDPF_VC_CONFIG_RXQ_ERR) \
- STATE(IDPF_VC_ENA_QUEUES) \
- STATE(IDPF_VC_ENA_QUEUES_ERR) \
- STATE(IDPF_VC_DIS_QUEUES) \
- STATE(IDPF_VC_DIS_QUEUES_ERR) \
- STATE(IDPF_VC_MAP_IRQ) \
- STATE(IDPF_VC_MAP_IRQ_ERR) \
- STATE(IDPF_VC_UNMAP_IRQ) \
- STATE(IDPF_VC_UNMAP_IRQ_ERR) \
- STATE(IDPF_VC_ADD_QUEUES) \
- STATE(IDPF_VC_ADD_QUEUES_ERR) \
- STATE(IDPF_VC_DEL_QUEUES) \
- STATE(IDPF_VC_DEL_QUEUES_ERR) \
- STATE(IDPF_VC_ALLOC_VECTORS) \
- STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_DEALLOC_VECTORS) \
- STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_SET_SRIOV_VFS) \
- STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
- STATE(IDPF_VC_GET_RSS_LUT) \
- STATE(IDPF_VC_GET_RSS_LUT_ERR) \
- STATE(IDPF_VC_SET_RSS_LUT) \
- STATE(IDPF_VC_SET_RSS_LUT_ERR) \
- STATE(IDPF_VC_GET_RSS_KEY) \
- STATE(IDPF_VC_GET_RSS_KEY_ERR) \
- STATE(IDPF_VC_SET_RSS_KEY) \
- STATE(IDPF_VC_SET_RSS_KEY_ERR) \
- STATE(IDPF_VC_GET_STATS) \
- STATE(IDPF_VC_GET_STATS_ERR) \
- STATE(IDPF_VC_ADD_MAC_ADDR) \
- STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
- STATE(IDPF_VC_DEL_MAC_ADDR) \
- STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
- STATE(IDPF_VC_GET_PTYPE_INFO) \
- STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
- STATE(IDPF_VC_LOOPBACK_STATE) \
- STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
- STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
/**
* enum idpf_vport_reset_cause - Vport soft reset causes
* @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
* @sw_marker_wq: workqueue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_vport {
u16 num_txq;
@@ -408,12 +337,7 @@ struct idpf_vport {
bool link_up;
u32 link_speed_mbps;
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-
- wait_queue_head_t vchnl_wq;
wait_queue_head_t sw_marker_wq;
- struct mutex vc_buf_lock;
};
/**
@@ -476,15 +400,11 @@ struct idpf_vport_user_config_data {
* enum idpf_vport_config_flags - Vport config flags
* @IDPF_VPORT_REG_NETDEV: Register netdev
* @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
* @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
*/
enum idpf_vport_config_flags {
IDPF_VPORT_REG_NETDEV,
IDPF_VPORT_UP_REQUESTED,
- IDPF_VPORT_ADD_MAC_REQ,
- IDPF_VPORT_DEL_MAC_REQ,
IDPF_VPORT_CONFIG_FLAGS_NBITS,
};
@@ -555,11 +475,13 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- void *req_qs_chunks;
+ struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
+struct idpf_vc_xn_manager;
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -601,9 +523,7 @@ struct idpf_vport_config {
* @stats_task: Periodic statistics retrieval task
* @stats_wq: Workqueue for statistics task
* @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
@@ -659,10 +579,8 @@ struct idpf_adapter {
struct delayed_work stats_task;
struct workqueue_struct *stats_wq;
struct virtchnl2_get_capabilities caps;
+ struct idpf_vc_xn_manager *vcxn_mngr;
- wait_queue_head_t vchnl_wq;
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
int num_vfs;
bool crc_enable;
@@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
- struct idpf_vec_regs *reg_vals);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
- u16 *vecids, int num_vecids,
- struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg);
void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
- bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
- struct idpf_vport_user_config_data *config_data,
- u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index c7f43d2fcd13..4849590a5591 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -516,6 +516,8 @@ post_buffs_out:
/* Wrap to end of end ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ dma_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
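The dma_wmb() added above enforces the classic producer-side ordering rule: descriptor and buffer writes must be visible to the device before the doorbell write that lets hardware consume them. A minimal sketch of the pattern (wr32 and the field names are taken from the hunk; the function name is illustrative):

	static void example_bump_tail(struct idpf_hw *hw,
				      struct idpf_ctlq_info *cq)
	{
		/* ... fill descriptors / buffers owned by software ... */

		/* order those writes against the doorbell below */
		dma_wmb();

		/* ring the doorbell; hardware may consume immediately */
		wr32(hw, cq->reg.tail, cq->next_to_post);
	}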
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
int err = 0;
u16 i;
- if (*num_q_msg == 0)
- return 0;
- else if (*num_q_msg > cq->ring_size)
- return -EBADR;
-
/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 8dee098bbfb0..e8e046ef2f0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+ struct {
+ u32 rsvd;
+ u16 data;
+ u16 flags;
+ } sw_cookie;
} ctx;
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 34ad1ac46b78..3df9935685e9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 58179bd733ff..5d3532c27d57 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2,14 +2,11 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
-const char * const idpf_vport_vc_state_str[] = {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
* @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
- int err;
-
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
-
- err = idpf_send_dealloc_vectors_msg(adapter);
- if (err)
- dev_err(&adapter->pdev->dev,
- "Failed to deallocate vectors: %d\n", err);
-
+ idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -975,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
- int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
@@ -985,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
idpf_send_destroy_vport_msg(vport);
- /* Set all bits as we dont know on which vc_state the vport vhnl_wq
- * is waiting on and wakeup the virtchnl workqueue even if it is
- * waiting for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, vport->vc_state);
- wake_up(&vport->vchnl_wq);
-
- mutex_destroy(&vport->vc_buf_lock);
-
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, vport->vc_state);
-
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
@@ -1253,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+ idpf_recv_mb_msg(adapter);
}
/**
@@ -1543,9 +1518,7 @@ void idpf_init_task(struct work_struct *work)
vport_config = adapter->vport_config[index];
init_waitqueue_head(&vport->sw_marker_wq);
- init_waitqueue_head(&vport->vchnl_wq);
- mutex_init(&vport->vc_buf_lock);
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1823,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
goto unlock_mutex;
}
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
/* Initialize the state machine, also allocate memory and request
* resources
*/
@@ -1902,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1951,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
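The offsetof() bound above implements a "copy config, skip sync primitives" idiom. A small sketch with an illustrative wrapper name:

	static void example_copy_vport_cfg(struct idpf_vport *dst,
					   const struct idpf_vport *src)
	{
		/* Members laid out before link_speed_mbps are plain
		 * configuration and safe to memcpy; sw_marker_wq and any
		 * other synchronization members live at or past that
		 * offset and are deliberately left untouched.
		 */
		memcpy(dst, src, offsetof(struct idpf_vport, link_speed_mbps));
	}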
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index e1febc74cefd..f784eea044bd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
idpf_sriov_configure(pdev, 0);
idpf_vc_core_deinit(adapter);
+
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ destroy_wqs:
adapter->vport_config = NULL;
kfree(adapter->netdevs);
adapter->netdevs = NULL;
+ kfree(adapter->vcxn_mngr);
+ adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);
- init_waitqueue_head(&adapter->vchnl_wq);
-
INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2f8ad79ae3f0..6dd7a66bb897 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 8ade4e3a9fe1..629cb5cb7c9f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_VF_ITR_IDX_SPACING 0x40
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 390977a76de2..a5f9b7a5effe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2,46 +2,192 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ * buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
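
The lifecycle these states encode, sketched for orientation (editorial summary, not part of the patch):

        IDLE -> WAITING -> COMPLETED_SUCCESS / COMPLETED_FAILED -> IDLE   (sync send)
        IDLE -> ASYNC -> IDLE                                             (async send)
        any state -> SHUTDOWN, terminal                                   (VC teardown)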
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses this to signal when a reply is
+ * available; uses the kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply.iov_len; it will be
+ * truncated on its way to the receiving thread according to
+ * reply.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used to retrieve the transaction on reply receive, part of
+ * the cookie
+ * @salt: changed on every message to make the cookie unique, part of the
+ * cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
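
The @idx/@salt pair is what the 16-bit sw_cookie carries. A minimal sketch of the pack/unpack round trip using the masks above (demo_* names are hypothetical; the real code open-codes this with FIELD_PREP()/FIELD_GET() further down):

        #include <linux/bitfield.h>

        static u16 demo_make_cookie(u8 idx, u8 salt)
        {
                return FIELD_PREP(IDPF_VC_XN_SALT_M, salt) |
                       FIELD_PREP(IDPF_VC_XN_IDX_M, idx);
        }

        static void demo_split_cookie(u16 cookie, u8 *idx, u8 *salt)
        {
                *idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);     /* bits 7:0 */
                *salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);   /* bits 15:8 */
        }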
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer; may be NULL but must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: make bitmap access synchronous where necessary
+ * @salt: incremented on every message so each cookie is unique
+ */
+struct idpf_vc_xn_manager {
+ struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+ DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spinlock_t xn_bm_lock;
+ u8 salt;
+};
+
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+ int i;
+
+ for (i = 0; i < num_max_vports; i++)
+ if (adapter->vport_ids[i] == v_id)
+ return adapter->vports[i];
+
+ return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+ const struct virtchnl2_event *v2e)
+{
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+ if (!vport) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+ le32_to_cpu(v2e->vport_id));
+ return;
+ }
+ np = netdev_priv(vport->netdev);
+
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+ if (vport->link_up == v2e->link_status)
+ return;
+
+ vport->link_up = v2e->link_status;
+
+ if (np->state != __IDPF_VPORT_UP)
+ return;
+
+ if (vport->link_up) {
+ netif_tx_start_all_queues(vport->netdev);
+ netif_carrier_on(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+}
/**
* idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
* @ctlq_msg: message to copy from
*
* Receive virtchnl event message
*/
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
struct idpf_ctlq_msg *ctlq_msg)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ int payload_size = ctlq_msg->ctx.indirect.payload->size;
struct virtchnl2_event *v2e;
- bool link_status;
u32 event;
+ if (payload_size < sizeof(*v2e)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode,
+ payload_size);
+ return;
+ }
+
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
switch (event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
- link_status = v2e->link_status;
-
- if (vport->link_up == link_status)
- break;
-
- vport->link_up = link_status;
- if (np->state == __IDPF_VPORT_UP) {
- if (vport->link_up) {
- netif_carrier_on(vport->netdev);
- netif_tx_start_all_queues(vport->netdev);
- } else {
- netif_tx_stop_all_queues(vport->netdev);
- netif_carrier_off(vport->netdev);
- }
- }
- break;
+ idpf_handle_event_link(adapter, v2e);
+ return;
default:
- dev_err(&vport->adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Unknown event %d from PF\n", event);
break;
}
@@ -93,13 +239,14 @@ err_kfree:
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
*
* Will prepare the control queue message and initiate the send API
*
* Returns 0 on success, negative on failure
*/
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg)
+ u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
err = -ENOMEM;
goto dma_alloc_error;
}
- memcpy(dma_mem->va, msg, msg_size);
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (msg && msg_size)
+ memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
+ ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err)
@@ -159,592 +310,432 @@ dma_mem_error:
return err;
}
-/**
- * idpf_find_vport - Find vport pointer from control queue message
- * @adapter: driver specific private structure
- * @vport: address of vport pointer to copy the vport from adapters vport list
- * @ctlq_msg: control queue message
+/* API for virtchnl "transaction" support ("xn" for short).
*
- * Return 0 on success, error value on failure. Also this function does check
- * for the opcodes which expect to receive payload and return error value if
- * it is not the case.
+ * We are reusing the completion lock to serialize accesses to the
+ * transaction state for simplicity, but it could just as well be a separate
+ * synchronization primitive. For now, this API is only used from within a
+ * workqueue context; raw_spin_lock() is enough.
*/
-static int idpf_find_vport(struct idpf_adapter *adapter,
- struct idpf_vport **vport,
- struct idpf_ctlq_msg *ctlq_msg)
-{
- bool no_op = false, vid_found = false;
- int i, err = 0;
- char *vc_msg;
- u32 v_id;
+/**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_lock(xn) \
+ raw_spin_lock(&(xn)->completed.wait.lock)
- vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
- if (!vc_msg)
- return -ENOMEM;
+/**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_unlock(xn) \
+ raw_spin_unlock(&(xn)->completed.wait.lock)
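
These macros borrow the raw spinlock embedded in the completion's wait queue instead of adding a dedicated lock. A minimal sketch of the same pattern (demo_* names hypothetical; assumes only a struct that embeds a struct completion next to the state it guards):

        #include <linux/completion.h>

        struct demo_xn {
                struct completion done;
                int state;      /* protected by done.wait.lock */
        };

        static void demo_set_state(struct demo_xn *xn, int state)
        {
                raw_spin_lock(&xn->done.wait.lock);
                xn->state = state;
                raw_spin_unlock(&xn->done.wait.lock);
        }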
- if (ctlq_msg->data_len) {
- size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+/**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+ * reset the transaction state.
+ * @xn: struct idpf_vc_xn to update
+ */
+static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
+{
+ xn->reply.iov_base = NULL;
+ xn->reply.iov_len = 0;
- if (!payload_size) {
- dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
- kfree(vc_msg);
+ if (xn->state != IDPF_VC_XN_SHUTDOWN)
+ xn->state = IDPF_VC_XN_IDLE;
+}
- return -EINVAL;
- }
+/**
+ * idpf_vc_xn_init - Initialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
+ */
+static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+{
+ int i;
- memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
- }
-
- switch (ctlq_msg->cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- goto free_vc_msg;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_EVENT:
- v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
- break;
- default:
- no_op = true;
- break;
- }
+ spin_lock_init(&vcxn_mngr->xn_bm_lock);
- if (no_op)
- goto free_vc_msg;
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- for (i = 0; i < idpf_get_max_vports(adapter); i++) {
- if (adapter->vport_ids[i] == v_id) {
- vid_found = true;
- break;
- }
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
+ init_completion(&xn->completed);
}
- if (vid_found)
- *vport = adapter->vports[i];
- else
- err = -EINVAL;
-
-free_vc_msg:
- kfree(vc_msg);
-
- return err;
+ bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
/**
- * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @err_enum: err bit to set on error
+ * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
*
- * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
- * negative on failure.
+ * All waiting threads will be woken up and their transactions aborted. Further
+ * operations on those objects will fail.
*/
-static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state err_enum)
+static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
- if (ctlq_msg->cookie.mbx.chnl_retval) {
- if (vport)
- set_bit(err_enum, vport->vc_state);
- else
- set_bit(err_enum, adapter->vc_state);
+ int i;
- return -EINVAL;
- }
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
- if (vport)
- memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
- else
- memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- return 0;
+ idpf_vc_xn_lock(xn);
+ xn->state = IDPF_VC_XN_SHUTDOWN;
+ idpf_vc_xn_release_bufs(xn);
+ idpf_vc_xn_unlock(xn);
+ complete_all(&xn->completed);
+ }
}
/**
- * idpf_recv_vchnl_op - helper function with common logic when handling the
- * reception of VIRTCHNL OPs.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @state: state bit used on timeout check
- * @err_state: err bit to set on error
+ * idpf_vc_xn_pop_free - Pop a free transaction from free list
+ * @vcxn_mngr: transaction manager to pop from
+ *
+ * Returns NULL if there are no free transactions available
*/
-static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_state)
+static
+struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
- wait_queue_head_t *vchnl_wq;
- int err;
+ struct idpf_vc_xn *xn = NULL;
+ unsigned long free_idx;
- if (vport)
- vchnl_wq = &vport->vchnl_wq;
- else
- vchnl_wq = &adapter->vchnl_wq;
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ if (free_idx == IDPF_VC_XN_RING_LEN)
+ goto do_unlock;
- err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
- if (wq_has_sleeper(vchnl_wq)) {
- if (vport)
- set_bit(state, vport->vc_state);
- else
- set_bit(state, adapter->vc_state);
+ clear_bit(free_idx, vcxn_mngr->free_xn_bm);
+ xn = &vcxn_mngr->ring[free_idx];
+ xn->salt = vcxn_mngr->salt++;
- wake_up(vchnl_wq);
- } else {
- if (!err) {
- dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
- ctlq_msg->cookie.mbx.chnl_opcode);
- } else {
- /* Clear the errors since there is no sleeper to pass
- * them on
- */
- if (vport)
- clear_bit(err_state, vport->vc_state);
- else
- clear_bit(err_state, adapter->vc_state);
- }
- }
+do_unlock:
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
+
+ return xn;
}
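
Together with idpf_vc_xn_push_free() below, this is a small bitmap-backed object pool: allocation scans for a set bit under the lock, while freeing is a single atomic set_bit(). The same pattern in isolation (demo_* names hypothetical):

        #define DEMO_POOL_LEN   8

        static DECLARE_BITMAP(demo_free_bm, DEMO_POOL_LEN);
        static DEFINE_SPINLOCK(demo_bm_lock);

        static int demo_pool_get(void)
        {
                unsigned long idx;

                spin_lock_bh(&demo_bm_lock);
                idx = find_first_bit(demo_free_bm, DEMO_POOL_LEN);
                if (idx < DEMO_POOL_LEN)
                        clear_bit(idx, demo_free_bm);
                spin_unlock_bh(&demo_bm_lock);

                return idx < DEMO_POOL_LEN ? (int)idx : -ENOSPC;
        }

        static void demo_pool_put(unsigned int idx)
        {
                set_bit(idx, demo_free_bm);     /* atomic, no lock needed */
        }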
/**
- * idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
- * @op: virtchannel operation code
- * @msg: Received message holding buffer
- * @msg_size: message size
- *
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * idpf_vc_xn_push_free - Push a free transaction to free list
+ * @vcxn_mngr: transaction manager to push to
+ * @xn: transaction to push
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size)
+static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
+ struct idpf_vc_xn *xn)
{
- struct idpf_vport *vport = NULL;
- struct idpf_ctlq_msg ctlq_msg;
- struct idpf_dma_mem *dma_mem;
- bool work_done = false;
- int num_retry = 2000;
- u16 num_q_msg;
- int err;
-
- while (1) {
- struct idpf_vport_config *vport_config;
- int payload_size = 0;
-
- /* Try to get one message */
- num_q_msg = 1;
- dma_mem = NULL;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
- /* If no message then decide if we have to retry based on
- * opcode
- */
- if (err || !num_q_msg) {
- /* Increasing num_retry to consider the delayed
- * responses because of large number of VF's mailbox
- * messages. If the mailbox message is received from
- * the other side, we come out of the sleep cycle
- * immediately else we wait for more time.
- */
- if (!op || !num_retry--)
- break;
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
- err = -EIO;
- break;
- }
- msleep(20);
- continue;
- }
+ idpf_vc_xn_release_bufs(xn);
+ set_bit(xn->idx, vcxn_mngr->free_xn_bm);
+}
- /* If we are here a message is received. Check if we are looking
- * for a specific message based on opcode. If it is different
- * ignore and post buffers
+/**
+ * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @params: parameters for this particular transaction including
+ * -vc_op: virtchnl operation to send
+ * -send_buf: kvec iov for send buf and len
+ * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
+ * -timeout_ms: timeout waiting for a reply (milliseconds)
+ * -async: don't wait for message reply, will lose caller context
+ * -async_handler: callback to handle async replies
+ *
+ * @returns >= 0 for success, the size of the initial reply (may or may not be
+ * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
+ * error.
+ */
+static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
+{
+ const struct kvec *send_buf = &params->send_buf;
+ struct idpf_vc_xn *xn;
+ ssize_t retval;
+ u16 cookie;
+
+ xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
+ /* no free transactions available */
+ if (!xn)
+ return -ENOSPC;
+
+ idpf_vc_xn_lock(xn);
+ if (xn->state == IDPF_VC_XN_SHUTDOWN) {
+ retval = -ENXIO;
+ goto only_unlock;
+ } else if (xn->state != IDPF_VC_XN_IDLE) {
+ /* We're just going to clobber this transaction even though
+ * it's not IDLE. If we don't reuse it we could theoretically
+ * eventually leak all the free transactions and not be able to
+ * send any messages. At least this way we make an attempt to
+ * remain functional even though something really bad is
+ * happening that's corrupting what was supposed to be free
+ * transactions.
*/
- if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
- goto post_buffs;
+ WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
+ xn->idx, xn->vc_op);
+ }
- err = idpf_find_vport(adapter, &vport, &ctlq_msg);
- if (err)
- goto post_buffs;
+ xn->reply = params->recv_buf;
+ xn->reply_sz = 0;
+ xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
+ xn->vc_op = params->vc_op;
+ xn->async_handler = params->async_handler;
+ idpf_vc_xn_unlock(xn);
- if (ctlq_msg.data_len)
- payload_size = ctlq_msg.ctx.indirect.payload->size;
+ if (!params->async)
+ reinit_completion(&xn->completed);
+ cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- /* All conditions are met. Either a message requested is
- * received or we received a message to be processed
- */
- switch (ctlq_msg.cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
- ctlq_msg.cookie.mbx.chnl_opcode,
- ctlq_msg.cookie.mbx.chnl_retval);
- err = -EBADMSG;
- } else if (msg) {
- memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
- min_t(int, payload_size, msg_size));
- }
- work_done = true;
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_QUEUES,
- IDPF_VC_ADD_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DEL_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- /* This message can only be sent asynchronously. As
- * such we'll have lost the context in which it was
- * called and thus can only really report if it looks
- * like an error occurred. Don't bother setting ERR bit
- * or waking chnl_wq since no work queue will be waiting
- * to read the message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- }
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously. We don't
- * normally print errors here, instead
- * prefer to handle errors in the function
- * calling wait_for_event. However, if
- * asynchronous, the context in which the
- * message was sent is lost. We can't really do
- * anything about at it this point, but we
- * should at a minimum indicate that it looks
- * like something went wrong. Also don't bother
- * setting ERR bit or waking vchnl_wq since no
- * one will be waiting to read the async
- * message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_MAC_ADDR,
- IDPF_VC_ADD_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously like the
- * VIRTCHNL2_OP_ADD_MAC_ADDR
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_MAC_ADDR,
- IDPF_VC_DEL_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_EVENT:
- idpf_recv_event_msg(vport, &ctlq_msg);
- break;
- default:
- dev_warn(&adapter->pdev->dev,
- "Unhandled virtchnl response %d\n",
- ctlq_msg.cookie.mbx.chnl_opcode);
- break;
- }
+ retval = idpf_send_mb_msg(adapter, params->vc_op,
+ send_buf->iov_len, send_buf->iov_base,
+ cookie);
+ if (retval) {
+ idpf_vc_xn_lock(xn);
+ goto release_and_unlock;
+ }
-post_buffs:
- if (ctlq_msg.data_len)
- dma_mem = ctlq_msg.ctx.indirect.payload;
- else
- num_q_msg = 0;
+ if (params->async)
+ return 0;
- err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
- &num_q_msg, &dma_mem);
- /* If post failed clear the only buffer we supplied */
- if (err && dma_mem)
- dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
- dma_mem->va, dma_mem->pa);
+ wait_for_completion_timeout(&xn->completed,
+ msecs_to_jiffies(params->timeout_ms));
- /* Applies only if we are looking for a specific opcode */
- if (work_done)
- break;
+ /* No need to check the return value; we check the final state of the
+ * transaction below. It's possible the transaction actually gets more
+ * time than the specified timeout if we get preempted here, after
+ * wait_for_completion_timeout returns. This should be a non-issue,
+ * however.
+ */
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_SHUTDOWN:
+ retval = -ENXIO;
+ goto only_unlock;
+ case IDPF_VC_XN_WAITING:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed out (op %d, %dms)\n",
+ params->vc_op, params->timeout_ms);
+ retval = -ETIME;
+ break;
+ case IDPF_VC_XN_COMPLETED_SUCCESS:
+ retval = xn->reply_sz;
+ break;
+ case IDPF_VC_XN_COMPLETED_FAILED:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
+ params->vc_op);
+ retval = -EIO;
+ break;
+ default:
+ /* Invalid state. */
+ WARN_ON_ONCE(1);
+ retval = -EIO;
+ break;
}
- return err;
+release_and_unlock:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+ /* If we receive a VC reply after here, it will be dropped. */
+only_unlock:
+ idpf_vc_xn_unlock(xn);
+
+ return retval;
}
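
A hypothetical caller, shaped like the converted send helpers later in this patch, showing the intended calling convention (stack buffers are fine here because the synchronous path waits for the reply before returning):

        static int demo_send_simple_op(struct idpf_adapter *adapter, u32 vport_id)
        {
                struct idpf_vc_xn_params xn_params = {};
                struct virtchnl2_vport v_id = {};
                ssize_t reply_sz;

                v_id.vport_id = cpu_to_le32(vport_id);

                xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
                xn_params.send_buf.iov_base = &v_id;
                xn_params.send_buf.iov_len = sizeof(v_id);
                xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

                reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

                return reply_sz < 0 ? reply_sz : 0;
        }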
/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
*
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
+ * For async sends we're going to lose the caller's context, so if an
+ * async_handler was provided it can deal with the reply; otherwise we'll just
+ * check and report if there is an error.
*/
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check,
- int timeout)
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
{
- int time_to_wait, num_waits;
- wait_queue_head_t *vchnl_wq;
- unsigned long *vc_state;
+ int err = 0;
- time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
- num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ err = -EINVAL;
+ goto release_bufs;
+ }
- if (vport) {
- vchnl_wq = &vport->vchnl_wq;
- vc_state = vport->vc_state;
- } else {
- vchnl_wq = &adapter->vchnl_wq;
- vc_state = adapter->vc_state;
+ if (xn->async_handler) {
+ err = xn->async_handler(adapter, xn, ctlq_msg);
+ goto release_bufs;
+ }
+
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
}
- while (num_waits) {
- int event;
+release_bufs:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+ return err;
+}
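
For reference, a hypothetical handler matching the async_vc_cb signature; it runs from the mailbox task, so the sender's stack is long gone and any reply buffer must not have lived there:

        static int demo_async_done(struct idpf_adapter *adapter,
                                   struct idpf_vc_xn *xn,
                                   const struct idpf_ctlq_msg *ctlq_msg)
        {
                if (ctlq_msg->cookie.mbx.chnl_retval) {
                        dev_err_ratelimited(&adapter->pdev->dev,
                                            "async op %d failed: %d\n", xn->vc_op,
                                            ctlq_msg->cookie.mbx.chnl_retval);
                        return -EINVAL;
                }

                return 0;
        }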
+
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ const void *payload = NULL;
+ size_t payload_size = 0;
+ struct idpf_vc_xn *xn;
+ u16 msg_info;
+ int err = 0;
+ u16 xn_idx;
+ u16 salt;
+
+ msg_info = ctlq_msg->ctx.sw_cookie.data;
+ xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+ if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+ xn_idx);
+ return -EINVAL;
+ }
+ xn = &adapter->vcxn_mngr->ring[xn_idx];
+ salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ if (xn->salt != salt) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ xn->salt, salt);
+ return -EINVAL;
+ }
- /* If we are here and a reset is detected do not wait but
- * return. Reset timing is out of drivers control. So
- * while we are cleaning resources as part of reset if the
- * underlying HW mailbox is gone, wait on mailbox messages
- * is not meaningful
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_WAITING:
+ /* success */
+ break;
+ case IDPF_VC_XN_IDLE:
+ dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ case IDPF_VC_XN_SHUTDOWN:
+ /* ENXIO is a bit special here as the recv msg loop uses it to
+ * know if it should stop trying to clean the ring if we lost
+ * the virtchnl. We need to stop playing with registers and
+ * yield.
*/
- if (idpf_is_reset_detected(adapter))
- return 0;
+ err = -ENXIO;
+ goto out_unlock;
+ case IDPF_VC_XN_ASYNC:
+ err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+ idpf_vc_xn_unlock(xn);
+ return err;
+ default:
+ dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EBUSY;
+ goto out_unlock;
+ }
- event = wait_event_timeout(*vchnl_wq,
- test_and_clear_bit(state, vc_state),
- msecs_to_jiffies(time_to_wait));
- if (event) {
- if (test_and_clear_bit(err_check, vc_state)) {
- dev_err(&adapter->pdev->dev, "VC response error %s\n",
- idpf_vport_vc_state_str[err_check]);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return -EINVAL;
- }
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return 0;
- }
- num_waits--;
+ if (ctlq_msg->data_len) {
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
}
- /* Timeout occurred */
- dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
- idpf_vport_vc_state_str[state]);
+ xn->reply_sz = payload_size;
+ xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
- return -ETIMEDOUT;
+ if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+ memcpy(xn->reply.iov_base, payload,
+ min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+ idpf_vc_xn_unlock(xn);
+ /* we _cannot_ hold the lock while calling complete() */
+ complete(&xn->completed);
+
+ return err;
}
/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Will receive control queue messages and post the receive buffers. Returns 0
+ * on success and negative on failure.
*/
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int post_err, err;
+ u16 num_recv;
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
-{
- /* Increasing the timeout in __IDPF_INIT_SW flow to consider large
- * number of VF's mailbox message responses. When a message is received
- * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
- * the timeout expires. Only in the error case i.e. if no message is
- * received on mailbox, we wait for the complete timeout which is
- * less likely to happen.
- */
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO);
+ while (1) {
+ /* This will get at most num_recv messages and write back how many
+ * were actually received into num_recv.
+ */
+ num_recv = 1;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ if (err || !num_recv)
+ break;
+
+ if (ctlq_msg.data_len) {
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ } else {
+ dma_mem = NULL;
+ num_recv = 0;
+ }
+
+ if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+ idpf_recv_event_msg(adapter, &ctlq_msg);
+ else
+ err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+ adapter->hw.arq,
+ &num_recv, &dma_mem);
+
+ /* If post failed clear the only buffer we supplied */
+ if (post_err) {
+ if (dma_mem)
+ dmam_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+ break;
+ }
+
+ /* virtchnl trying to shutdown, stop cleaning */
+ if (err == -ENXIO)
+ break;
+ }
+
+ return err;
}
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
*/
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_version_info vvi;
+ ssize_t reply_sz;
+ u32 major, minor;
+ int err = 0;
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
}
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
- (u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
- struct virtchnl2_version_info vvi;
- u32 major, minor;
- int err;
+ xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ xn_params.send_buf.iov_base = &vvi;
+ xn_params.send_buf.iov_len = sizeof(vvi);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
- sizeof(vvi));
- if (err)
- return err;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(vvi))
+ return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
- dev_warn(&adapter->pdev->dev,
- "Virtchnl major version (%d) greater than supported\n",
- major);
-
+ dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
- dev_warn(&adapter->pdev->dev,
- "Virtchnl minor version (%d) didn't match\n", minor);
+ dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
* version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
*/
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
- struct virtchnl2_get_capabilities caps = { };
+ struct virtchnl2_get_capabilities caps = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
caps.csum_caps =
cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_PROMISC |
VIRTCHNL2_CAP_LOOPBACK);
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
- (u8 *)&caps);
-}
+ xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+ xn_params.send_buf.iov_base = &caps;
+ xn_params.send_buf.iov_len = sizeof(caps);
+ xn_params.recv_buf.iov_base = &adapter->caps;
+ xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
- return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
- sizeof(struct virtchnl2_get_capabilities));
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(adapter->caps))
+ return -EIO;
+
+ return 0;
}
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vc_xn_params xn_params = {};
u16 idx = adapter->next_vport;
int err, buf_size;
+ ssize_t reply_sz;
buf_size = sizeof(struct virtchnl2_create_vport);
if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
return err;
}
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
- (u8 *)vport_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-
- goto rel_lock;
- }
-
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
GFP_KERNEL);
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
- goto rel_lock;
+ goto free_vport_params;
}
}
- vport_msg = adapter->vport_params_recvd[idx];
- memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+ xn_params.send_buf.iov_base = vport_msg;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto free_vport_params;
+ }
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EIO;
+ goto free_vport_params;
+ }
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
+ return 0;
+
+free_vport_params:
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
*/
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ rel_lock:
*/
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ rel_lock:
*/
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ rel_lock:
*/
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_tx_queues *ctq;
+ struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+ struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_txq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_txq + vport->num_complq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!ctq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_TX_QUEUES,
- buf_sz, (u8 *)ctq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = ctq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ctq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
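
The qi/ctq buffers above rely on scope-based cleanup from linux/cleanup.h: __free(kfree) arranges for kfree() to run when the pointer goes out of scope, which is why the old error-label bookkeeping could be dropped. A minimal sketch of the idiom (demo function hypothetical):

        #include <linux/cleanup.h>

        static int demo_scoped_alloc(size_t len)
        {
                u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;

                if (len < 16)
                        return -EINVAL; /* buf is freed automatically here */

                return 0;               /* ...and on every other exit */
        }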
/**
@@ -1591,11 +1544,13 @@ error:
*/
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_rx_queues *crq;
+ struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+ struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_rxq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ common_qi_fields:
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!crq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ common_qi_fields:
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_RX_QUEUES,
- buf_sz, (u8 *)crq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = crq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(crq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
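
Both config paths split the per-queue entries so no single mailbox message exceeds IDPF_CTLQ_MAX_BUF_LEN. The chunking arithmetic, pulled out with hypothetical names (the driver's actual helper macros are not shown in this hunk):

        static void demo_chunking(u32 totqs, u32 hdr_sz, u32 entry_sz,
                                  u32 *num_chunks, u32 *num_msgs)
        {
                /* entries that fit in one buffer after the message header */
                u32 max_per_msg = (IDPF_CTLQ_MAX_BUF_LEN - hdr_sz) / entry_sz;

                *num_chunks = min(max_per_msg, totqs);
                *num_msgs = DIV_ROUND_UP(totqs, *num_chunks);
        }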
/**
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
* queues message
* @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: true to enable the queues, false to disable them
*
* Send enable or disable queues virtchnl message. Returns 0 on success,
* negative on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_queue_chunks *qcs;
- struct virtchnl2_queue_chunk *qc;
u32 config_sz, chunk_sz, buf_sz;
- int i, j, k = 0, err = 0;
-
- /* validate virtchnl op */
- switch (vc_op) {
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- break;
- default:
- return -EINVAL;
- }
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
- if (vport->num_complq != (k - vport->num_txq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_complq != (k - vport->num_txq))
+ return -EINVAL;
setup_rx:
for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ setup_rx:
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
goto send_msg;
@@ -1845,10 +1771,8 @@ setup_rx:
}
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
- vport->num_rxq)) {
- err = -EINVAL;
- goto error;
- }
+ vport->num_rxq))
+ return -EINVAL;
send_msg:
/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (ena) {
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ send_msg:
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
- if (err)
- goto mbx_error;
-
- if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- else
- err = idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
-error:
- kfree(qc);
-
- return err;
+ return 0;
}
/**
@@ -1917,12 +1830,13 @@ error:
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector_maps *vqvm;
- struct virtchnl2_queue_vector *vqv;
+ struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
u32 num_msgs, num_chunks, num_q;
- int i, j, k = 0, err = 0;
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_complq)
+ return -EINVAL;
} else {
- if (vport->num_rxq != k - vport->num_txq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_txq)
+ return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm) {
- err = -ENOMEM;
- goto error;
- }
+ if (!vqvm)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (map) {
+ xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(vqvm, 0, buf_sz);
+ xn_params.send_buf.iov_base = vqvm;
+ xn_params.send_buf.iov_len = buf_sz;
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
- if (map) {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- } else {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err =
- idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- }
- if (err)
- goto mbx_error;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(vqvm);
-error:
- kfree(vqv);
-
- return err;
+ return 0;
}
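The early returns above leak nothing because of the __free(kfree) annotations; a minimal sketch of the scope-based cleanup from <linux/cleanup.h> (the function name is hypothetical):

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_scoped_alloc(size_t sz)
{
	/* buf is freed automatically when it goes out of scope, on
	 * every return path below; kfree(NULL) is a harmless no-op.
	 */
	u8 *buf __free(kfree) = kzalloc(sz, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (sz < 16)
		return -EINVAL;	/* no explicit kfree() needed */

	return 0;
}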
/**
@@ -2062,7 +1953,7 @@ error:
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+ return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err, i;
- err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
@@ -2124,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
*/
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
struct virtchnl2_create_vport *vport_params;
struct virtchnl2_queue_reg_chunks *chunks;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
- int buf_size, err;
+ ssize_t reply_sz;
u16 num_chunks;
+ int buf_size;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
+ chunks = &vport_config->req_qs_chunks->chunks;
} else {
- vport_params = adapter->vport_params_recvd[vport_idx];
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
}
@@ -2156,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
num_chunks);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
- buf_size, (u8 *)eq);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
+ xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2205,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- struct virtchnl2_add_queues aq = { };
- struct virtchnl2_add_queues *vc_msg;
+ struct virtchnl2_add_queues aq = {};
u16 vport_idx = vport->idx;
- int size, err;
+ ssize_t reply_sz;
+ int size;
+
+ vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2220,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
aq.num_rx_q = cpu_to_le16(num_rx_q);
aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
- mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
- sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
- if (err)
- goto rel_lock;
-
- /* We want vport to be const to prevent incidental code changes making
- * changes to the vport config. We're making a special exception here
- * to discard const to use the virtchnl.
- */
- err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
- IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
- if (err)
- goto rel_lock;
-
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
+ xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &aq;
+ xn_params.send_buf.iov_len = sizeof(aq);
+ xn_params.recv_buf.iov_base = vc_msg;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
/* compare vc_msg num queues with vport num queues */
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
- err = -EINVAL;
- goto rel_lock;
- }
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
le16_to_cpu(vc_msg->chunks.num_chunks));
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
-rel_lock:
- mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks)
+ return -ENOMEM;
- return err;
+ return 0;
}
/**
@@ -2272,53 +2147,49 @@ rel_lock:
*/
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
- struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
- struct virtchnl2_alloc_vectors ac = { };
+ struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
+ struct virtchnl2_alloc_vectors ac = {};
+ ssize_t reply_sz;
u16 num_vchunks;
- int size, err;
+ int size;
ac.num_vectors = cpu_to_le16(num_vectors);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
- sizeof(ac), (u8 *)&ac);
- if (err)
- goto rel_lock;
+ rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_vec)
+ return -ENOMEM;
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+ xn_params.send_buf.iov_base = &ac;
+ xn_params.send_buf.iov_len = sizeof(ac);
+ xn_params.recv_buf.iov_base = rcvd_vec;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
-
size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
- if (size > sizeof(adapter->vc_msg)) {
- err = -EINVAL;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
kfree(adapter->req_vec_chunks);
- adapter->req_vec_chunks = NULL;
- adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
- if (!adapter->req_vec_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks)
+ return -ENOMEM;
- alloc_vec = adapter->req_vec_chunks;
- if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
- err = -EINVAL;
+ return -EINVAL;
}
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2331,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
- int buf_size, err;
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+ int buf_size;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
- (u8 *)vcs);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ xn_params.send_buf.iov_base = vcs;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2376,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
*/
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
- struct virtchnl2_sriov_vfs_info svi = { };
- int err;
+ struct virtchnl2_sriov_vfs_info svi = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
svi.num_vfs = cpu_to_le16(num_vfs);
+ xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &svi;
+ xn_params.send_buf.iov_len = sizeof(svi);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
- sizeof(svi), (u8 *)&svi);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2407,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_vport_stats stats_msg = { };
- struct virtchnl2_vport_stats *stats;
- int err;
+ struct virtchnl2_vport_stats stats_msg = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
/* Don't send get_stats message if the link is down */
if (np->state <= __IDPF_VPORT_DOWN)
@@ -2418,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+ xn_params.send_buf.iov_base = &stats_msg;
+ xn_params.send_buf.iov_len = sizeof(stats_msg);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
- sizeof(struct virtchnl2_vport_stats),
- (u8 *)&stats_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- if (err)
- goto rel_lock;
-
- stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(stats_msg))
+ return -EIO;
spin_lock_bh(&np->stats_lock);
- netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
- le64_to_cpu(stats->rx_multicast) +
- le64_to_cpu(stats->rx_broadcast);
- netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
- netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
- netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
- netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
- netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
- le64_to_cpu(stats->tx_multicast) +
- le64_to_cpu(stats->tx_broadcast);
- netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
- netstats->tx_errors = le64_to_cpu(stats->tx_errors);
- netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
- vport->port_stats.vport_stats = *stats;
+ netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+ le64_to_cpu(stats_msg.rx_multicast) +
+ le64_to_cpu(stats_msg.rx_broadcast);
+ netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+ le64_to_cpu(stats_msg.tx_multicast) +
+ le64_to_cpu(stats_msg.tx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+ netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+ netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+ netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+ netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+ netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+ vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return 0;
}
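Note the two-step check above: a negative reply_sz is a transaction-level failure, while a non-negative but short reply is turned into -EIO before any field of the reply is read. A condensed sketch of that guard (hypothetical helper):

static ssize_t example_check_reply(ssize_t reply_sz, size_t need)
{
	if (reply_sz < 0)
		return reply_sz;	/* send/timeout/reply error */
	if ((size_t)reply_sz < need)
		return -EIO;		/* truncated reply payload */

	return reply_sz;
}

The stats message also points recv_buf at send_buf, since virtchnl2_vport_stats doubles as both request and reply.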
/**
@@ -2469,70 +2320,70 @@ rel_lock:
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_lut *recv_rl;
+ struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+ struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_lut *rl;
int buf_size, lut_buf_size;
- int i, err;
+ ssize_t reply_sz;
+ int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
- if (!get) {
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = rl;
+ xn_params.send_buf.iov_len = buf_size;
+
+ if (get) {
+ recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rl)
+ return -ENOMEM;
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+ xn_params.recv_buf.iov_base = recv_rl;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ } else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
-
- goto free_mem;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+ return -EIO;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
+ lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+ if (reply_sz < lut_buf_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- if (err)
- goto free_mem;
-
- recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ /* size didn't change, we can reuse existing lut buf */
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
goto do_memcpy;
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
- lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
- err = -ENOMEM;
- goto free_mem;
+ return -ENOMEM;
}
do_memcpy:
- memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rl);
+ memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
- return err;
+ return 0;
}
/**
@@ -2544,68 +2395,70 @@ free_mem:
*/
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_key *recv_rk;
+ struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
+ struct virtchnl2_rss_key *rk __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_key *rk;
- int i, buf_size, err;
+ ssize_t reply_sz;
+ int i, buf_size;
+ u16 key_size;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
rk->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
+ xn_params.send_buf.iov_base = rk;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
if (get) {
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- if (err)
- goto error;
-
- recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
- if (rss_data->rss_key_size !=
- le16_to_cpu(recv_rk->key_len)) {
- rss_data->rss_key_size =
- min_t(u16, NETDEV_RSS_KEY_LEN,
- le16_to_cpu(recv_rk->key_len));
- kfree(rss_data->rss_key);
- rss_data->rss_key = kzalloc(rss_data->rss_key_size,
- GFP_KERNEL);
- if (!rss_data->rss_key) {
- rss_data->rss_key_size = 0;
- err = -ENOMEM;
- goto error;
- }
- }
- memcpy(rss_data->rss_key, recv_rk->key_flex,
- rss_data->rss_key_size);
+ recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rk)
+ return -ENOMEM;
+
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
+ xn_params.recv_buf.iov_base = recv_rk;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
} else {
rk->key_len = cpu_to_le16(rss_data->rss_key_size);
for (i = 0; i < rss_data->rss_key_size; i++)
rk->key_flex[i] = rss_data->rss_key[i];
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
+ }
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_key))
+ return -EIO;
+
+ key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ if (reply_sz < key_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
+ /* key len didn't change, reuse existing buf */
+ if (rss_data->rss_key_size == key_size)
+ goto do_memcpy;
+
+ rss_data->rss_key_size = key_size;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ return -ENOMEM;
}
-error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rk);
+do_memcpy:
+ memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
- return err;
+ return 0;
}
/**
@@ -2657,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
*/
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
+ struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
+ struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
- struct virtchnl2_get_ptype_info get_ptype_info;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
- int err = 0, i, j, k;
+ ssize_t reply_sz;
+ int i, j, k;
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2672,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
+ if (!get_ptype_info)
+ return -ENOMEM;
+
ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
if (!ptype_info)
return -ENOMEM;
- mutex_lock(&adapter->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ xn_params.send_buf.iov_base = get_ptype_info;
+ xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
+ xn_params.recv_buf.iov_base = ptype_info;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
while (next_ptype_id < max_ptype) {
- get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+ get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(max_ptype - next_ptype_id);
else
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
- sizeof(struct virtchnl2_get_ptype_info),
- (u8 *)&get_ptype_info);
- if (err)
- goto vc_buf_unlock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- if (err)
- goto vc_buf_unlock;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
+ return -EIO;
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
- if (ptypes_recvd > max_ptype) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptypes_recvd > max_ptype)
+ return -EINVAL;
- next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
- le16_to_cpu(get_ptype_info.num_ptypes);
+ next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
+ le16_to_cpu(get_ptype_info->num_ptypes);
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
@@ -2721,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
((u8 *)ptype_info + ptype_offset);
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
- if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID) {
- err = 0;
- goto vc_buf_unlock;
- }
+ IDPF_INVALID_PTYPE_ID)
+ return 0;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
@@ -2859,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
}
}
-vc_buf_unlock:
- mutex_unlock(&adapter->vc_buf_lock);
- kfree(ptype_info);
-
- return err;
+ return 0;
}
/**
@@ -2875,27 +2723,20 @@ vc_buf_unlock:
*/
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
- int err;
+ ssize_t reply_sz;
loopback.vport_id = cpu_to_le32(vport->vport_id);
loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
- sizeof(loopback), (u8 *)&loopback);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &loopback;
+ xn_params.send_buf.iov_len = sizeof(loopback);
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2960,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
return -ENOENT;
}
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
return 0;
}
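Because the transaction interface makes idpf_send_ver_msg() block for the reply, the separate __IDPF_STARTUP send state and the recv step disappear and __IDPF_VER_CHECK becomes the entry state. A condensed, illustrative view of the init flow in the hunk below (the real driver restarts through the delayed init task rather than looping inline):

	err = idpf_send_ver_msg(adapter);	/* blocks for the reply */
	if (err == -EAGAIN)
		goto restart;			/* PF asked to renegotiate */
	if (err)
		goto init_failed;		/* retried a limited number of times */

	adapter->state = __IDPF_GET_CAPS;
	err = idpf_send_get_caps_msg(adapter);	/* also blocks for the reply */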
@@ -3057,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
u16 num_max_vports;
int err = 0;
+ if (!adapter->vcxn_mngr) {
+ adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
+ if (!adapter->vcxn_mngr) {
+ err = -ENOMEM;
+ goto init_failed;
+ }
+ }
+ idpf_vc_xn_init(adapter->vcxn_mngr);
+
while (adapter->state != __IDPF_INIT_SW) {
switch (adapter->state) {
- case __IDPF_STARTUP:
- if (idpf_send_ver_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_VER_CHECK;
- goto restart;
case __IDPF_VER_CHECK:
- err = idpf_recv_ver_msg(adapter);
- if (err == -EIO) {
- return err;
- } else if (err == -EAGAIN) {
- adapter->state = __IDPF_STARTUP;
+ err = idpf_send_ver_msg(adapter);
+ switch (err) {
+ case 0:
+ /* success, move state machine forward */
+ adapter->state = __IDPF_GET_CAPS;
+ fallthrough;
+ case -EAGAIN:
goto restart;
- } else if (err) {
+ default:
+ /* Something bad happened, try again but only a
+ * few times.
+ */
goto init_failed;
}
- if (idpf_send_get_caps_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_GET_CAPS;
- goto restart;
case __IDPF_GET_CAPS:
- if (idpf_recv_get_caps_msg(adapter))
+ err = idpf_send_get_caps_msg(adapter);
+ if (err)
goto init_failed;
adapter->state = __IDPF_INIT_SW;
break;
default:
dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
adapter->state);
+ err = -EINVAL;
goto init_failed;
}
break;
@@ -3144,7 +2992,9 @@ restart:
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
- goto no_err;
+ set_bit(IDPF_VC_CORE_INIT, adapter->flags);
+
+ return 0;
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
@@ -3153,7 +3003,6 @@ err_intr_req:
err_netdev_alloc:
kfree(adapter->vports);
adapter->vports = NULL;
-no_err:
return err;
init_failed:
@@ -3170,7 +3019,9 @@ init_failed:
* register writes might not have taken effect. Retry to initialize
* the mailbox again
*/
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
+ if (adapter->vcxn_mngr)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3186,29 +3037,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
- int i;
+ if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ return;
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- /* Set all bits as we dont know on which vc_state the vhnl_wq is
- * waiting on and wakeup the virtchnl workqueue even if it is waiting
- * for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, adapter->vc_state);
- wake_up(&adapter->vchnl_wq);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, adapter->vc_state);
-
kfree(adapter->vports);
adapter->vports = NULL;
+
+ clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
/**
@@ -3624,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
}
/**
+ * idpf_mac_filter_async_handler - Async callback for mac filters
+ * @adapter: private data struct
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * In some scenarios the driver can't sleep and wait for a reply (e.g. the
+ * stack is holding rtnl_lock) when adding a new mac filter, which makes
+ * errors returned on the reply hard to deal with. The best we can
+ * ultimately do is remove the filter from our list of mac filters and
+ * report the error.
+ */
+static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_mac_addr_list *ma_list;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ struct list_head *ma_list_head;
+ struct idpf_vport *vport;
+ u16 num_entries;
+ int i;
+
+ /* on success we're done; we're only here if something bad happened */
+ if (!ctlq_msg->cookie.mbx.chnl_retval)
+ return 0;
+
+ /* make sure at least struct is there */
+ if (xn->reply_sz < sizeof(*ma_list))
+ goto invalid_payload;
+
+ ma_list = ctlq_msg->ctx.indirect.payload->va;
+ mac_addr = ma_list->mac_addr_list;
+ num_entries = le16_to_cpu(ma_list->num_mac_addr);
+ /* we should have received a buffer at least this big */
+ if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
+ goto invalid_payload;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
+ if (!vport)
+ goto invalid_payload;
+
+ vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
+ ma_list_head = &vport_config->user_config.mac_filter_list;
+
+ /* We can't do much to reconcile bad filters at this point, however we
+ * should at least remove them from our list one way or the other so we
+ * have some idea what good filters we have.
+ */
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ list_for_each_entry_safe(f, tmp, ma_list_head, list)
+ for (i = 0; i < num_entries; i++)
+ if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
+ list_del(&f->list);
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
+ xn->vc_op);
+
+ return 0;
+
+invalid_payload:
+ dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
+ xn->vc_op, xn->reply_sz);
+
+ return -EINVAL;
+}
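The reply_sz < struct_size() test above is the key safety check: never index a device-provided count until the buffer is proven large enough to hold that many entries. A generic sketch with hypothetical structure names:

struct example_entry {
	u8 addr[ETH_ALEN];
};

struct example_reply {
	__le16 num;
	struct example_entry entries[];
};

static int example_validate(const struct example_reply *r, ssize_t reply_sz)
{
	u16 n = le16_to_cpu(r->num);

	/* struct_size() accounts for the header plus n trailing entries
	 * and saturates on overflow, so a bogus count from the device
	 * cannot cause an out-of-bounds walk of the payload.
	 */
	if (reply_sz < struct_size(r, entries, n))
		return -EINVAL;

	return 0;
}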
+
+/**
* idpf_add_del_mac_filters - Add/del mac filters
* @vport: Virtual port data structure
* @np: Netdev private structure
@@ -3636,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async)
{
- struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
+ struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- enum idpf_vport_config_flags mac_flag;
- struct pci_dev *pdev = adapter->pdev;
- enum idpf_vport_vc_state vc, vc_err;
- struct virtchnl2_mac_addr *mac_addr;
- struct idpf_mac_filter *f, *tmp;
u32 num_msgs, total_filters = 0;
- int i = 0, k, err = 0;
- u32 vop;
+ struct idpf_mac_filter *f;
+ ssize_t reply_sz;
+ int i = 0, k;
+
+ xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
+ VIRTCHNL2_OP_DEL_MAC_ADDR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = async;
+ xn_params.async_handler = idpf_mac_filter_async_handler;
vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
@@ -3670,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
GFP_ATOMIC);
if (!mac_addr) {
- err = -ENOMEM;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- goto error;
+
+ return -ENOMEM;
}
- list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
- list) {
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
i++;
@@ -3695,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (add) {
- vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
- vc = IDPF_VC_ADD_MAC_ADDR;
- vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_ADD_MAC_REQ;
- } else {
- vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
- vc = IDPF_VC_DEL_MAC_ADDR;
- vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_DEL_MAC_REQ;
- }
-
/* Chunk up the filters into multiple messages to avoid
* sending a control queue message buffer that is too large
*/
num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
- if (!async)
- mutex_lock(&vport->vc_buf_lock);
-
for (i = 0, k = 0; i < num_msgs; i++) {
u32 entries_size, buf_size, num_entries;
@@ -3726,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
kfree(ma_list);
ma_list = kzalloc(buf_size, GFP_ATOMIC);
- if (!ma_list) {
- err = -ENOMEM;
- goto list_prep_error;
- }
+ if (!ma_list)
+ return -ENOMEM;
} else {
memset(ma_list, 0, buf_size);
}
@@ -3738,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
- if (async)
- set_bit(mac_flag, vport_config->flags);
-
- err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
- if (err)
- goto mbx_error;
-
- if (!async) {
- err = idpf_wait_for_event(adapter, vport, vc, vc_err);
- if (err)
- goto mbx_error;
- }
+ xn_params.send_buf.iov_base = ma_list;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_entries;
total_filters -= num_entries;
}
-mbx_error:
- if (!async)
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ma_list);
-list_prep_error:
- kfree(mac_addr);
-error:
- if (err)
- dev_err(&pdev->dev, "Failed to add or del mac filters %d", err);
-
- return err;
+ return 0;
}
/**
@@ -3782,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_promisc_info vpi;
+ ssize_t reply_sz;
u16 flags = 0;
- int err;
if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3794,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
vpi.vport_id = cpu_to_le32(vport_id);
vpi.flags = cpu_to_le16(flags);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
- sizeof(struct virtchnl2_promisc_info),
- (u8 *)&vpi);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &vpi;
+ xn_params.send_buf.iov_len = sizeof(vpi);
+ /* setting promiscuous is only ever done asynchronously */
+ xn_params.async = true;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
new file mode 100644
index 000000000000..83da5d8da56b
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+struct idpf_adapter;
+struct idpf_netdev_priv;
+struct idpf_vec_regs;
+struct idpf_vport;
+struct idpf_vport_max_q;
+struct idpf_vport_user_config_data;
+
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+
+int idpf_recv_mb_msg(struct idpf_adapter *adapter);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg, u16 cookie);
+
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+
+#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b66199c9bb3a..99977a22b843 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3027,7 +3027,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int igb_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3038,11 +3038,13 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
if (!hw->dev_spec._82575.eee_disable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
/* The IPCNFG and EEER registers are not supported on I354. */
if (hw->mac.type == e1000_i354) {
@@ -3068,7 +3070,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
case e1000_i354:
case e1000_i210:
@@ -3079,7 +3081,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
default:
@@ -3099,18 +3101,20 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
bool adv1g_eee = true, adv100m_eee = true;
s32 ret_val;
@@ -3118,7 +3122,7 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
@@ -3138,14 +3142,21 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (!edata->advertised || (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
dev_err(&adapter->pdev->dev,
"EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
return -EINVAL;
}
- adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
- adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+ adv100m_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+ adv1g_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
@@ -3153,7 +3164,7 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
adapter->flags |= IGB_FLAG_EEE;
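The ethtool_eee to ethtool_keee conversion swaps u32 advertising masks for linkmode bitmaps, so the open-coded bit tests become bitmap helpers. A minimal sketch of the "reject unsupported bits" check used above, assuming <linux/linkmode.h>:

__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(extra) = {};

linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);

/* linkmode_andnot() computes extra = advertised & ~supported and
 * returns true if any bit survives, i.e. the user requested a mode
 * this MAC cannot advertise.
 */
if (linkmode_andnot(extra, edata->advertised, supported))
	return -EINVAL;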
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cebb44f51d5f..a3f100769e39 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -202,7 +202,7 @@ static struct notifier_block dca_notifier = {
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
-module_param(max_vfs, uint, 0);
+module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
@@ -2538,7 +2538,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -6985,44 +6985,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_SYS_WRAP;
}
if (tsicr & E1000_TSICR_TXTS) {
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
- ack |= E1000_TSICR_TXTS;
}
- if (tsicr & TSINTR_TT0) {
+ if (tsicr & TSINTR_TT0)
igb_perout(adapter, 0);
- ack |= TSINTR_TT0;
- }
- if (tsicr & TSINTR_TT1) {
+ if (tsicr & TSINTR_TT1)
igb_perout(adapter, 1);
- ack |= TSINTR_TT1;
- }
- if (tsicr & TSINTR_AUTT0) {
+ if (tsicr & TSINTR_AUTT0)
igb_extts(adapter, 0);
- ack |= TSINTR_AUTT0;
- }
- if (tsicr & TSINTR_AUTT1) {
+ if (tsicr & TSINTR_AUTT1)
igb_extts(adapter, 1);
- ack |= TSINTR_AUTT1;
- }
-
- /* acknowledge the interrupts */
- wr32(E1000_TSICR, ack);
}
static irqreturn_t igb_msix_other(int irq, void *data)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index a4d4f00e6a87..b0cf310e6f7b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2655,7 +2655,7 @@ igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 95d1e8c490a4..ebffd3054285 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,6 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 45430e246e9c..90316dc58630 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -168,7 +168,7 @@ struct igc_ring {
struct igc_adapter {
struct net_device *netdev;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u16 eee_advert;
unsigned long state;
@@ -295,6 +295,9 @@ struct igc_adapter {
struct timespec64 start;
struct timespec64 period;
} perout[IGC_N_PEROUT];
+
+ /* LEDs */
+ struct mutex led_mutex;
};
void igc_up(struct igc_adapter *adapter);
@@ -567,7 +570,6 @@ struct igc_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
- struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
@@ -585,7 +587,7 @@ enum igc_filter_match_flags {
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
- __be16 vlan_etype;
+ u16 vlan_etype;
u16 vlan_tci;
u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
@@ -720,6 +722,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
+int igc_led_setup(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index b95d2c86e803..1a64f1ca6ca8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -981,7 +981,7 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
fsp->flow_type |= FLOW_EXT;
- fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
+ fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype);
fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
}
@@ -1249,7 +1249,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
/* VLAN etype matching */
if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
- rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
}
@@ -1623,18 +1623,17 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static int igc_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
u32 eeer;
if (hw->dev_spec._base.eee_enable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
*edata = adapter->eee;
- edata->supported = SUPPORTED_Autoneg;
eeer = rd32(IGC_EEER);
@@ -1647,9 +1646,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = hw->dev_spec._base.eee_enable;
- edata->advertised = SUPPORTED_Autoneg;
- edata->lp_advertised = SUPPORTED_Autoneg;
-
/* Report correct negotiated EEE status for devices that
* wrongly report EEE at half-duplex
*/
@@ -1657,21 +1653,21 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igc_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
if (ret_val) {
@@ -1699,7 +1695,8 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
+
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
new file mode 100644
index 000000000000..bf240c5daf86
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Linutronix GmbH */
+
+#include <linux/bits.h>
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <uapi/linux/uleds.h>
+
+#include "igc.h"
+
+#define IGC_NUM_LEDS 3
+
+#define IGC_LEDCTL_LED0_MODE_SHIFT 0
+#define IGC_LEDCTL_LED0_MODE_MASK GENMASK(3, 0)
+#define IGC_LEDCTL_LED0_BLINK BIT(7)
+#define IGC_LEDCTL_LED1_MODE_SHIFT 8
+#define IGC_LEDCTL_LED1_MODE_MASK GENMASK(11, 8)
+#define IGC_LEDCTL_LED1_BLINK BIT(15)
+#define IGC_LEDCTL_LED2_MODE_SHIFT 16
+#define IGC_LEDCTL_LED2_MODE_MASK GENMASK(19, 16)
+#define IGC_LEDCTL_LED2_BLINK BIT(23)
+
+#define IGC_LEDCTL_MODE_ON 0x00
+#define IGC_LEDCTL_MODE_OFF 0x01
+#define IGC_LEDCTL_MODE_LINK_10 0x05
+#define IGC_LEDCTL_MODE_LINK_100 0x06
+#define IGC_LEDCTL_MODE_LINK_1000 0x07
+#define IGC_LEDCTL_MODE_LINK_2500 0x08
+#define IGC_LEDCTL_MODE_ACTIVITY 0x0b
+
+#define IGC_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK_1000) | \
+ BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_10) | \
+ BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+#define IGC_ACTIVITY_MODES \
+ (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+struct igc_led_classdev {
+ struct net_device *netdev;
+ struct led_classdev led;
+ int index;
+};
+
+#define lcdev_to_igc_ldev(lcdev) \
+ container_of(lcdev, struct igc_led_classdev, led)
+
+static void igc_led_select(struct igc_adapter *adapter, int led,
+ u32 *mask, u32 *shift, u32 *blink)
+{
+ switch (led) {
+ case 0:
+ *mask = IGC_LEDCTL_LED0_MODE_MASK;
+ *shift = IGC_LEDCTL_LED0_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED0_BLINK;
+ break;
+ case 1:
+ *mask = IGC_LEDCTL_LED1_MODE_MASK;
+ *shift = IGC_LEDCTL_LED1_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED1_BLINK;
+ break;
+ case 2:
+ *mask = IGC_LEDCTL_LED2_MODE_MASK;
+ *shift = IGC_LEDCTL_LED2_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED2_BLINK;
+ break;
+ default:
+ *mask = *shift = *blink = 0;
+ netdev_err(adapter->netdev, "Unknown LED %d selected!\n", led);
+ }
+}
+
+static void igc_led_set(struct igc_adapter *adapter, int led, u32 mode,
+ bool blink)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+
+ /* Set mode */
+ ledctl = rd32(IGC_LEDCTL);
+ ledctl &= ~mask;
+ ledctl |= mode << shift;
+
+ /* Configure blinking */
+ if (blink)
+ ledctl |= blink_bit;
+ else
+ ledctl &= ~blink_bit;
+ wr32(IGC_LEDCTL, ledctl);
+
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
+static u32 igc_led_get(struct igc_adapter *adapter, int led)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+ ledctl = rd32(IGC_LEDCTL);
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+
+ return (ledctl & mask) >> shift;
+}
+
+static int igc_led_brightness_set_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ if (brightness)
+ mode = IGC_LEDCTL_MODE_ON;
+ else
+ mode = IGC_LEDCTL_MODE_OFF;
+
+ netdev_dbg(adapter->netdev, "Set brightness for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ igc_led_set(adapter, ldev->index, mode, false);
+
+ return 0;
+}
+
+static int igc_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ if (flags & ~IGC_SUPPORTED_MODES)
+ return -EOPNOTSUPP;
+
+ /* If both Tx and Rx are selected, activity can be offloaded, unless
+ * some other mode is selected as well.
+ */
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)) &&
+ !(flags & ~IGC_ACTIVITY_MODES))
+ return 0;
+
+ /* Single Rx or Tx activity is not supported. */
+ if (flags & IGC_ACTIVITY_MODES)
+ return -EOPNOTSUPP;
+
+ /* Only one mode can be active at a given time. */
+ if (flags & (flags - 1))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
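The flags & (flags - 1) test above is the standard single-bit check: subtracting one clears the lowest set bit, so the AND is non-zero exactly when two or more bits are set. With illustrative bit values:

	flags = 0b0100;		/* one trigger selected */
	flags & (flags - 1);	/* 0b0100 & 0b0011 == 0: allowed */

	flags = 0b0110;		/* two triggers selected */
	flags & (flags - 1);	/* 0b0110 & 0b0101 == 0b0100: -EOPNOTSUPP */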
+
+static int igc_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode = IGC_LEDCTL_MODE_OFF;
+ bool blink = false;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = IGC_LEDCTL_MODE_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = IGC_LEDCTL_MODE_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = IGC_LEDCTL_MODE_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode = IGC_LEDCTL_MODE_LINK_2500;
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)))
+ mode = IGC_LEDCTL_MODE_ACTIVITY;
+
+ netdev_dbg(adapter->netdev, "Set HW control for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ /* blink is recommended for activity */
+ if (mode == IGC_LEDCTL_MODE_ACTIVITY)
+ blink = true;
+
+ igc_led_set(adapter, ldev->index, mode, blink);
+
+ return 0;
+}
+
+static int igc_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ mode = igc_led_get(adapter, ldev->index);
+
+ switch (mode) {
+ case IGC_LEDCTL_MODE_ACTIVITY:
+ *flags = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case IGC_LEDCTL_MODE_LINK_10:
+ *flags = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case IGC_LEDCTL_MODE_LINK_100:
+ *flags = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case IGC_LEDCTL_MODE_LINK_1000:
+ *flags = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case IGC_LEDCTL_MODE_LINK_2500:
+ *flags = BIT(TRIGGER_NETDEV_LINK_2500);
+ break;
+ }
+
+ return 0;
+}
+
+static struct device *igc_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+
+ return &ldev->netdev->dev;
+}
+
+static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
+ size_t buf_len)
+{
+ snprintf(buf, buf_len, "igc-%x%x-led%d",
+ pci_domain_nr(adapter->pdev->bus),
+ pci_dev_id(adapter->pdev), index);
+}
+
+static void igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->netdev = netdev;
+ ldev->index = index;
+
+ igc_led_get_name(adapter, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->max_brightness = 1;
+ led_cdev->brightness_set_blocking = igc_led_brightness_set_blocking;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->hw_control_is_supported = igc_led_hw_control_is_supported;
+ led_cdev->hw_control_set = igc_led_hw_control_set;
+ led_cdev->hw_control_get = igc_led_hw_control_get;
+ led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
+
+ devm_led_classdev_register(&netdev->dev, led_cdev);
+}
+
+int igc_led_setup(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &netdev->dev;
+ struct igc_led_classdev *leds;
+ int i;
+
+ mutex_init(&adapter->led_mutex);
+
+ leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ igc_setup_ldev(leds + i, netdev, i);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 81c21a893ede..2e1cfbd82f4f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3385,7 +3385,7 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
u32 fhftsl;
if (input->index >= MAX_FLEX_FILTER) {
- dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
return -EINVAL;
}
@@ -3420,7 +3420,6 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
struct igc_flex_filter *input)
{
- struct device *dev = &adapter->pdev->dev;
struct igc_hw *hw = &adapter->hw;
u8 *data = input->data;
u8 *mask = input->mask;
@@ -3434,7 +3433,7 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
* out early to avoid surprises later.
*/
if (input->length % 8 != 0) {
- dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
return -EINVAL;
}
@@ -3504,8 +3503,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
}
wr32(IGC_WUFC, wufc);
- dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
- input->index);
+ netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
+ input->index);
return 0;
}
@@ -3577,9 +3576,9 @@ static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
static int igc_add_flex_filter(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
- struct igc_flex_filter flex = { };
struct igc_nfc_filter *filter = &rule->filter;
unsigned int eth_offset, user_offset;
+ struct igc_flex_filter flex = { };
int ret, index;
bool vlan;
@@ -3615,10 +3614,12 @@ static int igc_add_flex_filter(struct igc_adapter *adapter,
ETH_ALEN, NULL);
/* Add VLAN etype */
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
- igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
- sizeof(filter->vlan_etype),
- NULL);
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
+
+ igc_flex_filter_add_field(&flex, &vlan_etype, 12,
+ sizeof(vlan_etype), NULL);
+ }
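
The hunk above fixes an endianness bug: flex filters match raw packet bytes, and the EtherType travels big-endian on the wire, but filter->vlan_etype is held in host order. On little-endian machines the old code therefore programmed the two bytes swapped and the filter could never match. A standalone illustration, with htons() standing in for cpu_to_be16():

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint16_t vlan_etype = 0x8100;		/* host order */
		uint16_t wire = htons(vlan_etype);	/* big-endian, as in the fix */
		const uint8_t *b = (const uint8_t *)&wire;

		printf("bytes matched against the packet: %02x %02x\n", b[0], b[1]);
		/* prints "81 00" on any host; without the conversion a
		 * little-endian host would have matched "00 81" instead.
		 */
		return 0;
	}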
/* Add VLAN TCI */
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
@@ -5276,7 +5277,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
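
This change is purely cosmetic: skb_network_offset() is defined as exactly the open-coded difference it replaces, so behavior is unchanged. Paraphrasing the helper from include/linux/skbuff.h:

	static inline int skb_network_offset(const struct sk_buff *skb)
	{
		return skb_network_header(skb) - skb->data;
	}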
@@ -5302,25 +5303,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
- u32 ack, tsauxc, sec, nsec, tsicr;
struct igc_hw *hw = &adapter->hw;
+ u32 tsauxc, sec, nsec, tsicr;
struct ptp_clock_event event;
struct timespec64 ts;
tsicr = rd32(IGC_TSICR);
- ack = 0;
if (tsicr & IGC_TSICR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_SYS_WRAP;
}
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
igc_ptp_tx_tstamp_event(adapter);
- ack |= IGC_TSICR_TXTS;
}
if (tsicr & IGC_TSICR_TT0) {
@@ -5334,7 +5332,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[0].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT0;
}
if (tsicr & IGC_TSICR_TT1) {
@@ -5348,7 +5345,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[1].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT1;
}
if (tsicr & IGC_TSICR_AUTT0) {
@@ -5358,7 +5354,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 0;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT0;
}
if (tsicr & IGC_TSICR_AUTT1) {
@@ -5368,11 +5363,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 1;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT1;
}
-
- /* acknowledge the interrupts */
- wr32(IGC_TSICR, ack);
}
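
The deletions above rely on TSICR being a read-to-clear status register: the initial rd32(IGC_TSICR) already acknowledges every pending bit, so accumulating them into `ack` and writing them back afterwards was redundant. A minimal userspace model of the pattern (hypothetical names):

	#include <stdint.h>

	static uint32_t tsicr_hw;	/* stands in for the device register */

	/* Reading a read-to-clear register acknowledges all pending bits. */
	static uint32_t read_tsicr(void)
	{
		uint32_t v = tsicr_hw;

		tsicr_hw = 0;		/* hardware side effect of the read */
		return v;
	}

	static void handle_tsync(void)
	{
		uint32_t tsicr = read_tsicr();

		if (tsicr & (1u << 0)) {	/* e.g. the SYS_WRAP bit */
			/* handle the event; no acknowledgment write needed */
		}
	}

	int main(void)
	{
		tsicr_hw = 1u << 0;
		handle_tsync();
		return 0;
	}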
/**
@@ -6976,6 +6967,12 @@ static int igc_probe(struct pci_dev *pdev,
pm_runtime_put_noidle(&pdev->dev);
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+ if (err)
+ goto err_register;
+ }
+
return 0;
err_register:
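
IS_ENABLED(CONFIG_IGC_LEDS) expands to a compile-time 0 or 1, so when the option is off the compiler discards the whole branch and the call in it; no #ifdef is needed in the probe path, only an always-visible declaration of igc_led_setup(). A simplified userspace model (the real macro inspects the CONFIG_* definitions generated by Kconfig):

	#include <stdio.h>

	#define CONFIG_DEMO_LEDS	1
	#define IS_ENABLED(opt)		(opt)	/* model: compile-time 0/1 */

	static int demo_led_setup(void)
	{
		puts("LED setup ran");
		return 0;
	}

	int main(void)
	{
		if (IS_ENABLED(CONFIG_DEMO_LEDS))	/* dead code when 0 */
			return demo_led_setup();
		return 0;
	}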
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d38c87d7e5e8..e5b893fc5b66 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
+#define IGC_LEDCTL 0x00E00 /* LED Control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6f0376e42f4..559b443c409f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -949,19 +949,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
@@ -1059,7 +1059,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
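
The rest of the ixgbe changes in this series are mechanical: s32 is typedef'd from signed int on every kernel architecture, so switching status-returning functions from s32 to int changes no generated code — it only aligns ixgbe with the usual kernel convention of plain int for 0-or-negative-errno returns. The interleaved declaration shuffles follow netdev's reverse-Christmas-tree style (longest declaration first). The equivalence, in a few lines:

	#include <errno.h>

	typedef signed int s32;	/* as in include/uapi/asm-generic/int-ll64.h */

	static s32 old_style(void) { return -EIO; }	/* identical codegen */
	static int new_style(void) { return -EIO; }

	int main(void)
	{
		return old_style() == new_style() ? 0 : 1;
	}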
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 6835d5f18753..283a23150a4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -15,10 +15,10 @@
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
@@ -66,7 +66,7 @@ out:
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -93,12 +93,12 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
u16 list_offset, data_offset;
+ int ret_val;
/* Identify the PHY */
phy->ops.identify(hw);
@@ -148,9 +148,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* Then set pcie completion timeout
*
**/
-static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -170,7 +170,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -271,7 +271,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
u32 fctrl_reg;
u32 rmcs_reg;
@@ -411,13 +411,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -457,7 +457,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
* Function indicates success when phy link is available. If phy is not ready
* within 5 seconds of MAC indicating link, the function returns an error.
**/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+static int ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
u32 timeout;
u16 an_reg;
@@ -493,7 +493,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *link_up,
bool link_up_wait_to_complete)
{
@@ -579,7 +579,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -624,11 +624,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -647,15 +647,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
* clears all interrupts, performing a PHY reset, and performing a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
- s32 status;
- s32 phy_status = 0;
- u32 ctrl;
+ int phy_status = 0;
+ u8 analog_val;
u32 gheccr;
- u32 i;
+ int status;
u32 autoc;
- u8 analog_val;
+ u32 ctrl;
+ u32 i;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -781,7 +781,7 @@ mac_reset_top:
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq set index
**/
-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -805,7 +805,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq clear index (not used in 82598, but elsewhere)
**/
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -836,7 +836,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regindex;
@@ -881,7 +881,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
u32 offset;
u32 vlanbyte;
@@ -905,7 +905,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
*
* Performs read operation to Atlas analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 atlas_ctl;
@@ -927,7 +927,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Atlas analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 atlas_ctl;
@@ -948,13 +948,13 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
*
* Performs 8 byte read operation to SFP module's data over I2C interface.
**/
-static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u8 byte_offset, u8 *eeprom_data)
{
- s32 status = 0;
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ int status = 0;
u16 gssr;
u32 i;
@@ -1019,7 +1019,7 @@ out:
*
* Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
@@ -1034,8 +1034,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
-static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *sff8472_data)
+static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
byte_offset, sff8472_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 339e106a5732..cdaf087b4e85 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -21,24 +21,24 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
@@ -98,10 +98,10 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
}
}
-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
u16 list_offset, data_offset, data_value;
+ int ret_val;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -173,10 +173,10 @@ setup_sfp_err:
* prot_autoc_write_82599(). Note that locked can only be true in cases
* where this function doesn't return an error.
**/
-static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
u32 *reg_val)
{
- s32 ret_val;
+ int ret_val;
*locked = false;
/* If LESM is on then we need to hold the SW/FW semaphore. */
@@ -203,9 +203,9 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* This part (82599) may need to hold the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
**/
-static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
- s32 ret_val = 0;
+ int ret_val = 0;
/* Blocked by MNG FW so bail */
if (ixgbe_check_reset_blocked(hw))
@@ -237,7 +237,7 @@ out:
return ret_val;
}
-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -263,11 +263,11 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
u32 esdp;
if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -322,7 +322,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -334,7 +334,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
return 0;
@@ -500,14 +502,14 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
{
+ bool got_lock = false;
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
- bool got_lock = false;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
@@ -657,15 +659,15 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
*
* Implements the Intel SmartSpeed algorithm.
**/
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 i, j;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ bool link_up = false;
+ int status = 0;
+ s32 i, j;
/* Set autoneg_advertised value based on input link speed */
hw->phy.autoneg_advertised = 0;
@@ -767,16 +769,15 @@ out:
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- bool autoneg = false;
- s32 status;
- u32 pma_pmd_1g, link_mode, links_reg, i;
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i;
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ bool autoneg = false;
+ int status;
/* holds the value of AUTOC register at this current point in time */
u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -785,6 +786,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* temporary variable used for comparison purposes */
u32 autoc = current_autoc;
+ pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
&autoneg);
@@ -882,11 +885,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
*
* Restarts link on PHY and MAC based on settings passed in.
**/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -905,13 +908,13 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
- s32 status;
u32 ctrl, i, autoc, autoc2;
- u32 curr_lms;
bool link_up = false;
+ u32 curr_lms;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -1081,7 +1084,7 @@ mac_reset_top:
* @hw: pointer to hardware structure
* @fdircmd: current value of FDIRCMD register
*/
-static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
int i;
@@ -1099,12 +1102,12 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
- int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
u32 fdircmd;
- s32 err;
+ int err;
+ int i;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1212,7 +1215,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1236,7 +1239,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1359,7 +1362,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* Note that the tunnel bit in input must not be set when the hardware
* tunneling support does not exist.
**/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
@@ -1515,7 +1518,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
{
/* mask IPv6 since it is currently not supported */
@@ -1627,12 +1630,12 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
- s32 err;
+ int err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1690,13 +1693,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id)
{
u32 fdirhash;
u32 fdircmd;
- s32 err;
+ int err;
/* configure FDIRHASH register */
fdirhash = (__force u32)input->formatted.bkt_hash;
@@ -1734,7 +1737,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
*
* Performs read operation to Omer analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 core_ctl;
@@ -1756,7 +1759,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Omer analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 core_ctl;
@@ -1776,9 +1779,9 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
+ int ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -1802,9 +1805,9 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* If PHY already detected, maintains current PHY type in hw struct,
* otherwise executes the PHY detection routine.
**/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
@@ -1835,7 +1838,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit for 82599
**/
-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
@@ -1865,12 +1868,12 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Return: -EACCES if the FW is not present or if the FW version is
* not supported.
**/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_ptp_cfg_offset;
- s32 status = -EACCES;
- u16 offset;
+ int status = -EACCES;
u16 fw_version = 0;
+ u16 offset;
/* firmware check is only necessary for SFI devices */
if (hw->phy.media_type != ixgbe_media_type_fiber)
@@ -1917,7 +1920,7 @@ fw_version_err:
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
- s32 status;
+ int status;
/* get the offset to the Firmware Module block */
status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
@@ -1956,7 +1959,7 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
*
* Retrieves 16 bit word(s) read from EEPROM
**/
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -1982,7 +1985,7 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word from the EEPROM
**/
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -2006,11 +2009,11 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 anlp1_reg = 0;
u32 i, autoc_reg, autoc2_reg;
+ u32 anlp1_reg = 0;
+ int ret_val;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
@@ -2061,12 +2064,12 @@ reset_pipeline_out:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
@@ -2115,12 +2118,12 @@ release_i2c_access:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2e6e0365154a..3be1bfb16498 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -10,10 +10,10 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u16 count);
@@ -22,15 +22,15 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
bool locked = false;
+ int ret_val = 0;
+ u16 reg_cu = 0;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -267,11 +267,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 ctrl_ext;
u16 device_caps;
+ u32 ctrl_ext;
+ int ret_val;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -330,7 +330,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* 82599
* X540
**/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
u32 i;
@@ -354,9 +354,9 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
* up link and flow control settings, and leaves transmit and receive units
* disabled and uninitialized
**/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
@@ -380,7 +380,7 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
* Clears all hardware statistics counters by reading them from the hardware.
* Statistics counters are clear on read.
**/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
u16 i = 0;
@@ -489,14 +489,14 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
*
* Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
- s32 ret_val;
- u16 data;
+ int ret_val;
u16 pba_ptr;
u16 offset;
u16 length;
+ u16 data;
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
@@ -599,7 +599,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
@@ -653,7 +653,7 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
*
* Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
**/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
u16 link_status;
@@ -709,7 +709,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
u32 reg_val;
u16 i;
@@ -759,7 +759,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Store the index for the link active LED. This will be used to support
* blinking the LED.
**/
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 led_reg, led_mode;
@@ -800,7 +800,7 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @index: led number to turn on
**/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -821,7 +821,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to turn off
**/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -844,7 +844,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -895,11 +895,11 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*
* Writes 16 bit word(s) to EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -942,14 +942,14 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
u16 page_size;
+ int status;
+ u16 word;
u16 i;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -1019,7 +1019,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
hw->eeprom.ops.init_params(hw);
@@ -1038,11 +1038,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1077,12 +1077,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word_in;
u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 word_in;
+ int status;
u16 i;
/* Prepare the EEPROM for reading */
@@ -1129,7 +1129,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit value from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data)
{
hw->eeprom.ops.init_params(hw);
@@ -1149,11 +1149,11 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eerd;
- s32 status;
u32 i;
hw->eeprom.ops.init_params(hw);
@@ -1189,11 +1189,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
* This function is called only when we are writing a new large buffer
* at a given offset, so the data would be overwritten anyway.
**/
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset)
{
u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
- s32 status;
+ int status;
u16 i;
for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1229,7 +1229,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
@@ -1243,11 +1243,11 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eewr;
- s32 status;
u16 i;
hw->eeprom.ops.init_params(hw);
@@ -1286,7 +1286,7 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
@@ -1299,7 +1299,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1325,7 +1325,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
* Prepares EEPROM for access using bit-bang method. This function should
* be called before issuing a command to the EEPROM.
**/
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
u32 i;
@@ -1371,7 +1371,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
*
* Sets the hardware semaphores so EEPROM access can occur for bit-bang method
**/
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -1462,7 +1462,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
* ixgbe_ready_eeprom - Polls for EEPROM ready
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
u16 i;
u8 spi_stat_reg;
@@ -1680,7 +1680,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1728,7 +1728,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
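
The cast change above preserves an API quirk worth noting: ixgbe_calc_eeprom_checksum_generic() returns either a negative errno or the checksum value itself, and because the checksum is a u16 (at most 0xFFFF) it always fits in the positive range of int, so a sign check disambiguates the two cases. A presumed caller-side pattern (illustrative only; the real callers go through the eeprom ops table):

	/* Illustrative caller, not taken from the driver. */
	static int demo_fetch_checksum(struct ixgbe_hw *hw, u16 *out)
	{
		int ret = ixgbe_calc_eeprom_checksum_generic(hw);

		if (ret < 0)
			return ret;	/* negative errno */

		*out = (u16)ret;	/* checksum: 0..0xFFFF, always positive */
		return 0;
	}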
/**
@@ -1739,12 +1739,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1786,10 +1786,10 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1823,7 +1823,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
*
* Puts an ethernet address into a receive address register.
**/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
u32 rar_low, rar_high;
@@ -1876,7 +1876,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
*
* Clears an ethernet address from a receive address register.
**/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1917,7 +1917,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
u32 i;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1980,7 +1980,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
@@ -2049,7 +2049,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
@@ -2091,7 +2091,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
*
* Enables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2108,7 +2108,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
*
* Disables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2124,7 +2124,7 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
u32 mflcn_reg, fccfg_reg;
u32 reg;
@@ -2252,7 +2252,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
@@ -2294,10 +2294,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
*
* Enable flow control according on 1 gig fiber.
**/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val;
+ int ret_val;
/*
* On multispeed fiber at 1g, bail out if
@@ -2328,10 +2328,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val;
+ int ret_val;
/*
* On backplane, bail out if
@@ -2367,7 +2367,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
u16 technology_ability_reg = 0;
u16 lp_technology_ability_reg = 0;
@@ -2395,7 +2395,7 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
ixgbe_link_speed speed;
- s32 ret_val = -EIO;
+ int ret_val = -EIO;
bool link_up;
/*
@@ -2501,7 +2501,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
* bit hasn't caused the primary requests to be disabled, else 0
* is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2573,7 +2573,7 @@ gio_disable_fail:
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2641,7 +2641,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
*
* The default case requires no protection so just do the register read.
**/
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
*locked = false;
*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -2655,7 +2655,7 @@ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous read.
**/
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
return 0;
@@ -2668,7 +2668,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
* Stops the receive data path and waits for the HW to internally
* empty the Rx security block.
**/
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40
int i;
@@ -2700,7 +2700,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the receive data path
**/
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
u32 secrxreg;
@@ -2719,7 +2719,7 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit
**/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
@@ -2734,14 +2734,14 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
* @hw: pointer to hardware structure
* @index: led number to blink
**/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
- ixgbe_link_speed speed = 0;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ixgbe_link_speed speed = 0;
+ bool link_up = false;
bool locked = false;
- s32 ret_val;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2782,12 +2782,12 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to stop blinking
**/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
bool locked = false;
- s32 ret_val;
+ u32 autoc_reg = 0;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2821,10 +2821,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
* pointer, and returns the value at that location. This is used in both
* get and set mac_addr routines.
**/
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
{
- s32 ret_val;
+ int ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2849,11 +2849,11 @@ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
* set_lan_id() is called by identify_sfp(), but this cannot be relied
* upon for non-SFP connections, so we must call it here.
**/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
+ int ret_val;
u8 i;
- s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2942,7 +2942,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar_lo, mpsar_hi;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2993,7 +2993,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq pool index
**/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -3026,7 +3026,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* VFs advertised and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
**/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
u32 rar = hw->mac.san_mac_rar_index;
@@ -3045,7 +3045,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
int i;
@@ -3065,9 +3065,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- s32 regindex, first_empty_slot;
+ int regindex, first_empty_slot;
u32 bits;
/* short cut the special case */
@@ -3115,11 +3115,11 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regidx, vfta_delta, vfta, bits;
- s32 vlvf_index;
+ int vlvf_index;
if ((vlan > 4095) || (vind > 63))
return -EINVAL;
@@ -3226,7 +3226,7 @@ vfta_update:
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
u32 offset;
@@ -3276,7 +3276,7 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
@@ -3396,8 +3396,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check support for the alternative WWNN/WWPN prefix.
**/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
@@ -3494,7 +3494,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* This function will read the EEPROM location for the device capabilities,
* and return the word through device_caps.
**/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
@@ -3604,7 +3604,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
**/
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
u32 timeout)
{
u32 hicr, i, fwsts;
@@ -3676,15 +3676,15 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
 * Communicates with the manageability block. On success returns 0,
 * otherwise returns -EIO or -EINVAL.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
struct ixgbe_hic_hdr *hdr = buffer;
- u32 *u32arr = buffer;
u16 buf_len, dword_len;
- s32 status;
+ u32 *u32arr = buffer;
+ int status;
u32 bi;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
@@ -3753,13 +3753,13 @@ rel_out:
 * else returns -EBUSY when encountering an error acquiring the
 * semaphore, or -EIO when the command fails.
**/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
__always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
+ int ret_val;
int i;
- s32 ret_val;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3875,10 +3875,10 @@ static const u8 ixgbe_emc_therm_limit[4] = {
*
* Returns error code.
**/
-static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
u16 *ets_offset)
{
- s32 status;
+ int status;
status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
if (status)
@@ -3903,13 +3903,13 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
*
* Returns the thermal sensor data structure
**/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 ets_offset;
- u16 ets_cfg;
u16 ets_sensor;
u8 num_sensors;
+ u16 ets_cfg;
+ int status;
u8 i;
struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
@@ -3959,17 +3959,17 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
* Inits the thermal sensor thresholds according to the NVM map
 * and saves off the threshold and location values into mac.thermal_sensor_data
**/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
- s32 status;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
u8 low_thresh_delta;
u8 num_sensors;
u8 therm_limit;
+ u16 ets_sensor;
+ u16 ets_offset;
+ u16 ets_cfg;
+ int status;
u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
@@ -4192,16 +4192,16 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
*
* Set the link speed in the MAC and/or PHY register and restarts link.
*/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ bool autoneg, link_up = false;
u32 speedcnt = 0;
+ int status = 0;
u32 i = 0;
- bool autoneg, link_up = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
@@ -4340,8 +4340,8 @@ out:
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u8 rs, eeprom_data;
+ int status;
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
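/*
 * A minimal caller-side sketch of the convention the hunks above adopt
 * (illustration only; ixgbe_example_get_caps() is hypothetical and not
 * part of this patch). Returning plain int with standard errno values
 * lets status codes propagate unchanged instead of being laundered
 * through driver-private s32 constants:
 */
static int ixgbe_example_get_caps(struct ixgbe_hw *hw, u16 *caps)
{
	int err;

	err = hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, caps);
	if (err)
		return err;	/* -EIO and friends flow straight up */

	return 0;
}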
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 34761e691d52..6493abf189de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,89 +8,89 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on, bool vlvf_bypass);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
@@ -111,8 +111,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
#define IXGBE_EMC_DIODE3_DATA 0x2A
#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
@@ -121,7 +121,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
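/*
 * Note on the local-variable reshuffles in the ixgbe_common.c hunks
 * above: netdev code prefers "reverse Christmas tree" ordering, i.e.
 * declarations sorted longest line to shortest. A sketch of the shape
 * the patch moves toward (names borrowed from the functions above;
 * the function itself is illustrative only):
 */
static void example_decl_order(struct ixgbe_hw *hw)
{
	struct ixgbe_thermal_sensor_data *data;	/* longest first */
	ixgbe_link_speed speed;
	u16 ets_cfg;
	int status;
	u8 i;					/* shortest last */
}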
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index d26cea5b43bd..502666f28124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -18,7 +18,7 @@
* @max: max credits by traffic class
* @max_frame: maximum frame size
*/
-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+static int ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
{
int min_percent = 100;
@@ -59,7 +59,7 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
@@ -247,7 +247,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u8 pfc_en;
@@ -283,7 +283,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -300,7 +300,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return -EINVAL;
}
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
{
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
@@ -333,7 +333,7 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
bwg_id, prio_type, ets->prio_tc);
}
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 60cd5863bf5e..91788e4c4e19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -124,15 +124,15 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
+int ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 379ae747cdce..185c3e5f9837 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -15,10 +15,8 @@
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type)
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
@@ -75,11 +73,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
@@ -124,11 +119,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg;
u8 i;
@@ -171,7 +163,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
@@ -224,7 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -260,7 +252,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index fdca41abb44c..5bf3f13c6953 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -46,27 +46,19 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 7948849840a5..c61bd9059541 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -17,7 +17,7 @@
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -76,7 +76,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -128,7 +128,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -187,7 +187,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
u32 i, j, fcrtl, reg;
u8 max_tc = 0;
@@ -272,7 +272,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -330,7 +330,7 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index c6f084883cab..f6e5a87c03e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -70,30 +70,21 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9a63457712c7..6e6e6f1847b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -349,6 +349,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
@@ -459,7 +461,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
- s32 err = 0;
+ int err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -3326,9 +3328,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ int status;
if (hw->phy.type == ixgbe_phy_fw)
return -ENXIO;
@@ -3372,7 +3374,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = -EFAULT;
+ int status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
@@ -3403,66 +3405,68 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
static const struct {
ixgbe_link_speed mac_speed;
- u32 supported;
+ u32 link_mode;
} ixgbe_ls_map[] = {
- { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
- { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
- { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
- { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
- { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+ { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
+ { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
};
static const struct {
u32 lp_advertised;
- u32 mac_speed;
+ u32 link_mode;
} ixgbe_lp_map[] = {
- { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
- { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
- { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
- { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
- { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
- { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
};
static int
-ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
- s32 rc;
+ int rc;
u16 i;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
if (rc)
return rc;
- edata->lp_advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
if (info[0] & ixgbe_lp_map[i].lp_advertised)
- edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->lp_advertised);
}
- edata->supported = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- edata->supported |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ edata->supported);
}
- edata->advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- edata->advertised |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ edata->advertised);
}
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !linkmode_empty(edata->advertised);
edata->tx_lpi_enabled = edata->eee_enabled;
- if (edata->advertised & edata->lp_advertised)
- edata->eee_active = true;
+
+ linkmode_and(common, edata->advertised, edata->lp_advertised);
+ edata->eee_active = !linkmode_empty(common);
return 0;
}
-static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3476,17 +3480,17 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- struct ethtool_eee eee_data;
- s32 ret_val;
+ struct ethtool_keee eee_data;
+ int ret_val;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
return -EOPNOTSUPP;
- memset(&eee_data, 0, sizeof(struct ethtool_eee));
+ memset(&eee_data, 0, sizeof(struct ethtool_keee));
ret_val = ixgbe_get_eee(netdev, &eee_data);
if (ret_val)
@@ -3504,7 +3508,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (eee_data.advertised != edata->advertised) {
+ if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
e_err(drv,
"Setting EEE advertised speeds is not supported\n");
return -EINVAL;
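/*
 * Standalone sketch of the linkmode pattern used in the ethtool_keee
 * conversion above (example_eee_active() is hypothetical): legacy u32
 * SUPPORTED_* masks become full bitmaps manipulated through the
 * linkmode helpers.
 */
#include <linux/linkmode.h>

static bool example_eee_active(struct ethtool_keee *k)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);

	/* EEE is active only on modes both link partners advertise */
	linkmode_and(common, k->advertised, k->lp_advertised);
	return !linkmode_empty(common);
}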
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 99876b765b08..f985252c8c8d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(max_vfs,
#endif /* CONFIG_PCI_IOV */
static bool allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, bool, 0);
+module_param(allow_unsupported_sfp, bool, 0444);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -205,7 +205,7 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return 0;
}
-static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
+static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 link_status = 0;
@@ -1106,6 +1106,44 @@ static int ixgbe_tx_maxrate(struct net_device *netdev,
}
/**
+ * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += bytes;
+ tx_ring->stats.packets += pkts;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += bytes;
+ q_vector->tx.total_packets += pkts;
+}
+
+/**
+ * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.bytes += bytes;
+ rx_ring->stats.packets += pkts;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_bytes += bytes;
+ q_vector->rx.total_packets += pkts;
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1207,12 +1245,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -2429,12 +2463,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
return total_rx_packets;
}
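/*
 * The new ixgbe_update_{tx,rx}_ring_stats() helpers are the writer
 * side of the u64_stats seqcount pattern; a reader pairs with them
 * roughly like this (sketch; example_read_ring_stats() is not part of
 * this patch):
 */
static void example_read_ring_stats(const struct ixgbe_ring *ring,
				    u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*pkts = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}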
@@ -7809,7 +7839,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- s32 err;
+ int err;
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
@@ -10205,7 +10235,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index fe7ef5773369..d67d77e5dacc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -15,7 +15,7 @@
*
* returns SUCCESS if it successfully read message from buffer
**/
-s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -38,7 +38,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -58,7 +58,7 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -75,7 +75,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -92,7 +92,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -109,7 +109,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message notification
**/
-static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -134,7 +134,7 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
-static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -162,11 +162,11 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
if (!mbx->ops)
return -EIO;
@@ -189,11 +189,11 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static int ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
@@ -208,7 +208,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ixgbe_poll_for_ack(hw, mbx_id);
}
-static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+static int ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -227,9 +227,9 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
@@ -248,9 +248,9 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
@@ -305,7 +305,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 p2v_mailbox;
@@ -329,10 +329,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
@@ -368,10 +368,10 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
-static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
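/*
 * A caller's-eye sketch of the PF/VF mailbox flow above (hypothetical
 * example_notify_vf(); per the kernel-doc in this file, 0 means
 * success and -EIO means the mailbox timed out or is unavailable):
 */
static int example_notify_vf(struct ixgbe_hw *hw, u32 *msg, u16 len,
			     u16 vf)
{
	int err;

	err = ixgbe_write_mbx(hw, msg, len, vf);  /* copy message out */
	if (err)
		return err;

	return ixgbe_check_for_ack(hw, vf);	  /* 0 once the VF acks */
}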
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 6434c190e7a4..bd205306934b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -96,11 +96,11 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+int ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index f28140a05f09..07eaa3c3f4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -11,19 +11,19 @@
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
@@ -32,9 +32,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
*
* Returns an error code on error.
**/
-static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+static int ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_out_i2c_byte(hw, byte);
if (status)
@@ -49,9 +49,9 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
*
* Returns an error code on error.
**/
-static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+static int ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status)
@@ -85,7 +85,7 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
*
* Returns an error code on error.
*/
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -163,7 +163,7 @@ fail:
*
* Returns an error code on error.
*/
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -260,7 +260,7 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
*
* Determines the physical layer module found on the current adapter.
**/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
u32 status = -EFAULT;
u32 phy_addr;
@@ -332,11 +332,11 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
**/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
@@ -394,11 +394,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
* ixgbe_reset_phy_generic - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
u32 i;
u16 ctrl = 0;
- s32 status = 0;
+ int status = 0;
if (hw->phy.type == ixgbe_phy_unknown)
status = ixgbe_identify_phy_generic(hw);
@@ -470,8 +470,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*
* Reads a value from a specified PHY register without the SWFW lock
**/
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
{
u32 i, data, command;
@@ -546,11 +546,11 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -571,8 +571,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
{
u32 i, command;
@@ -644,11 +644,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -668,7 +668,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @hw: pointer to hardware structure
* @cmd: command register value to write
**/
-static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+static int ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
@@ -684,11 +684,11 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -718,11 +718,11 @@ mii_bus_read_done:
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -756,11 +756,11 @@ mii_bus_read_done:
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -787,12 +787,12 @@ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u16 val,
u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -821,7 +821,7 @@ mii_bus_write_done:
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+static int ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
@@ -837,7 +837,7 @@ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+static int ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -854,7 +854,7 @@ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+static int ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -872,7 +872,7 @@ static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+static int ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -889,7 +889,7 @@ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -907,7 +907,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
* @devad: device address to read
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
int devad, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -925,7 +925,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -944,7 +944,7 @@ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -1023,13 +1023,13 @@ out:
*
* ixgbe_mii_bus_init initializes a mii_bus structure in adapter
**/
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
- s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ int (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ int (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ int (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
u16 val);
- s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
+ int (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
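/*
 * Why the function-pointer locals above become int: struct mii_bus
 * declares its accessors with int returns (paraphrased from
 * include/linux/phy.h), and s32 is merely a typedef of int, so the new
 * spelling matches the contract verbatim:
 *
 *	int (*read)(struct mii_bus *bus, int addr, int regnum);
 *	int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
 *	int (*read_c45)(struct mii_bus *bus, int addr, int devad,
 *			int regnum);
 *	int (*write_c45)(struct mii_bus *bus, int addr, int devad,
 *			 int regnum, u16 val);
 */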
@@ -1095,12 +1095,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*
* Restart autonegotiation and PHY and waits for completion.
**/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = 0;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
ixgbe_link_speed speed;
+ bool autoneg = false;
+ int status = 0;
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
@@ -1173,7 +1173,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: unused
**/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -1214,10 +1214,10 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the supported link capabilities by reading the PHY auto
* negotiation register.
*/
-static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
u16 speed_ability;
- s32 status;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
@@ -1253,11 +1253,11 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
- s32 status = 0;
+ int status = 0;
*autoneg = true;
if (!hw->phy.speeds_supported)
@@ -1276,15 +1276,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
- s32 status;
- u32 time_out;
u32 max_time_out = 10;
- u16 phy_link = 0;
u16 phy_speed = 0;
+ u16 phy_link = 0;
u16 phy_data = 0;
+ u32 time_out;
+ int status;
/* Initialize speed and link to default case */
*link_up = false;
@@ -1326,7 +1326,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* it is called via a function pointer that could call other
* functions that could return an error.
**/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
@@ -1399,13 +1399,13 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
u16 phy_offset, control, eword, edata, block_crc;
- bool end_data = false;
u16 list_offset, data_offset;
+ bool end_data = false;
u16 phy_data = 0;
- s32 ret_val;
+ int ret_val;
u32 i;
/* Blocked by MNG FW so bail */
@@ -1506,7 +1506,7 @@ err_eeprom:
*
* Determines HW type and calls appropriate function.
**/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw)
{
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
@@ -1527,19 +1527,20 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
*
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 bitrate_nominal = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+ u16 enforce_sfp = 0;
u32 vendor_oui = 0;
- enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
- u8 comp_codes_1g = 0;
- u8 comp_codes_10g = 0;
- u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
- u16 enforce_sfp = 0;
+ int status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1576,7 +1577,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
+ if (status)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_BITRATE_NOMINAL,
+ &bitrate_nominal);
if (status)
goto err_read_i2c_eeprom;
@@ -1659,6 +1665,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ /* Support only Ethernet 1000BASE-BX10, identified via the Bit Rate
+ * Nominal value: per SFF-8472 convention, 1.25 Gb/s is rounded up
+ * to 0Dh (13 in units of 100 MBd) for 1000BASE-BX.
+ */
+ } else if ((comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
+ (bitrate_nominal == 0xD)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
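The Bit Rate Nominal check above is easy to verify by hand: SFF-8472 byte 12 encodes the nominal rate in units of 100 MBd, so 1.25 GBd encodes as 12.5, rounded up to 13 (0x0D). A minimal sketch of the same classification, using a hypothetical helper (not driver code) and the defines this patch adds to ixgbe_phy.h below:

/* Sketch only: sfp_is_1000base_bx() is a hypothetical helper. */
static bool sfp_is_1000base_bx(u8 comp_codes_1g, u8 bitrate_nominal)
{
        /* 1.25 GBd / 100 MBd = 12.5, rounded up to 13 == 0x0D per SFF-8472 */
        return (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
               bitrate_nominal == 0xD;
}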
@@ -1747,7 +1765,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return -EOPNOTSUPP;
}
@@ -1763,7 +1783,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -1792,10 +1814,10 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ int status;
u32 vendor_oui = 0;
enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
@@ -1975,7 +1997,7 @@ err_read_i2c_eeprom:
* Checks the MAC's EEPROM to see if it supports a given SFP+ module type; if
* so, it returns the offsets to the PHY init sequence block.
**/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset)
{
@@ -1999,12 +2021,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
@@ -2065,7 +2089,7 @@ err_phy:
*
* Performs a byte read from the SFP module's EEPROM over the I2C interface.
**/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2081,7 +2105,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs a byte read from the SFP module's SFF-8472 data over I2C
**/
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2097,7 +2121,7 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
@@ -2131,14 +2155,14 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* Performs a byte read from the SFP module's EEPROM over the I2C interface at
* a specified device address.
*/
-static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data, bool lock)
{
- s32 status;
- u32 max_retry = 10;
- u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 max_retry = 10;
bool nack = true;
+ u32 retry = 0;
+ int status;
if (hw->mac.type >= ixgbe_mac_X550)
max_retry = 3;
@@ -2221,7 +2245,7 @@ fail:
* Performs a byte read from the SFP module's EEPROM over the I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2238,7 +2262,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs a byte read from the SFP module's EEPROM over the I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2256,13 +2280,13 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
- s32 status;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
u32 max_retry = 1;
u32 retry = 0;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int status;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return -EBUSY;
@@ -2324,7 +2348,7 @@ fail:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2341,7 +2365,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2422,10 +2446,10 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
*
* Clocks in one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 i;
bool bit = false;
+ int i;
*data = 0;
for (i = 7; i >= 0; i--) {
@@ -2443,12 +2467,12 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
*
* Clocks out one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
- s32 status;
- s32 i;
- u32 i2cctl;
bool bit = false;
+ int status;
+ u32 i2cctl;
+ int i;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -2474,14 +2498,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
*
* Clocks in/out one bit via I2C data/clock
**/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
- s32 status = 0;
- u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
u32 timeout = 10;
bool ack = true;
+ int status = 0;
+ u32 i = 0;
if (data_oe_bit) {
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
@@ -2525,7 +2549,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
*
* Clocks in one bit via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2559,10 +2583,10 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
*
* Clocks out one bit via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ int status;
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2647,7 +2671,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* Sets the I2C data bit
* Asserts the I2C data output enable on X550 hardware.
**/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2769,7 +2793,7 @@ bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @on: true for on, false for off
**/
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
{
u32 status;
u16 reg;
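The bulk of the churn in this file, and in the rest of the series below, is the mechanical s32 -> int conversion. On every architecture the kernel supports, s32 is a typedef for a signed 32-bit int, so the change is behaviorally neutral; it merely aligns the driver with the kernel-wide convention of returning a plain int that is 0 on success or a negative errno. From the generic headers (abridged):

/* include/uapi/asm-generic/int-ll64.h */
typedef __signed__ int __s32;

/* include/asm-generic/int-ll64.h */
typedef __s32 s32;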
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index ef72729d7c93..14aa2ca51f70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -17,6 +17,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -39,6 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
@@ -121,57 +123,57 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val, bool lock);
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val, bool lock);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7299a830f6e4..fcfd0a075eee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -492,7 +492,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
u32 reg_offset, vf_shift, vfre;
- s32 err = 0;
+ int err = 0;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
@@ -775,7 +775,7 @@ static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- s32 retval;
+ int retval;
ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
@@ -1254,7 +1254,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
- s32 retval;
+ int retval;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- s32 retval;
+ int retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index f1f69ce67420..78deea5ec536 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -46,4 +46,11 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+
#endif /* #define _IXGBE_TXRX_COMMON_H_ */
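The two new prototypes let the standard and XDP/XSK completion paths share one per-ring stats-update helper instead of open-coding the sequence-counter dance in each. The function bodies are not part of this hunk; a sketch of the likely shape, assuming the usual u64_stats pattern:

/* Sketch of the likely shape, assuming the usual u64_stats pattern;
 * the real bodies live elsewhere in the driver and are not shown here.
 */
void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
                                struct ixgbe_q_vector *q_vector, u64 pkts,
                                u64 bytes)
{
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.packets += pkts;
        tx_ring->stats.bytes += bytes;
        u64_stats_update_end(&tx_ring->syncp);

        q_vector->tx.total_bytes += bytes;
        q_vector->tx.total_packets += pkts;
}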
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 61b9774b3d31..ed440dd0c4f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3210,6 +3210,9 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
+
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -3393,50 +3396,50 @@ struct ixgbe_hw;
/* Function pointer table */
struct ixgbe_eeprom_operations {
- s32 (*init_params)(struct ixgbe_hw *);
- s32 (*read)(struct ixgbe_hw *, u16, u16 *);
- s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*write)(struct ixgbe_hw *, u16, u16);
- s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- s32 (*update_checksum)(struct ixgbe_hw *);
- s32 (*calc_checksum)(struct ixgbe_hw *);
+ int (*init_params)(struct ixgbe_hw *);
+ int (*read)(struct ixgbe_hw *, u16, u16 *);
+ int (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*write)(struct ixgbe_hw *, u16, u16);
+ int (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ int (*update_checksum)(struct ixgbe_hw *);
+ int (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
- s32 (*init_hw)(struct ixgbe_hw *);
- s32 (*reset_hw)(struct ixgbe_hw *);
- s32 (*start_hw)(struct ixgbe_hw *);
- s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ int (*init_hw)(struct ixgbe_hw *);
+ int (*reset_hw)(struct ixgbe_hw *);
+ int (*start_hw)(struct ixgbe_hw *);
+ int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
- s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
- s32 (*stop_adapter)(struct ixgbe_hw *);
- s32 (*get_bus_info)(struct ixgbe_hw *);
+ int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ int (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ int (*stop_adapter)(struct ixgbe_hw *);
+ int (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
- s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
- s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
- s32 (*setup_sfp)(struct ixgbe_hw *);
- s32 (*disable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ int (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ int (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ int (*setup_sfp)(struct ixgbe_hw *);
+ int (*disable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ int (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
void (*init_swfw_sync)(struct ixgbe_hw *);
- s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
- s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ int (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ int (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
- s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ int (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ int (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
@@ -3444,38 +3447,38 @@ struct ixgbe_mac_operations {
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
/* LED */
- s32 (*led_on)(struct ixgbe_hw *, u32);
- s32 (*led_off)(struct ixgbe_hw *, u32);
- s32 (*blink_led_start)(struct ixgbe_hw *, u32);
- s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
- s32 (*init_led_link_act)(struct ixgbe_hw *);
+ int (*led_on)(struct ixgbe_hw *, u32);
+ int (*led_off)(struct ixgbe_hw *, u32);
+ int (*blink_led_start)(struct ixgbe_hw *, u32);
+ int (*blink_led_stop)(struct ixgbe_hw *, u32);
+ int (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
- s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
- s32 (*clear_rar)(struct ixgbe_hw *, u32);
- s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
- s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*enable_mc)(struct ixgbe_hw *);
- s32 (*disable_mc)(struct ixgbe_hw *);
- s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
- s32 (*init_uta_tables)(struct ixgbe_hw *);
+ int (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ int (*clear_rar)(struct ixgbe_hw *, u32);
+ int (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ int (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*init_rx_addrs)(struct ixgbe_hw *);
+ int (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ int (*enable_mc)(struct ixgbe_hw *);
+ int (*disable_mc)(struct ixgbe_hw *);
+ int (*clear_vfta)(struct ixgbe_hw *);
+ int (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ int (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *);
- s32 (*setup_fc)(struct ixgbe_hw *);
+ int (*fc_enable)(struct ixgbe_hw *);
+ int (*setup_fc)(struct ixgbe_hw *);
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ int (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
const char *);
- s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
- s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ int (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
@@ -3484,47 +3487,47 @@ struct ixgbe_mac_operations {
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
- s32 (*dmac_config)(struct ixgbe_hw *hw);
- s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
- s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
- s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
- s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ int (*dmac_config)(struct ixgbe_hw *hw);
+ int (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ int (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
- s32 (*identify)(struct ixgbe_hw *);
- s32 (*identify_sfp)(struct ixgbe_hw *);
- s32 (*init)(struct ixgbe_hw *);
- s32 (*reset)(struct ixgbe_hw *);
- s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_internal_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
- s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ int (*identify)(struct ixgbe_hw *);
+ int (*identify_sfp)(struct ixgbe_hw *);
+ int (*init)(struct ixgbe_hw *);
+ int (*reset)(struct ixgbe_hw *);
+ int (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ int (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ int (*setup_link)(struct ixgbe_hw *);
+ int (*setup_internal_link)(struct ixgbe_hw *);
+ int (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ int (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ int (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ int (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *);
+ int (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ int (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
bool (*check_overtemp)(struct ixgbe_hw *);
- s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
- s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
- s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*set_phy_power)(struct ixgbe_hw *, bool on);
+ int (*enter_lplu)(struct ixgbe_hw *);
+ int (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ int (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
- s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 value);
};
struct ixgbe_link_operations {
- s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
- s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ int (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val);
- s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
- s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ int (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val);
};
@@ -3602,14 +3605,14 @@ struct ixgbe_phy_info {
#include "ixgbe_mbx.h"
struct ixgbe_mbx_operations {
- s32 (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*check_for_msg)(struct ixgbe_hw *, u16);
+ int (*check_for_ack)(struct ixgbe_hw *, u16);
+ int (*check_for_rst)(struct ixgbe_hw *, u16);
};
struct ixgbe_mbx_stats {
@@ -3656,7 +3659,7 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
- s32 (*get_invariants)(struct ixgbe_hw *);
+ int (*get_invariants)(struct ixgbe_hw *);
const struct ixgbe_mac_operations *mac_ops;
const struct ixgbe_eeprom_operations *eeprom_ops;
const struct ixgbe_phy_operations *phy_ops;
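For context (this code is not part of the hunk): ixgbe_info is the per-MAC descriptor the probe path uses to copy these const ops tables into the live ixgbe_hw, which is why the table typedefs and the driver functions have to be converted together. Roughly, as a sketch of the probe-time wiring:

/* Hedged sketch mirroring ixgbe_probe(); illustrative, not part of
 * the patch.
 */
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
struct ixgbe_hw *hw = &adapter->hw;

hw->mac.type = ii->mac;
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));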
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 57a912e4653f..f1ffa398f6df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -16,9 +16,9 @@
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
@@ -26,7 +26,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
return ixgbe_media_type_copper;
}
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -51,7 +51,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
@@ -66,11 +66,11 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
- s32 status;
- u32 ctrl, i;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -166,9 +166,9 @@ mac_reset_top:
* and the generation-specific start_hw function.
* Then performs revision-specific operations, if any.
**/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -184,7 +184,7 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -215,9 +215,9 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -237,10 +237,10 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -259,9 +259,9 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -281,10 +281,10 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Write 16 bit word(s) to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -303,7 +303,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -368,7 +368,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
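Returning the checksum through the int return value is safe because the two value spaces cannot collide: checksum is a u16 (0..0xFFFF), so the cast is always non-negative, while every failure path returns a negative errno. The validate/update callers then follow this rough pattern (sketch):

/* Caller-side pattern (sketch): a negative return is an errno,
 * anything else is the 16-bit checksum.
 */
int status = hw->eeprom.ops.calc_checksum(hw);

if (status < 0)
        return status;
checksum = (u16)status;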
/**
@@ -379,12 +379,12 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -439,10 +439,10 @@ out:
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -484,10 +484,10 @@ out:
* Set FLUP (bit 23) of the EEC register to instruct the hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
+ int status;
u32 flup;
- s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == -EIO) {
@@ -529,7 +529,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
@@ -551,7 +551,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
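Every EEPROM helper in this file brackets its hardware access with this semaphore, following one pattern (sketch, mirroring ixgbe_read_eerd_X540() above):

/* Typical caller of the SWFW semaphore (sketch): */
int status;

if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
        return -EBUSY;

status = ixgbe_read_eerd_generic(hw, offset, data);

hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;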
@@ -660,7 +660,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
*/
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -760,7 +760,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -798,7 +798,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index e246c0d2a427..b69a680d3ab5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -3,17 +3,17 @@
#include "ixgbe_type.h"
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index c1adc94a5a65..2decb0710b6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -6,13 +6,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -29,7 +29,7 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -41,7 +41,7 @@ static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -55,7 +55,7 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -91,7 +91,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*
* Returns status code
*/
-static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+static int ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -104,7 +104,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+static int ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -117,9 +117,9 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
*
* Returns status code
*/
-static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+static int ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
{
- s32 status;
+ int status;
status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
if (status)
@@ -135,9 +135,9 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+static int ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
{
- s32 status;
+ int status;
status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
value);
@@ -153,9 +153,9 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
* This function assumes that the caller has acquired the proper semaphore.
* Returns error code
*/
-static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+static int ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u32 retry;
u16 value;
u8 reg;
@@ -225,7 +225,7 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- s32 status;
+ int status;
u16 value;
u8 retry;
@@ -292,7 +292,7 @@ out:
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
@@ -347,13 +347,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
return -EOPNOTSUPP;
}
-static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
return -EOPNOTSUPP;
@@ -368,7 +368,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+static int ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -383,7 +383,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -399,7 +399,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -414,7 +414,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -427,7 +427,7 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
* @activity: activity to perform
* @data: Pointer to 4 32-bit words of data
*/
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT])
{
union {
@@ -435,7 +435,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
struct ixgbe_hic_phy_activity_resp rsp;
} hic;
u16 retries = FW_PHY_ACT_RETRIES;
- s32 rc;
+ int rc;
u32 i;
do {
@@ -484,12 +484,12 @@ static const struct {
*
* Returns error code
*/
-static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
u16 phy_speeds;
u16 phy_id_lo;
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.id)
@@ -526,7 +526,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
{
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
@@ -545,7 +545,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+static int ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
@@ -557,10 +557,10 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
* ixgbe_setup_fw_link - Setup firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+static int ixgbe_setup_fw_link(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
@@ -613,7 +613,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
*/
-static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
{
if (hw->fc.requested_mode == ixgbe_fc_default)
hw->fc.requested_mode = ixgbe_fc_full;
@@ -627,7 +627,7 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static int ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -659,7 +659,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
*
* Note: ctrl can be NULL if the IOSF control register value is not needed
*/
-static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
u32 i, command;
@@ -690,12 +690,12 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
* @device_type: 3 bit device type
* @phy_data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -732,10 +732,10 @@ out:
* ixgbe_get_phy_token - Get the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -761,10 +761,10 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
* ixgbe_put_phy_token - Put the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_put_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -790,7 +790,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 data)
{
@@ -816,7 +816,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 3 bit device type
* @data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 *data)
{
@@ -824,7 +824,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
struct ixgbe_hic_internal_phy_req cmd;
struct ixgbe_hic_internal_phy_resp rsp;
} hic;
- s32 status;
+ int status;
memset(&hic, 0, sizeof(hic));
hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
@@ -851,14 +851,14 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
*
* Reads 16 bit word(s) from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
- s32 status;
+ int status;
u32 i;
/* Take semaphore for the entire operation. */
@@ -923,14 +923,14 @@ out:
*
* Returns error status for any failure
**/
-static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+static int ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 size, u16 *csum, u16 *buffer,
u32 buffer_size)
{
- u16 buf[256];
- s32 status;
u16 length, bufsz, i, start;
u16 *local_buffer;
+ u16 buf[256];
+ int status;
bufsz = ARRAY_SIZE(buf);
@@ -991,14 +991,14 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
u32 buffer_size)
{
u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 pointer, i, size;
u16 *local_buffer;
- s32 status;
u16 checksum = 0;
- u16 pointer, i, size;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1060,7 +1060,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1068,7 +1068,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
return ixgbe_calc_checksum_X550(hw, NULL, 0);
}
@@ -1080,11 +1080,11 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
- s32 status;
+ int status;
buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1118,12 +1118,12 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1168,11 +1168,11 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data)
{
- s32 status;
struct ixgbe_hic_write_shadow_ram buffer;
+ int status;
buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1196,9 +1196,9 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status = 0;
+ int status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
@@ -1216,10 +1216,10 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
**/
-static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X550(struct ixgbe_hw *hw)
{
- s32 status = 0;
union ixgbe_hic_hdr2 buffer;
+ int status = 0;
buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
buffer.req.buf_lenh = 0;
@@ -1238,7 +1238,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
* Sets bus link width and speed to unknown because X550em is
* not a PCI device.
**/
-static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
{
hw->bus.type = ixgbe_bus_type_internal;
hw->bus.width = ixgbe_bus_width_unknown;
@@ -1269,9 +1269,9 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
- u32 rxctrl, pfdtxgswc;
- s32 status;
struct ixgbe_hic_disable_rxen fw_cmd;
+ u32 rxctrl, pfdtxgswc;
+ int status;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
@@ -1311,10 +1311,10 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum = 0;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1351,11 +1351,11 @@ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
 * Write one or more 16-bit words to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words,
u16 *data)
{
- s32 status = 0;
+ int status = 0;
u32 i = 0;
/* Take semaphore for the entire operation. */
@@ -1387,12 +1387,12 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -1430,10 +1430,10 @@ out:
*
 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
**/
-static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+static int ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
- s32 status;
u32 reg_val;
+ int status;
/* Disable training protocol FSM. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1502,10 +1502,10 @@ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
* internal PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
{
- s32 status;
u32 link_ctrl;
+ int status;
/* Restart auto-negotiation. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
@@ -1551,11 +1551,11 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
* Configures the integrated KR PHY to use iXFI mode. Used to connect an
* internal and external PHY at a specific speed, without autonegotiation.
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
@@ -1608,7 +1608,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @hw: pointer to hardware structure
* @linear: true if SFP module is linear
*/
-static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+static int ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
@@ -1645,14 +1645,14 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
*
 * Configures the external PHY and the integrated KR PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
- s32 status;
- u16 reg_slice, reg_val;
bool setup_linear = false;
+ u16 reg_slice, reg_val;
+ int status;
/* Check if SFP module is supported and linear */
status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1691,11 +1691,11 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* Configures the integrated PHY for native SFI mode. Used to connect the
* internal PHY directly to an SFP cage, without autonegotiation.
**/
-static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* Disable all AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
@@ -1790,13 +1790,13 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
*
* Configure the integrated PHY for native SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
bool setup_linear = false;
u32 reg_phy_int;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1839,14 +1839,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Configure the integrated PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
u32 reg_slice, slice_offset;
bool setup_linear = false;
u16 reg_phy_ext;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1918,12 +1918,12 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Returns error status for any failure
**/
-static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait)
{
- s32 status;
ixgbe_link_speed force_speed;
+ int status;
/* Setup internal/external PHY link speed to iXFI (10G), unless
* only 1G is auto advertised then setup KX link.
@@ -1954,7 +1954,7 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
*
* Check that both the MAC and X557 external PHY have link.
**/
-static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool link_up_wait_to_complete)
@@ -1998,13 +1998,13 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
* @speed: unused
* @autoneg_wait_to_complete: unused
*/
-static s32
+static int
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2071,12 +2071,12 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
* @speed: the link speed to force
* @autoneg_wait: true when waiting for completion is needed
*/
-static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+static int ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2148,7 +2148,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2276,10 +2276,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- s32 status;
bool linear;
+ int status;
/* Check if SFP module is supported */
status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
@@ -2297,7 +2297,7 @@ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
**/
-static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -2375,7 +2375,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
 * Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
bool *is_overtemp)
{
u32 status;
@@ -2463,7 +2463,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
*
* Returns PHY access status
**/
-static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
bool lsc, overtemp;
u32 status;
@@ -2555,7 +2555,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -2579,11 +2579,11 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
*
* Configures the integrated KR PHY.
**/
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u32 reg_val;
+ int status;
status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2634,7 +2634,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
/* leave link alone for 2.5G */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
@@ -2652,7 +2652,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
*
* Returns error code if unable to get link status.
**/
-static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
{
u32 ret;
u16 autoneg_status;
@@ -2686,7 +2686,7 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
* A return of a non-zero value indicates an error, and the base driver should
* not report link up.
**/
-static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
ixgbe_link_speed force_speed;
bool link_up;
@@ -2746,9 +2746,9 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
status = ixgbe_reset_phy_generic(hw);
@@ -2764,7 +2764,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2786,7 +2786,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2819,12 +2819,12 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
 * semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
-static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
- s32 ret_val;
+ int ret_val;
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
@@ -2866,12 +2866,12 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
*
* Determine lowest common link speed with link partner.
**/
-static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed *lcd_speed)
{
- u16 an_lp_status;
- s32 status;
u16 word = hw->eeprom.ctrl_word_3;
+ u16 an_lp_status;
+ int status;
*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -2884,28 +2884,28 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
/* If link partner advertised 1G, return 1G */
if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
- return status;
+ return 0;
}
/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
(word & NVM_INIT_CTRL_3_D10GMP_PORT0))
- return status;
+ return 0;
/* Link partner not capable of lower speeds, return 10G */
*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
- return status;
+ return 0;
}
/**
* ixgbe_setup_fc_x550em - Set up flow control
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
{
bool pause, asm_dir;
u32 reg_val;
- s32 rc = 0;
+ int rc = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -2990,7 +2990,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -3073,13 +3073,13 @@ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
* (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
* the X557 PHY immediately prior to entering LPLU.
**/
-static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
{
u16 an_10g_cntl_reg, autoneg_reg, speed;
- s32 status;
ixgbe_link_speed lcd_speed;
u32 save_autoneg;
bool link_up;
+ int status;
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
@@ -3130,7 +3130,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
(lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
(lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
- return status;
+ return 0;
/* Clear AN completed indication */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
@@ -3167,10 +3167,10 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
* ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return 0;
@@ -3196,7 +3196,7 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
@@ -3239,10 +3239,10 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
* set during init_shared_code because the PHY/SFP type was
* not known. Perform the SFP init if necessary.
**/
-static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
hw->mac.ops.set_lan_id(hw);
@@ -3367,9 +3367,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
-static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u16 reg;
status = hw->phy.ops.read_reg(hw,
@@ -3441,14 +3441,14 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
** reset.
**/
-static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
ixgbe_link_speed link_speed;
- s32 status;
+ bool link_up = false;
u32 ctrl = 0;
+ int status;
u32 i;
- bool link_up = false;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3609,10 +3609,10 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = 0;
u32 an_cntl = 0;
+ int status = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -3714,9 +3714,9 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
*
* Acquires the SWFW semaphore and sets the I2C MUX
*/
-static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
{
- s32 status;
+ int status;
status = ixgbe_acquire_swfw_sync_X540(hw, mask);
if (status)
@@ -3750,11 +3750,11 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and get the shared PHY token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
- s32 status;
+ int status;
while (--retries) {
status = 0;
@@ -3807,11 +3807,11 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
 * Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
-static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
@@ -3833,11 +3833,11 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* Writes a value to specified PHY register using the SWFW lock and PHY Token.
 * The PHY Token is needed since the MDIO is shared between two MAC instances.
*/
-static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
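
ixgbe_read_phy_reg_x550a() and ixgbe_write_phy_reg_x550a() above share one bracketing pattern: acquire the SWFW semaphore together with the PHY token, perform the MDIO access, release the same mask on every path. A condensed sketch of that pattern (the wrapper name is hypothetical, and the read_reg_mdi PHY op is assumed from its use elsewhere in the driver):

	static int example_locked_phy_read(struct ixgbe_hw *hw, u32 reg_addr,
					   u32 device_type, u16 *phy_data)
	{
		u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
		int status;

		if (hw->mac.ops.acquire_swfw_sync(hw, mask))
			return -EBUSY;	/* semaphore or PHY token unavailable */

		status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type,
						  phy_data);

		hw->mac.ops.release_swfw_sync(hw, mask); /* released on all paths */
		return status;
	}
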
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 59798bc33298..d34d715c59eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -359,12 +359,8 @@ construct_skb:
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
@@ -499,13 +495,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
}
tx_ring->next_to_clean = ntc;
-
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
if (xsk_frames)
xsk_tx_completed(pool, xsk_frames);
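
Both hunks in this file fold the open-coded u64_stats sequences into shared ring-stats helpers whose definitions are outside this excerpt. Judging only from the deleted lines, a plausible shape for the Rx variant is the following (the field accesses come from the removed code; the helper body itself is a sketch, not the actual definition):

	static void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
					       struct ixgbe_q_vector *q_vector,
					       u64 pkts, u64 bytes)
	{
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->stats.packets += pkts;
		rx_ring->stats.bytes += bytes;
		u64_stats_update_end(&rx_ring->syncp);

		q_vector->rx.total_packets += pkts;
		q_vector->rx.total_bytes += bytes;
	}

The Tx variant would mirror this with the tx_ring fields removed in the second hunk.
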
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a44e4bd56142..9c960017a6de 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4413,7 +4413,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
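
skb_network_offset() computes exactly the pointer arithmetic being removed, the byte distance from the start of packet data to the network (L3) header, so the substitution changes readability, not behavior. A sketch of the equivalence (the wrapper is illustrative):

	static unsigned int example_mac_hdr_len(const struct sk_buff *skb)
	{
		/* Old open-coded form and new helper yield the same value:
		 * the MAC header length sitting in front of the L3 header.
		 */
		WARN_ON_ONCE(skb_network_offset(skb) !=
			     skb_network_header(skb) - skb->data);
		return skb_network_offset(skb);
	}
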
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 884d64114bff..837295fecd17 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -180,6 +180,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index ceba4aa4f026..a399defe25fd 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeon_ep/
+obj-y += octeon_ep_vf/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a641b3534ca3..40a5f1431e4e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5097,7 +5097,7 @@ static int mvneta_ethtool_set_wol(struct net_device *dev,
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
@@ -5113,7 +5113,7 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
}
static int mvneta_ethtool_set_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
new file mode 100644
index 000000000000..e371a3ef0c49
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration
+#
+
+config OCTEON_EP_VF
+ tristate "Marvell Octeon PCI Endpoint NIC VF Driver"
+ depends on 64BIT
+ depends on PCI
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC VF.
+
+ For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>.
+
+ To compile this driver as a module, choose M here.
+ The name of the module will be octeon_ep_vf.
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
new file mode 100644
index 000000000000..4a5f9fcb0b40
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC VF
+#
+
+obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o
+
+octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \
+ octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \
+ octep_vf_ethtool.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
new file mode 100644
index 000000000000..88937fce75f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cn9k.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purposes */
+static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no),
+ val & GENMASK_ULL(31, 0));
+}
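
cn93_vf_reset_iq() zeroes the doorbell and count CSRs by writing the pending bits back, what looks like write-1-to-clear semantics: all-ones into the doorbell, then the just-read low 32 bits back into IN_CNTS. A compact restatement of the idiom (the helper name is illustrative):

	static void example_w1c_clear(struct octep_vf_device *oct, u64 cnts_reg)
	{
		/* Reading returns the pending count; writing those same
		 * bits back clears them.
		 */
		u64 val = octep_vf_read_csr64(oct, cnts_reg);

		octep_vf_write_csr64(oct, cnts_reg, val & GENMASK_ULL(31, 0));
	}
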
+
+/* Reset Hardware Rx queue */
+static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_vf_reset_iq(oct, q);
+ cn93_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
+ CN93_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for the IDLE bit to be set */
+ if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_VF_R_IN_CTL_IS_64B;
+ reg_val |= CN93_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to the maximum (0xFFFFFFFF) to disable the interrupt */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for the IDLE bit to be set */
+ if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
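
The final write above packs two Rx interrupt-coalescing knobs into the single 64-bit INT_LEVELS CSR: the time threshold in the upper half and the packet-count threshold in the lower half (the enable/disable helpers later in this file additionally toggle bit 62 of the same register as what appears to be the interrupt-enable flag). Decoding the layout implied by that shift/OR, not by any documented register map:

	/* Sketch: unpack the INT_LEVELS value composed above. */
	static void example_decode_int_levels(u64 reg_val, u32 *time_thresh,
					      u32 *pkt_thresh)
	{
		*time_thresh = (u32)(reg_val >> 32);	/* upper 32 bits */
		*pkt_thresh = (u32)(reg_val & GENMASK_ULL(31, 0)); /* lower 32 */
	}
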
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cn93_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable the PF to VF mbox interrupt by setting its enable bit */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable the PF to VF mbox interrupt by clearing its enable bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
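
The read-index update relies on unsigned modular arithmetic: pkt_in_done - iq->pkt_in_done counts the newly fetched instructions even if the 32-bit hardware counter wrapped between polls, because u32 subtraction is performed modulo 2^32. A worked illustration with arbitrary values:

	static u32 example_wrap_safe_delta(void)
	{
		u32 prev = 0xFFFFFFF0;	/* snapshot taken at the previous poll */
		u32 now = 0x00000010;	/* counter wrapped past zero since then */

		return now - prev;	/* 0x20: 32 new completions despite the wrap */
	}
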
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cn93(oct, q);
+ octep_vf_enable_oq_cn93(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cn93(oct, q);
+ octep_vf_disable_oq_cn93(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cn93_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cn93() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
+ octep_vf_init_config_cn93_vf(oct);
+}
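
octep_vf_device_setup_cn93() does nothing but fill a per-silicon ops table, and the CNXK file below plugs its own implementations into the same slots, so common driver code never branches on chip type. A hedged sketch of how a caller consumes the table (the function is illustrative; the ops fields are the ones assigned above):

	static void example_start_io(struct octep_vf_device *oct)
	{
		/* Identical call sites serve CN93 and CNXK: the per-chip
		 * setup routine already picked each implementation.
		 */
		oct->hw_ops.reset_io_queues(oct);
		oct->hw_ops.enable_interrupts(oct);
		oct->hw_ops.enable_io_queues(oct);
	}
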
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
new file mode 100644
index 000000000000..1f79dfad42c6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cnxk.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purposes */
+static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_vf_reset_iq(oct, q);
+ cnxk_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
+ CNXK_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for the IDLE bit to be set */
+ if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to the maximum (0xFFFFFFFF) to disable the interrupt */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for the IDLE bit to be set */
+ if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~GENMASK_ULL(31, 0);
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cnxk_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable the PF to VF mbox interrupt by setting its enable bit */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable the PF to VF mbox interrupt by clearing its enable bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
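+ /* unsigned 32-bit subtraction handles hardware counter wrap-around */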
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
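+ /* Write all-ones to clear any stale doorbell count, then poll
+ * (up to HZ times, sleeping a jiffy each) until hardware zeroes it.
+ */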
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cnxk(oct, q);
+ octep_vf_enable_oq_cnxk(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cnxk(oct, q);
+ octep_vf_disable_oq_cnxk(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cnxk_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cnxk() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
+ octep_vf_init_config_cnxk_vf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
new file mode 100644
index 000000000000..e03a647b0110
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_CONFIG_H_
+#define _OCTEP_VF_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_VF_32BYTE_INSTR 32
+#define OCTEP_VF_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024
+ /* Minimum number of input (Tx) requests enqueued before ringing the doorbell */
+#define OCTEP_VF_DB_MIN 8
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0
+
+/* Minimum watermark for backpressure */
+#define OCTEP_VF_OQ_WMARK_MIN 256
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024
+
+ /* Rx buffer size: use page size buffers.
+ * Build the skb from the allocated page buffer once the packet is received.
+ * When a gathered packet is received, use the head page as the skb head and
+ * attach the page buffers of consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_VF_OQ_PKTS_PER_INTR 128
+#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+ /* Tx Queue wake threshold
+ * Wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_VF_MIN_MTU ETH_MIN_MTU
+ /* Maximum MTU supported by Octeon interface */
+#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_VF_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
+
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings)
+
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+
+/* Hardware Tx Queue configuration. */
+struct octep_vf_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* Minimum number of commands pending to be posted to Octeon before driver
+ * hits the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_vf_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+
+ /* Water mark for backpressure.
+ * Output queue sends backpressure signal to source when
+ * free buffer count falls below wmark.
+ */
+ u32 wmark;
+};
+
+/* Tx/Rx configuration */
+struct octep_vf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+};
+
+/* Octeon MSI-x config. */
+struct octep_vf_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_vf_config {
+ /* Input Queue attributes. */
+ struct octep_vf_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_vf_oq_config oq;
+
+ /* MSI-X interrupt config */
+ struct octep_vf_msix_config msix_cfg;
+
+ /* NIC VF ring Configuration */
+ struct octep_vf_ring_config ring_cfg;
+};
+#endif /* _OCTEP_VF_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
new file mode 100644
index 000000000000..a1979b45e355
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_dropped_bytes_fifo_full",
+};
+
+#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_vf_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN, "%s",
+ octep_vf_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_tx_stats *iface_tx_stats;
+ struct octep_vf_iface_rx_stats *iface_rx_stats;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_vf_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_busy_errors += iq->stats.tx_busy;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
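+ /* fill data[] in the same order as octep_vf_gstrings_global_stats */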
+ i = 0;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \
+{ \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_vf_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops octep_vf_ethtool_ops = {
+ .get_drvinfo = octep_vf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_vf_get_strings,
+ .get_sset_count = octep_vf_get_sset_count,
+ .get_ethtool_stats = octep_vf_get_ethtool_stats,
+ .get_link_ksettings = octep_vf_get_link_ksettings,
+};
+
+void octep_vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
new file mode 100644
index 000000000000..dd49d0b8b494
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+struct workqueue_struct *octep_vf_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_vf_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to interrupt handler, from which napi poll
+ * is scheduled and includes quick access to private data of Tx/Rx queue
+ * corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
+{
+ struct octep_vf_ioq_vector *ioq_vector;
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_vf_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_vf_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* MSI-X vectors are needed only for Tx/Rx queue interrupts;
+ * the VF has no non-queue (generic) interrupts.
+ */
+ num_msix = oct->num_oqs;
+ oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_vf_disable_msix(struct octep_vf_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data containing pointers to the Tx/Rx queue private
+ * data and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_vf_ioq_vector *ioq_vector = data;
+ struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_vf_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_vf_request_irqs(struct octep_vf_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_vf_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ int ret, i;
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_vf_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_vf_free_irqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_vf_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_vf_setup_irqs(struct octep_vf_device *oct)
+{
+ if (octep_vf_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_vf_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_vf_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_vf_disable_msix(oct);
+enable_msix_err:
+ octep_vf_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_vf_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+{
+ octep_vf_free_irqs(oct);
+ octep_vf_disable_msix(oct);
+ octep_vf_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
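+ /* acknowledge the Rx packets processed so the hardware count stays in sync */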
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ smp_wmb();
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_vf_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_vf_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);
+ rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
+
+ /* need more polling if tx completion processing is still pending or
+ * processed at least 'budget' number of rx packets.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+ return rx_done;
+}
+
+/**
+ * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_add(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_delete(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_enable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_disable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_vf_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_rx_state(oct, up);
+ if (err)
+ netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
+}
+
+static int octep_vf_get_link_status(struct octep_vf_device *oct)
+{
+ int err;
+
+ err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
+ if (err)
+ netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
+ return oct->link_info.oper_up;
+}
+
+static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_link_status(oct, up);
+ if (err) {
+ netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
+ return;
+ }
+ oct->link_info.oper_up = up;
+}
+
+/**
+ * octep_vf_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_vf_open(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_vf_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_vf_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_vf_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_vf_napi_add(oct);
+ octep_vf_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_vf_set_rx_state(oct, true);
+
+ ret = octep_vf_get_link_status(oct);
+ if (!ret)
+ octep_vf_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_vf_oq_dbell_init(oct);
+
+ ret = octep_vf_get_link_status(oct);
+ if (ret)
+ octep_vf_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+ octep_vf_clean_irqs(oct);
+setup_irq_err:
+ octep_vf_free_oqs(oct);
+setup_oq_err:
+ octep_vf_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_vf_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_vf_stop(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_vf_set_link_status(oct, false);
+ octep_vf_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+
+ octep_vf_clean_irqs(oct);
+ octep_vf_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_vf_free_oqs(oct);
+ octep_vf_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_vf_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+{
+ int ret;
+
+ ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD,
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+ switch (ret) {
+ case 0: /* Stopped the queue, since IQ is full */
+ return 1;
+ case -1: /*
+ * The queue was stopped, but completions
+ * processed on another CPU freed up space
+ * and re-enabled it in the meantime.
+ */
+ iq->stats.restart_cnt++;
+ fallthrough;
+ case 1: /* Queue left enabled, since IQ is not yet full*/
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
+ struct octep_vf_tx_sglist_desc *sglist;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct octep_vf_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_vf_instr_hdr *ih;
+ struct octep_vf_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ int xmit_more;
+ u16 q_no, wi;
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->pkind = oct->fw_info.pkind;
+ ih->fsz = oct->fw_info.fsz;
+ ih->tlen = skb->len + ih->fsz;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
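+ /* each sglist descriptor holds 4 DMA pointers; the four 16-bit
+ * lengths share one 64-bit word and are filled in reverse order
+ */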
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+ if (oct->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ }
+ /* the hardware endian-swaps txm on ingest, so pre-swap it to big-endian */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+
+ skb_tx_timestamp(skb);
+ iq->fill_cnt++;
+ wi++;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+ /* octep_vf_iq_full_check() stops the queue and returns true if
+ * inserting the current packet made it full. If the queue is still
+ * open, more packets are coming (xmit_more) and the batch threshold
+ * is not yet reached, defer ringing the doorbell.
+ */
+ if (!octep_vf_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
+ goto ring_dbell;
+
+dma_map_sg_err:
+ if (si > 0) {
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ }
+ /* entries 1..si-1 were mapped as pages; entry si itself failed */
+ while (si > 1) {
+ --si;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ring_dbell:
+ /* Flush the hw descriptors before writing to doorbell */
+ smp_wmb();
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
+ return NETDEV_TX_OK;
+}
+
+int octep_vf_get_if_stats(struct octep_vf_device *oct)
+{
+ struct octep_vf_iface_rxtx_stats vf_stats;
+ int ret, size;
+
+ memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ (u8 *)&vf_stats, &size);
+
+ if (ret)
+ return ret;
+
+ memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
+ sizeof(struct octep_vf_iface_rx_stats));
+ memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
+ sizeof(struct octep_vf_iface_tx_stats));
+
+ return 0;
+}
+
+int octep_vf_get_link_info(struct octep_vf_device *oct)
+{
+ int ret, size;
+
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ (u8 *)&oct->link_info, &size);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * octep_vf_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_vf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ int q;
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ if (!octep_vf_get_if_stats(oct)) {
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+ oct->iface_rx_stats.err_pkts;
+ stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+ stats->tx_dropped = oct->iface_tx_stats.dropped;
+ }
+}
+
+/**
+ * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, that potentially clears a Tx queue timeout
+ * condition.
+ */
+static void octep_vf_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_vf_stop(netdev);
+ octep_vf_open(netdev);
+ }
+ rtnl_unlock();
+ netdev_put(netdev, NULL);
+}
+
+/**
+ * octep_vf_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_hold(netdev, NULL, GFP_ATOMIC);
+ schedule_work(&oct->tx_timeout_task);
+}
+
+static int octep_vf_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ int err;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_vf_mbox_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+ return err;
+}
+
+static int octep_vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 rx_offloads = 0, tx_offloads = 0;
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & netdev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
+
+ err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
+ if (!err)
+ netdev->features = features;
+
+ return err;
+}
+
+static const struct net_device_ops octep_vf_netdev_ops = {
+ .ndo_open = octep_vf_open,
+ .ndo_stop = octep_vf_stop,
+ .ndo_start_xmit = octep_vf_start_xmit,
+ .ndo_get_stats64 = octep_vf_get_stats64,
+ .ndo_tx_timeout = octep_vf_tx_timeout,
+ .ndo_set_mac_address = octep_vf_set_mac,
+ .ndo_change_mtu = octep_vf_change_mtu,
+ .ndo_set_features = octep_vf_set_features,
+};
+
+static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ return "CN98XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ return "CN10KB";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_vf_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_vf_device_setup(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR region 0 */
+ oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ if (!oct->mmio.hw_addr) {
+ dev_err(&pdev->dev,
+ "Failed to remap BAR0; start=0x%llx len=0x%llx\n",
+ pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ goto ioremap_err;
+ }
+ oct->mmio.mapped = 1;
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cn93(oct);
+ break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cnxk(oct);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported device\n");
+ goto unsupported_dev;
+ }
+
+ return 0;
+
+unsupported_dev:
+ iounmap(oct->mmio.hw_addr);
+ioremap_err:
+ kfree(oct->conf);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * octep_vf_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_vf_device_cleanup(struct octep_vf_device *oct)
+{
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ if (oct->mmio.mapped)
+ iounmap(oct->mmio.hw_addr);
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
+{
+ return octep_vf_mbox_get_mac_addr(oct, addr);
+}
+
+/**
+ * octep_vf_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto disable_pci_device;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto disable_pci_device;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
+ OCTEP_VF_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto mem_regions_release;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_vf_dev = netdev_priv(netdev);
+ octep_vf_dev->netdev = netdev;
+ octep_vf_dev->pdev = pdev;
+ octep_vf_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_vf_dev);
+
+ err = octep_vf_device_setup(octep_vf_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto netdevice_free;
+ }
+ INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
+
+ netdev->netdev_ops = &octep_vf_netdev_ops;
+ octep_vf_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ if (octep_vf_setup_mbox(octep_vf_dev)) {
+ dev_err(&pdev->dev, "VF Mailbox setup failed\n");
+ err = -ENOMEM;
+ goto device_cleanup;
+ }
+
+ if (octep_vf_mbox_version_check(octep_vf_dev)) {
+ dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
+ dev_err(&pdev->dev, "unable to get fw info\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ netdev->hw_features = NETIF_F_SG;
+ if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->min_mtu = OCTEP_VF_MIN_MTU;
+ netdev->max_mtu = OCTEP_VF_MAX_MTU;
+ netdev->mtu = OCTEP_VF_DEFAULT_MTU;
+
+ if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
+ octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto delete_mbox;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+delete_mbox:
+ octep_vf_delete_mbox(octep_vf_dev);
+device_cleanup:
+ octep_vf_device_cleanup(octep_vf_dev);
+netdevice_free:
+ free_netdev(netdev);
+mem_regions_release:
+ pci_release_mem_regions(pdev);
+disable_pci_device:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Device probe failed\n");
+ return err;
+}
+
+/**
+ * octep_vf_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_vf_remove(struct pci_dev *pdev)
+{
+ struct octep_vf_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ octep_vf_mbox_dev_remove(oct);
+ cancel_work_sync(&oct->tx_timeout_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+ octep_vf_delete_mbox(oct);
+ octep_vf_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_vf_driver = {
+ .name = OCTEP_VF_DRV_NAME,
+ .id_table = octep_vf_pci_id_tbl,
+ .probe = octep_vf_probe,
+ .remove = octep_vf_remove,
+};
+
+/**
+ * octep_vf_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_vf_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
+
+ ret = pci_register_driver(&octep_vf_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_VF_DRV_NAME, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * octep_vf_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_vf_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
+
+ pci_unregister_driver(&octep_vf_driver);
+
+ pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
+}
+
+module_init(octep_vf_init_module);
+module_exit(octep_vf_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
new file mode 100644
index 000000000000..5769f62545cd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_MAIN_H_
+#define _OCTEP_VF_MAIN_H_
+
+#include "octep_vf_tx.h"
+#include "octep_vf_rx.h"
+#include "octep_vf_mbox.h"
+
+#define OCTEP_VF_DRV_NAME "octeon_ep_vf"
+#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver"
+
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF
+#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF
+#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103
+#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03
+#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03
+
+#define OCTEP_VF_MAX_QUEUES 63
+#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES
+#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES
+
+#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ
+
+#define OCTEP_VF_IQ_INTR_RESEND_BIT 59
+#define OCTEP_VF_OQ_INTR_RESEND_BIT 59
+
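+ /* Tx ring occupancy helpers: indices are masked to the power-of-2 ring size */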
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octep_vf_mmio {
+ /* Kernel virtual address at which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_vf_hw_ops {
+ void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ void (*reinit_regs)(struct octep_vf_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_vf_iq *iq);
+
+ void (*enable_interrupts)(struct octep_vf_device *oct);
+ void (*disable_interrupts)(struct octep_vf_device *oct);
+
+ void (*enable_io_queues)(struct octep_vf_device *oct);
+ void (*disable_io_queues)(struct octep_vf_device *oct);
+ void (*enable_iq)(struct octep_vf_device *oct, int q);
+ void (*disable_iq)(struct octep_vf_device *oct, int q);
+ void (*enable_oq)(struct octep_vf_device *oct, int q);
+ void (*disable_oq)(struct octep_vf_device *oct, int q);
+ void (*reset_io_queues)(struct octep_vf_device *oct);
+ void (*dump_registers)(struct octep_vf_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_vf_mbox_data {
+ /* Holds the offset of received data via mailbox. */
+ u32 data_index;
+
+ /* Holds the received data via mailbox. */
+ u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE];
+};
+
+/* wrappers around work structs */
+struct octep_vf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+};
+
+/* Octeon device mailbox */
+struct octep_vf_mbox {
+ /* A mutex to serialize access to this mailbox. */
+ struct mutex lock;
+
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ /* Octeon mailbox data */
+ struct octep_vf_mbox_data mbox_data;
+
+ /* Octeon mailbox work handler to process Mbox messages */
+ struct octep_vf_mbox_wk wk;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_vf_ioq_vector {
+ char name[OCTEP_VF_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_vf_device *octep_vf_dev;
+ struct octep_vf_iq *iq;
+ struct octep_vf_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_VF_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_vf_link_mode_bit_indices {
+ OCTEP_VF_LINK_MODE_10GBASE_T = 0,
+ OCTEP_VF_LINK_MODE_10GBASE_R,
+ OCTEP_VF_LINK_MODE_10GBASE_CR,
+ OCTEP_VF_LINK_MODE_10GBASE_KR,
+ OCTEP_VF_LINK_MODE_10GBASE_LR,
+ OCTEP_VF_LINK_MODE_10GBASE_SR,
+ OCTEP_VF_LINK_MODE_25GBASE_CR,
+ OCTEP_VF_LINK_MODE_25GBASE_KR,
+ OCTEP_VF_LINK_MODE_25GBASE_SR,
+ OCTEP_VF_LINK_MODE_40GBASE_CR4,
+ OCTEP_VF_LINK_MODE_40GBASE_KR4,
+ OCTEP_VF_LINK_MODE_40GBASE_LR4,
+ OCTEP_VF_LINK_MODE_40GBASE_SR4,
+ OCTEP_VF_LINK_MODE_50GBASE_CR2,
+ OCTEP_VF_LINK_MODE_50GBASE_KR2,
+ OCTEP_VF_LINK_MODE_50GBASE_SR2,
+ OCTEP_VF_LINK_MODE_50GBASE_CR,
+ OCTEP_VF_LINK_MODE_50GBASE_KR,
+ OCTEP_VF_LINK_MODE_50GBASE_LR,
+ OCTEP_VF_LINK_MODE_50GBASE_SR,
+ OCTEP_VF_LINK_MODE_100GBASE_CR4,
+ OCTEP_VF_LINK_MODE_100GBASE_KR4,
+ OCTEP_VF_LINK_MODE_100GBASE_LR4,
+ OCTEP_VF_LINK_MODE_100GBASE_SR4,
+ OCTEP_VF_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_vf_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: physical link is up/down. */
+ u8 oper_up;
+};
+
+/* Hardware interface stats information. */
+struct octep_vf_iface_rxtx_stats {
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+};
+
+struct octep_vf_fw_info {
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* front size data */
+ u8 fsz;
+ /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+ /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_vf_device {
+ struct octep_vf_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_vf_mmio mmio;
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* Pointers to Octeon Tx queues */
+ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* Hardware operations */
+ struct octep_vf_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_vf_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_vf_mbox *mbox;
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Negotiated Mbox version */
+ u32 mbox_neg_ver;
+
+ /* firmware info */
+ struct octep_vf_fw_info fw_info;
+};
+
+static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \
+ writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \
+ writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr(octep_vf_dev, reg_off) \
+ readl((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
+ readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+extern struct workqueue_struct *octep_vf_wq;
+
+int octep_vf_device_setup(struct octep_vf_device *oct);
+int octep_vf_setup_iqs(struct octep_vf_device *oct);
+void octep_vf_free_iqs(struct octep_vf_device *oct);
+void octep_vf_clean_iqs(struct octep_vf_device *oct);
+int octep_vf_setup_oqs(struct octep_vf_device *oct);
+void octep_vf_free_oqs(struct octep_vf_device *oct);
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct);
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct);
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct);
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget);
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
+void octep_vf_set_ethtool_ops(struct net_device *netdev);
+int octep_vf_get_link_info(struct octep_vf_device *oct);
+int octep_vf_get_if_stats(struct octep_vf_device *oct);
+void octep_vf_mbox_work(struct work_struct *work);
+#endif /* _OCTEP_VF_MAIN_H_ */
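
The IQ_INSTR_PENDING()/IQ_INSTR_SPACE() macros above depend on the ring size being a power of two: the unsigned difference of the write and flush indices, masked with ring_size_mask, gives the ring occupancy even after the write index wraps past zero. A minimal user-space sketch of that arithmetic (illustrative values only, not driver code):

#include <assert.h>

/* Same expression as IQ_INSTR_PENDING(): unsigned subtraction wraps
 * modulo 2^32 and the mask reduces the result modulo the ring size.
 */
static unsigned int pending(unsigned int write, unsigned int flush,
			    unsigned int ring_size_mask)
{
	return (write - flush) & ring_size_mask;
}

int main(void)
{
	unsigned int mask = 1024 - 1;	/* ring size must be a power of two */

	assert(pending(10, 4, mask) == 6);	/* no wrap */
	assert(pending(2, 1020, mask) == 6);	/* write index wrapped past 0 */
	assert(1024 - pending(2, 1020, mask) == 1018);	/* IQ_INSTR_SPACE() analogue */
	return 0;
}
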
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
new file mode 100644
index 000000000000..2eab21e43048
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* When a new command is implemented, the table below should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct)
+{
+ int ring = 0;
+
+ oct->mbox = vzalloc(sizeof(*oct->mbox));
+ if (!oct->mbox)
+ return -ENOMEM;
+
+ mutex_init(&oct->mbox->lock);
+
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
+ oct->mbox->wk.ctxptr = oct;
+ oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ dev_info(&oct->pdev->dev, "setup vf mbox successfully\n");
+ return 0;
+}
+
+void octep_vf_delete_mbox(struct octep_vf_device *oct)
+{
+ if (oct->mbox) {
+ if (work_pending(&oct->mbox->wk.work))
+ cancel_work_sync(&oct->mbox->wk.work);
+
+ mutex_destroy(&oct->mbox->lock);
+ vfree(oct->mbox);
+ oct->mbox = NULL;
+ dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n");
+ }
+}
+
+int octep_vf_mbox_version_check(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
+ cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) {
+ dev_err(&oct->pdev->dev,
+ "VF Mbox version is incompatible with PF\n");
+ return -EINVAL;
+ }
+ oct->mbox_neg_ver = (u32)rsp.s_version.version;
+ dev_dbg(&oct->pdev->dev,
+ "VF Mbox version:%u Negotiated VF version with PF:%u\n",
+ (u32)cmd.s_version.version,
+ (u32)rsp.s_version.version);
+ return 0;
+}
+
+void octep_vf_mbox_work(struct work_struct *work)
+{
+ struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
+ struct octep_vf_iface_link_info *link_info;
+ struct octep_vf_device *oct = NULL;
+ struct octep_vf_mbox *mbox = NULL;
+ union octep_pfvf_mbox_word *notif;
+ u64 pf_vf_data;
+
+ oct = (struct octep_vf_device *)wk->ctxptr;
+ link_info = &oct->link_info;
+ mbox = oct->mbox;
+ pf_vf_data = readq(mbox->mbox_read_reg);
+
+ notif = (union octep_pfvf_mbox_word *)&pf_vf_data;
+
+ switch (notif->s.opcode) {
+ case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS:
+ if (notif->s_link_status.status) {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP;
+ netif_carrier_on(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ } else {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN;
+ netif_carrier_off(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ }
+ break;
+ default:
+ dev_err(&oct->pdev->dev,
+ "Received unsupported notif %d\n", notif->s.opcode);
+ break;
+ }
+}
+
+static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ u64 reg_val = 0ull;
+ int count;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ writeq(cmd.u64, mbox->mbox_write_reg);
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
+ for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) {
+ usleep_range(1000, 1500);
+ reg_val = readq(mbox->mbox_write_reg);
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) {
+ dev_err(&oct->pdev->dev, "mbox send command timed out\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT;
+ }
+ if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NACK;
+ }
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ int ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+ mutex_lock(&mbox->lock);
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) {
+ dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n",
+ cmd.s.opcode, oct->mbox_neg_ver);
+ mutex_unlock(&mbox->lock);
+ return -EOPNOTSUPP;
+ }
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp);
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int data_len = 0, tmp_len = 0;
+ int read_cnt, i = 0, ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+ }
+ /* The PF returns the data length of the requested command
+ * in its ACK.
+ */
+ data_len = *((int32_t *)rsp.s_data.data);
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ while (data_len) {
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ return ret;
+ }
+ if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) {
+ data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ mbox->mbox_data.recv_data[mbox->mbox_data.data_index] =
+ rsp.s_data.data[i];
+ mbox->mbox_data.data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, mbox->mbox_data.recv_data, tmp_len);
+ *size = tmp_len;
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu)
+{
+ int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret = 0;
+
+ if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) {
+ dev_err(&oct->pdev->dev,
+ "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n",
+ mtu, ETH_MIN_MTU, ETH_MAX_MTU);
+ return -EINVAL;
+ }
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr[i];
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "get_mac: received NACK\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rsp.s_set_mac.mac_addr[i];
+ return 0;
+}
+
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE;
+ cmd.s_link_state.state = state;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set Rx state received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS;
+ cmd.s_link_status.status = status;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set link status received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, NULL);
+ return ret;
+}
+
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ oct->fw_info.pkind = rsp.s_fw_info.pkind;
+ oct->fw_info.fsz = rsp.s_fw_info.fsz;
+ oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags;
+ oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags;
+
+ return 0;
+}
+
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads,
+ u16 rx_offloads)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS;
+ cmd.s_offloads.rx_ol_flags = rx_offloads;
+ cmd.s_offloads.tx_ol_flags = tx_offloads;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_offloads.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set offloads received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
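
The send path in __octep_vf_mbox_send_cmd() above is a single-register polled handshake: the VF posts the command word into the shared register, then polls that same register until the PF overwrites it with a response word whose type field says ACK or NACK. A self-contained model of the exchange (the simulated register and fake PF stand in for the MMIO word; helper names are hypothetical, not driver API):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MBOX_TYPE_RSP_ACK 1ULL	/* mirrors OCTEP_PFVF_MBOX_TYPE_RSP_ACK */
#define POLL_LIMIT 8000		/* mirrors OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT */

static uint64_t mbox_reg;	/* stands in for the shared MMIO word */

/* Fake PF: after a few polls, overwrite the command with an ACK response.
 * On a little-endian host the type field occupies bits 9:8, matching the
 * union's opcode:8 + type:2 layout.
 */
static uint64_t reg_read(int poll)
{
	if (poll == 3)
		mbox_reg = (mbox_reg & ~(3ULL << 8)) | (MBOX_TYPE_RSP_ACK << 8);
	return mbox_reg;
}

static bool mbox_transact(uint64_t cmd, uint64_t *rsp)
{
	int i;

	mbox_reg = cmd;				/* post the command word */
	for (i = 0; i < POLL_LIMIT; i++) {
		*rsp = reg_read(i);
		if (*rsp != cmd)		/* PF replaced it: response ready */
			return ((*rsp >> 8) & 3) == MBOX_TYPE_RSP_ACK;
	}
	return false;				/* timed out */
}

int main(void)
{
	uint64_t rsp;

	printf("acked: %d\n", mbox_transact(2, &rsp));	/* opcode 2, type CMD */
	return 0;
}
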
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
new file mode 100644
index 000000000000..9b5efad37eab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_MBOX_H_
+#define _OCTEP_VF_MBOX_H_
+
+/* When a new command is implemented, the VF mbox version should be bumped. */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4,
+ OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_VERSION 0
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct);
+void octep_vf_delete_mbox(struct octep_vf_device *oct);
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp);
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size);
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu);
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_version_check(struct octep_vf_device *oct);
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state);
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status);
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up);
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct);
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct);
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads);
+
+#endif /* _OCTEP_VF_MBOX_H_ */
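
One consequence of the constants above is worth spelling out: a 64-bit mailbox word carries at most OCTEP_PFVF_MBOX_MAX_DATA_SIZE (6) payload bytes, so octep_vf_mbox_bulk_read() fragments a transfer into one command/response round trip per 6 bytes, bounded by the 320-byte receive buffer. A quick sanity check of that arithmetic (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	int max_data = 6;	/* OCTEP_PFVF_MBOX_MAX_DATA_SIZE */
	int buf_size = 320;	/* OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE */
	int frags = (buf_size + max_data - 1) / max_data;	/* ceil division */

	printf("round trips for a full buffer: %d\n", frags);	/* 54 */
	return 0;
}
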
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
new file mode 100644
index 000000000000..25e2a876ebba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CN9K_H_
+#define _OCTEP_VF_REGS_CN9K_H_
+
+/*############################ RST #########################*/
+#define CN93_VF_CONFIG_XPANSION_BAR 0x38
+#define CN93_VF_CONFIG_PCIE_CAP 0x70
+#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_VF_RING_OFFSET BIT_ULL(17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_VF_SDP_R_IN_CNTS_START 0x10050
+#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_VF_SDP_R_IN_CONTROL(ring) \
+ (CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_ENABLE(ring) \
+ (CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_CNTS(ring) \
+ (CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28)
+#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24)
+#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8)
+#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6)
+#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5)
+#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3)
+#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1)
+#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0)
+
+#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_VF_SDP_R_OUT_CONTROL(ring) \
+ (CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_ENABLE(ring) \
+ (CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_CNTS(ring) \
+ (CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CN9K_H_ */
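
Every per-ring macro in this header follows the same pattern: a register's offset is its ring-0 base plus ring * CN93_VF_RING_OFFSET, so each ring's CSR block sits 2^17 bytes (128 KiB) after the previous one. A small sketch of that computation (illustrative values, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t stride = 1ULL << 17;		/* CN93_VF_RING_OFFSET */
	uint64_t dbell_base = 0x10040;		/* CN93_VF_SDP_R_IN_INSTR_DBELL_START */
	int ring = 3;

	/* Same result CN93_VF_SDP_R_IN_INSTR_DBELL(3) would produce:
	 * 0x10040 + 3 * 0x20000 = 0x70040
	 */
	printf("ring %d IQ doorbell offset: 0x%llx\n", ring,
	       (unsigned long long)(dbell_base + ring * stride));
	return 0;
}
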
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
new file mode 100644
index 000000000000..2e156745ef64
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CNXK_H_
+#define _OCTEP_VF_REGS_CNXK_H_
+
+/*############################ RST #########################*/
+#define CNXK_VF_CONFIG_XPANSION_BAR 0x38
+#define CNXK_VF_CONFIG_PCIE_CAP 0x70
+#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_VF_RING_OFFSET (0x1ULL << 17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_VF_SDP_R_ERR_TYPE(ring) \
+ (CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CONTROL(ring) \
+ (CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_ENABLE(ring) \
+ (CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CNTS(ring) \
+ (CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CNXK_VF_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_VF_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_VF_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_VF_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_VF_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_VF_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_VF_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_VF_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_WMARK(ring) \
+ (CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_CNTS(ring) \
+ (CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CNXK_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
new file mode 100644
index 000000000000..82821bc28634
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -ENOMEM, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+ put_page(page);
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_vf_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_vf_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an additional header is filled in by Octeon after the length field
+ * in Rx packets. This header carries extra packet information.
+ */
+ if (oct->fw_info.rx_ol_flags)
+ oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
+
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_vf_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_vf_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_vf_oq_reset_indices(oq);
+}
+
+/**
+ * octep_vf_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_vf_free_oq(struct octep_vf_oq *oq)
+{
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+ int q_no = oq->q_no;
+
+ octep_vf_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_vf_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_oqs(struct octep_vf_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_vf_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_oq(oct->oq[i]);
+ }
+ return retval;
+}
+
+/**
+ * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_vf_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_oqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_vf_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter is approaching its max value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in the Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by the API accounts only for full packets, not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq, u16 pkts_to_process)
+{
+ struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
+ struct octep_vf_rx_buffer *buff_info;
+ struct octep_vf_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ u16 data_offset, rx_ol_flags;
+ struct sk_buff *skb;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the length field from big-endian to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->fw_info.rx_ol_flags) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE +
+ OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_vf_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_vf_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets that can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_vf_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ smp_wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
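
The counter handling in octep_vf_oq_check_hw_for_pkts() deserves a closer look: the hardware count only ever grows, so the driver tracks deltas against last_pkt_count and, when the raw counter nears 32-bit wrap-around, writes the value back to subtract it out before resynchronizing from a fresh read. A self-contained model of that logic (the write-to-subtract CSR behavior is an assumption noted in the comments; this is not driver code):

#include <stdint.h>
#include <stdio.h>

/* Simulated pkts_sent register: hardware only increments it; a write of N
 * subtracts N (assumed CSR semantics, modeled so the sketch runs standalone).
 */
static uint32_t hw_cnt;

static uint32_t check_for_new_pkts(uint32_t *last_pkt_count)
{
	uint32_t cnt = hw_cnt;			/* readl(oq->pkts_sent_reg) */
	uint32_t new_pkts = cnt - *last_pkt_count;

	if (cnt > 0xF0000000U) {		/* close to 32-bit wrap */
		hw_cnt -= cnt;			/* writel(cnt, oq->pkts_sent_reg) */
		cnt = hw_cnt;			/* re-read: packets that raced the clear */
		new_pkts += cnt;
	}
	*last_pkt_count = cnt;
	return new_pkts;
}

int main(void)
{
	uint32_t last = 0;

	hw_cnt = 100;
	printf("delta: %u\n", check_for_new_pkts(&last));	/* 100 */

	hw_cnt = 0xF0000005U;	/* simulated long-running burst near wrap */
	printf("delta: %u\n", check_for_new_pkts(&last));
	printf("counter resynced to: %u\n", last);		/* 0 */
	return 0;
}
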
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
new file mode 100644
index 000000000000..fe46838b5200
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_RX_H_
+#define _OCTEP_VF_RX_H_
+
+/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the receive buffer page
+ * @info_ptr: DMA address of host memory, used to update pkt count by hw.
+ * This is currently unused to save pci writes.
+ */
+struct octep_vf_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);
+
+#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw))
+
+/* Rx offload flags */
+#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_VF_RX_CSUM_L4_VERIFIED | \
+ OCTEP_VF_RX_CSUM_IP_VERIFIED))
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_vf_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 rsvd:48;
+
+ /* rx offload flags */
+ u16 rx_ol_flags;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian, so it needs to be converted to CPU byte order.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_vf_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_vf_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer))
+
+/* Output Queue statistics. Each output queue has three stats fields. */
+struct octep_vf_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_vf_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon OQ.
+ */
+struct octep_vf_oq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_vf_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_vf_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_vf_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq))
+#endif /* _OCTEP_VF_RX_H_ */
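
Putting the pieces above together: each receive buffer begins with the 8-byte big-endian length (octep_vf_oq_resp_hw), optionally followed by the 8-byte extended header when the firmware advertises Rx offloads, with packet data after both. This matches the data_offset computation in __octep_vf_oq_process_rx(). A minimal sketch of the layout arithmetic (plain C types instead of the kernel's __be64 helpers; not driver code):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct resp_hw { uint64_t be_length; };	/* mirrors octep_vf_oq_resp_hw */
struct resp_hw_ext { uint64_t flags; };	/* mirrors octep_vf_oq_resp_hw_ext */

static size_t rx_data_offset(int fw_has_rx_offloads)
{
	size_t off = sizeof(struct resp_hw);		/* always present: 8 */

	if (fw_has_rx_offloads)
		off += sizeof(struct resp_hw_ext);	/* optional: +8 */
	return off;
}

int main(void)
{
	printf("data offset without ext header: %zu\n", rx_data_offset(0)); /* 8 */
	printf("data offset with ext header: %zu\n", rx_data_offset(1));    /* 16 */
	return 0;
}
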
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
new file mode 100644
index 000000000000..47a5c054fdb6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* Reset the various indices of the Tx queue data structure. */
+static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_vf_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+}
+
+/**
+ * octep_vf_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ *
+ * Return: nonzero if the budget was exhausted (more completions may be
+ * pending), zero otherwise.
+ */
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_vf_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+ compl_bytes, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+
+ return !budget;
+}
+
+/**
+ * octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
+{
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset the queue indices.
+ */
+void octep_vf_clean_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_vf_iq_free_pending(oct->iq[i]);
+ octep_vf_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_vf_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_vf_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_vf_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_vf_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_vf_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_vf_free_iq(struct octep_vf_iq *iq)
+{
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_vf_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_vf_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_vf_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
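Worth noting on the completion routine above: octep_vf_iq_process_completions() returns nonzero exactly when its Tx budget was exhausted, i.e. when more completions may still be pending. A hedged sketch of how a NAPI poll handler could consume that contract — the poll function, the napi-to-queue lookups, the Rx routine, and the interrupt re-enable helper are all hypothetical:

static int octep_vf_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct octep_vf_iq *iq = octep_vf_napi_to_iq(napi);	/* hypothetical */
	struct octep_vf_oq *oq = octep_vf_napi_to_oq(napi);	/* hypothetical */
	int tx_pending, rx_done;

	tx_pending = octep_vf_iq_process_completions(iq, 64);
	rx_done = octep_vf_oq_process_rx(oq, budget);		/* hypothetical */

	/* Keep polling while either direction still has work. */
	if (tx_pending || rx_done >= budget)
		return budget;

	if (napi_complete_done(napi, rx_done))
		octep_vf_enable_ioq_irq(iq, oq);		/* hypothetical */
	return rx_done;
}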
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
new file mode 100644
index 000000000000..f338b975103c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_TX_H_
+#define _OCTEP_VF_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
+struct octep_vf_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1) / 4, where
+ * the '+1' is for the main skb, which also goes to Octeon hardware as a
+ * gather buffer. To allocate sufficient SGLIST entries for a packet
+ * with the maximum number of fragments, round up by adding 3 before
+ * dividing; e.g., with the default MAX_SKB_FRAGS of 17 this yields
+ * (17 + 1 + 3) / 4 = 5 entries per packet.
+ */
+#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
+ (OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))
+
+struct octep_vf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_vf_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))
+
+/* VF Hardware interface Tx statistics */
+struct octep_vf_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets dropped */
+ u64 dropped;
+
+ /* Reserved */
+ u64 reserved[13];
+};
+
+/* VF Input Queue statistics */
+struct octep_vf_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data from the host to the Octeon device. Each input queue (up to 4)
+ * of an Octeon device has one such structure to represent it.
+ */
+struct octep_vf_iq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in the input ring where the driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in the input ring where Octeon is expected to read the next packet */
+ u16 octep_vf_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_vf_iq_stats stats;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_vf_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_vf_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_vf_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing the doorbell.
+ */
+ u32 fill_threshold;
+};
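For context on the fill_cnt/fill_threshold contract just described: the transmit path batches posted descriptors and rings the doorbell once the batch is large enough. A minimal sketch, assuming only the struct fields above; the helper name is hypothetical:

static inline void octep_vf_iq_ring_doorbell_sketch(struct octep_vf_iq *iq)
{
	if (iq->fill_cnt < iq->fill_threshold)
		return;
	/* Ensure descriptor writes are visible before notifying HW. */
	wmb();
	writel(iq->fill_cnt, iq->doorbell_reg);
	iq->fill_cnt = 0;
}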
+
+/* Hardware Tx Instruction Header */
+struct octep_vf_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator: 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+static_assert(sizeof(struct octep_vf_instr_hdr) == 8);
+
+/* Tx offload flags */
+#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))
+
+#define OCTEP_VF_TX_TSO(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO))
+
+struct tx_mdata {
+ /* offload flags */
+ u16 ol_flags;
+
+ /* gso size */
+ u16 gso_size;
+
+ /* gso segment count */
+ u16 gso_segs;
+
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
+};
+
+static_assert(sizeof(struct tx_mdata) == 16);
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and filled by the driver based on firmware/hardware
+ * capabilities. These optional headers are together called Front Data,
+ * and their size is described by ih->fsz.
+ */
+struct octep_vf_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_vf_instr_hdr ih;
+ u64 ih64;
+ };
+
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);
+
+#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
+#endif /* _OCTEP_VF_TX_H_ */
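A short editorial illustration of the length indexing implied by the hardware Scatter/Gather format comment above: Len 0 sits in bits 63:48 of the first 64-bit word, so on a little-endian host the u16 len[4] array holds the lengths in reverse order — len[3] describes dma_ptr[0], len[2] describes dma_ptr[1], and so on. That is why the unmap paths in octep_vf_tx.c pair dma_ptr[i & 3] with len[3 - (i & 3)]. A fill-side helper written to the same convention (the function name is hypothetical, not from the patch):

static inline void octep_vf_sg_set(struct octep_vf_tx_sglist_desc *sglist,
				   int i, dma_addr_t dma, u16 len)
{
	/* Four buffers per descriptor; lengths reversed within the word. */
	sglist[i >> 2].dma_ptr[i & 3] = dma;
	sglist[i >> 2].len[3 - (i & 3)] = len;
}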
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index edeb0f737312..61ab7f66f053 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -837,6 +837,8 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_BPID = -434,
+ NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
@@ -1114,6 +1116,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
#define NIX_FLOW_KEY_TYPE_AH BIT(22)
@@ -1553,6 +1556,7 @@ struct flow_msg {
u32 mpls_lse[4];
u8 icmp_type;
u8 icmp_code;
+ __be16 tcp_flags;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index b0b4dea548e1..d883157393ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -85,8 +85,7 @@ enum npc_kpu_lc_ltype {
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
- NPC_LT_LD_ICMP,
- NPC_LT_LD_SCTP,
+ NPC_LT_LD_SCTP = 4,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
@@ -97,6 +96,7 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_ICMP,
};
enum npc_kpu_le_ltype {
@@ -140,14 +140,14 @@ enum npc_kpu_lg_ltype {
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
- NPC_LT_LH_TU_ICMP,
- NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_SCTP = 4,
NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_CUSTOM0,
+ NPC_LT_LH_CUSTOM1,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
- NPC_LT_LH_CUSTOM0 = 0xE,
- NPC_LT_LH_CUSTOM1 = 0xF,
+ NPC_LT_LH_TU_ICMP = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
@@ -155,10 +155,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CPT_HDR_PTP_PKIND = 54ULL,
NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
@@ -216,6 +217,7 @@ enum key_fields {
NPC_MPLS4_TTL,
NPC_TYPE_ICMP,
NPC_CODE_ICMP,
+ NPC_TCP_FLAGS,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index a820bad3abb2..41de72c8607f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -35,6 +35,7 @@
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
#define NPC_ETYPE_PPPOE 0x8864
+#define NPC_ETYPE_ERSPA 0x88be
#define NPC_PPP_IP 0x0021
#define NPC_PPP_IP6 0x0057
@@ -59,6 +60,9 @@
#define NPC_IPNH_MPLS 137
#define NPC_IPNH_HOSTID 139
#define NPC_IPNH_SHIM6 140
+#define NPC_IPNH_CUSTOM 253
+
+#define NPC_IP6_ROUTE_TYPE 4
#define NPC_UDP_PORT_PTP_E 319
#define NPC_UDP_PORT_PTP_G 320
@@ -187,6 +191,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU2_MT,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -231,6 +236,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
NPC_S_KPU8_AH,
+ NPC_S_KPU8_CUSTOM,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
NPC_S_KPU9_TU_MPLS_IN_IP,
@@ -242,6 +248,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
NPC_S_KPU9_ESP,
+ NPC_S_KPU9_CUSTOM,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -318,10 +325,10 @@ enum npc_kpu_lc_uflag {
NPC_F_LC_U_UNK_PROTO = 0x10,
NPC_F_LC_U_IP_FRAG = 0x20,
NPC_F_LC_U_IP6_FRAG = 0x40,
+ NPC_F_LC_L_6TO4 = 0x80,
};
enum npc_kpu_lc_lflag {
NPC_F_LC_L_IP_IN_IP = 1,
- NPC_F_LC_L_6TO4,
NPC_F_LC_L_MPLS_IN_IP,
NPC_F_LC_L_IP6_TUN_IP6,
NPC_F_LC_L_IP6_MPLS_IN_IP,
@@ -334,6 +341,8 @@ enum npc_kpu_lc_lflag {
NPC_F_LC_L_EXT_MOBILITY,
NPC_F_LC_L_EXT_HOSTID,
NPC_F_LC_L_EXT_SHIM6,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
};
enum npc_kpu_ld_lflag {
@@ -970,10 +979,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 48, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
@@ -2786,6 +2795,24 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_MT, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_MT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4501,6 +4528,24 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0xff00,
NPC_IP_VER_6,
NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 1,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 2,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
0x0000,
0x0000,
},
@@ -4776,6 +4821,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
@@ -4884,6 +4938,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4,
@@ -5064,6 +5127,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
0x0000,
0x0000,
NPC_IP_VER_6,
@@ -5208,6 +5280,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5325,6 +5406,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5433,6 +5523,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5532,6 +5631,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5649,6 +5757,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5757,6 +5874,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5883,6 +6009,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5982,6 +6117,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6081,6 +6225,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6310,6 +6463,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0xffff,
0x0000,
0x0000,
+ 0x0009,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -6756,6 +6918,78 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
0x0000,
0xffff,
NPC_GRE_F_ROUTE,
@@ -6836,6 +7070,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU8_CUSTOM, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7304,6 +7547,24 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x4000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x6000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8384,7 +8645,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -8536,7 +8797,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
NPC_F_LA_U_HAS_IH_NIX,
@@ -8693,7 +8954,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
NPC_F_LA_U_HAS_HIGIG2,
@@ -8818,7 +9079,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
@@ -8947,7 +9208,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -9124,7 +9385,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
0,
@@ -9204,7 +9465,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -9213,7 +9474,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9228,7 +9489,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9324,7 +9585,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9428,7 +9689,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9532,7 +9793,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
0,
@@ -9628,7 +9889,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9684,7 +9945,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9757,7 +10018,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9772,7 +10033,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9836,7 +10097,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_EXDSA,
NPC_F_LB_L_EXDSA,
@@ -9923,6 +10184,22 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9949,7 +10226,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10029,7 +10306,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10101,7 +10378,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10165,7 +10442,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10237,7 +10514,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10310,80 +10587,80 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ 6, 0, 42, 1, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_NSH, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
@@ -10397,7 +10674,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10469,7 +10746,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10533,7 +10810,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10605,7 +10882,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10685,7 +10962,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_DSA,
NPC_F_LB_L_DSA,
@@ -10733,7 +11010,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
NPC_F_LB_L_DSA_VLAN,
@@ -10894,7 +11171,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10942,7 +11219,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10990,7 +11267,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -11014,7 +11291,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 2, 0,
NPC_LID_LC, NPC_LT_NA,
0,
@@ -11063,15 +11340,15 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
- NPC_S_KPU5_IP, 10, 0,
+ NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
- NPC_S_KPU5_IP6, 10, 0,
+ 6, 0, 42, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
@@ -11119,7 +11396,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
+ 2, 0, 4, 2, 0,
NPC_S_KPU8_UDP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
@@ -11223,7 +11500,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 2, 0,
+ 2, 8, 4, 2, 0,
NPC_S_KPU8_UDP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
@@ -11450,6 +11727,22 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0, 0,
NPC_S_KPU6_IP6_ROUT, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
NPC_F_LC_L_EXT_ROUT,
0, 0, 0, 0,
},
@@ -11695,6 +11988,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP,
@@ -11791,6 +12092,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
@@ -11951,6 +12260,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP6,
@@ -12080,6 +12397,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12184,6 +12509,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12280,6 +12613,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12368,6 +12709,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12472,6 +12821,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12568,6 +12925,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12681,6 +13046,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12769,6 +13142,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12857,6 +13238,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -13058,6 +13447,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
NPC_S_KPU9_ESP, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -13458,6 +13855,70 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 24, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_GRE,
@@ -13529,6 +13990,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LD, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_CUSTOM, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_CUSTOM0,
+ 0,
+ 0, 0xff, 0, 0,
+ },
+ {
NPC_ERRLEV_LD, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -13946,6 +14415,22 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15105,7 +15590,9 @@ static struct npc_lt_def_cfg npc_lt_defaults = {
},
.rx_et = {
{
- .lid = NPC_LID_LB,
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
.ltype_match = NPC_LT_NA,
.ltype_mask = 0x0,
},
@@ -15139,6 +15626,12 @@ static struct npc_mcam_kex npc_mkex_default = {
/* Ethertype: 2 bytes, KW0[55:40] */
KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
},
+ [NPC_LT_LA_CPT_HDR] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
/* Layer A: HiGig2: */
[NPC_LT_LA_HIGIG2_ETHER] = {
/* Classification: 2 bytes, KW1[23:8] */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c1d04a3c559..07d4859de53a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu)
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
+
+ BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
@@ -1484,7 +1486,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
blkaddr = pf->nix_blkaddr;
- } else if (is_afvf(pcifunc)) {
+ } else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
/* Assign NIX based on VF number. All even numbered VFs get
* NIX0 and odd numbered gets NIX1
@@ -2034,7 +2036,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
u16 target;
/* Only PF can add VF permissions */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
return -EOPNOTSUPP;
target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
@@ -2618,6 +2620,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
+ /* Free allocated BPIDs */
+ rvu_nix_flr_free_bpids(rvu, pcifunc);
+
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
@@ -3151,6 +3156,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
int err, chans, vfs;
+ int pos = 0;
if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
dev_warn(&pdev->dev,
@@ -3158,6 +3164,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
+ /* Get RVU VFs device id */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
+
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 43be37dd1f32..f390525a6217 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -288,6 +288,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+struct nix_bp {
+ struct rsrc_bmap bpids; /* free bpids bitmap */
+ u16 cgx_bpid_cnt;
+ u16 sdp_bpid_cnt;
+ u16 free_pool_base;
+ u16 *fn_map; /* pcifunc mapping */
+ u8 *intf_map; /* interface type map */
+ u8 *ref_cnt;
+};
+
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -363,6 +373,7 @@ struct nix_hw {
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
+ struct nix_bp bp;
u64 *tx_credits;
u8 cc_mcs_cnt;
};
@@ -432,6 +443,13 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct channel_fwdata {
+ struct sdp_node_info info;
+ u8 valid;
+#define RVU_CHANL_INFO_RESERVED 379
+ u8 reserved[RVU_CHANL_INFO_RESERVED];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -450,11 +468,13 @@ struct rvu_fwdata {
u64 msixtr_base;
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
-#define FWDATA_RESERVED_MEM 1022
+ struct channel_fwdata channel_data;
+#define FWDATA_RESERVED_MEM 958
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
#define CGX_LMACS_USX 8
+#define FWDATA_CGX_LMAC_OFFSET 10536
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
@@ -503,6 +523,7 @@ struct rvu {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
+ u16 vf_devid; /* VF devices id */
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -732,9 +753,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu)
/* Function Prototypes
* RVU
*/
-static inline bool is_afvf(u16 pcifunc)
+#define RVU_LBK_VF_DEVID 0xA0F8
+static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc)
{
- return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+ return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) &&
+ (rvu->vf_devid == RVU_LBK_VF_DEVID));
}
static inline bool is_vf(u16 pcifunc)
@@ -794,7 +817,7 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
-bool is_sdp_vf(u16 pcifunc);
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
@@ -873,6 +896,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index e6d7914ce61c..2500f5ba4f5a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2870,6 +2870,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
+ case NPC_TCP_FLAGS:
+ seq_printf(s, "%d ", rule->packet.tcp_flags);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
+ break;
case NPC_IPSEC_SPI:
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 1e6fbd98423d..96c04f7d93f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1235,8 +1235,8 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
- RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
@@ -1434,15 +1434,6 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
-};
-
-static const struct devlink_param rvu_af_dl_param_exact_match[] = {
- DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
- "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- rvu_af_npc_exact_feature_get,
- rvu_af_npc_exact_feature_disable,
- rvu_af_npc_exact_feature_validate),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
"npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1457,6 +1448,15 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
rvu_af_dl_nix_maxlf_validate),
};
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
+};
+
/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 66203a90f052..d39001cdc707 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -499,29 +499,115 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
+#define NIX_BPIDS_PER_LMAC 8
+#define NIX_BPIDS_PER_CPT 1
+static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
+{
+ struct nix_bp *bp = &hw->bp;
+ int err, max_bpids;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
+
+ /* Reserve the BPIDs for CGX and SDP */
+ bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
+ bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
+ bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
+ NIX_BPIDS_PER_CPT;
+ bp->bpids.max = max_bpids - bp->free_pool_base;
+
+ err = rvu_alloc_bitmap(&bp->bpids);
+ if (err)
+ return err;
+
+ bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!bp->fn_map)
+ return -ENOMEM;
+
+ bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->intf_map)
+ return -ENOMEM;
+
+ bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->ref_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
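For orientation, a worked example of the partitioning nix_setup_bpids() establishes above — the link and channel counts are assumptions for illustration, not values from the patch:

/*
 * Assuming 16 CGX LMAC links and one SDP link with 128 channels:
 *   cgx_bpid_cnt   = 16 * NIX_BPIDS_PER_LMAC       = 128  (BPIDs 0..127)
 *   sdp_bpid_cnt   = 1 * 128                       = 128  (BPIDs 128..255)
 *   free_pool_base = 128 + 128 + NIX_BPIDS_PER_CPT = 257
 * so a bitmap index N allocated for an LBK VF maps to BPID N + 257.
 */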
+
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, bpid, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+
+ if (!is_lbk_vf(rvu, pcifunc))
+ return;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return;
+
+ bp = &nix_hw->bp;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+ rvu_free_rsrc(&bp->bpids, bpid);
+ bp->fn_map[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+}
+
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, pf, type, err;
+ u16 chan_base, chan, bpid;
struct rvu_pfvf *pfvf;
- int blkaddr, pf, type;
- u16 chan_base, chan;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+ bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
+
+ if (type == NIX_INTF_TYPE_LBK) {
+ bpid = cfg & GENMASK(8, 0);
+ mutex_lock(&rvu->rsrc_lock);
+ rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->fn_map[bpid] = 0;
+ bp->ref_cnt[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ }
}
return 0;
}
@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
- u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ int bpid, blkaddr, sdp_chan_base, err;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
u8 cgx_id, lmac_id;
- u64 cfg;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- lmac_chan_cnt = cfg & 0xFF;
+ struct nix_bp *bp;
- cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
- lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
- sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
- pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ bp = &nix_hw->bp;
/* Backpressure IDs range division
* CGX channels are mapped to (0 - 191) BPIDs
@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 16)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
- bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
- (lmac_id * lmac_chan_cnt) + req->chan_base;
+ bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
+ (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > cgx_bpid_cnt)
- return -EINVAL;
+ if (bpid > bp->cgx_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID;
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
- return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
- if (req->bpid_per_chan)
- bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
- return -EINVAL;
+ /* Alloc bpid from the free pool */
+ mutex_lock(&rvu->rsrc_lock);
+ bpid = rvu_alloc_rsrc(&bp->bpids);
+ if (bpid < 0) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return NIX_AF_ERR_INVALID_BPID;
+ }
+ bp->fn_map[bpid] = req->hdr.pcifunc;
+ bp->ref_cnt[bpid]++;
+ bpid += bp->free_pool_base;
+ mutex_unlock(&rvu->rsrc_lock);
break;
case NIX_INTF_TYPE_SDP:
- if ((req->chan_base + req->chan_cnt) > 255)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
- bpid = sdp_bpid_cnt + req->chan_base;
+ /* Handle use case of 2 SDP blocks */
+ if (!hw->cap.programmable_chans)
+ sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
+ else
+ sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
+
+ bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
- return -EINVAL;
+ if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
+ return NIX_AF_ERR_INVALID_BPID;
break;
default:
return -EINVAL;
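The LBK branch above swaps the old fixed arithmetic for a small ID pool guarded by rsrc_lock, with fn_map/ref_cnt tracking the owning pcifunc (the disable path earlier frees the bit and clears both). A minimal sketch of that pattern, assuming a bitmap-backed pool rather than the driver's actual rvu_alloc_rsrc() internals:

	/* Illustrative BPID free-pool allocator; field names mirror the new
	 * struct nix_bp, but the helpers here are assumptions, not the
	 * driver's code.
	 */
	struct bp_pool {
		unsigned long *bmap;	/* one bit per pool-local BPID */
		int max;		/* pool size */
		u16 free_pool_base;	/* HW offset of the pool */
		u16 *fn_map;		/* BPID -> owning pcifunc */
		u8 *ref_cnt;
	};

	static int bp_alloc(struct bp_pool *p, u16 pcifunc)
	{
		int id = find_first_zero_bit(p->bmap, p->max);

		if (id >= p->max)
			return -ENOSPC;		/* pool exhausted */
		set_bit(id, p->bmap);
		p->fn_map[id] = pcifunc;
		p->ref_cnt[id]++;
		return id + p->free_pool_base;	/* HW-visible BPID */
	}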
@@ -612,7 +703,7 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
@@ -1523,7 +1614,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
@@ -1899,7 +1990,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- if (is_afvf(pcifunc)) {/* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -1916,7 +2007,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
- if (is_afvf(pcifunc)) { /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -3356,7 +3447,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
@@ -3703,7 +3794,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
@@ -4039,6 +4130,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+ field->bytesm1 = 1; /* 2 Bytes */
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
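For orientation, each flow-key field in this table describes an extraction window inside one parsed layer; a hedged reading of the new CUSTOM0 entry above:

	/* Sketch of the CUSTOM0 field descriptor (bytesm1 is length-1):
	 *   lid = NPC_LID_LC               network-layer slot
	 *   ltype_match/ltype_mask         match only the custom L3 ltype
	 *   hdr_offset = 6, bytesm1 = 1    hash bytes 6-7 of that header
	 */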
@@ -4420,7 +4518,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4784,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_bpids(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
/* Configure segmentation offload formats */
nix_setup_lso(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 516adb50f9f6..e350242bbafb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -395,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
(owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -608,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
- if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -773,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
return;
/* Skip LBK VFs */
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
return;
/* If pkt replication is not supported,
@@ -853,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
- if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c75669c8fde7..c181e7aa9eb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS4_TTL] = "lse depth 4",
[NPC_TYPE_ICMP] = "icmp type",
[NPC_CODE_ICMP] = "icmp code",
+ [NPC_TCP_FLAGS] = "tcp flags",
[NPC_UNKNOWN] = "unknown",
};
@@ -530,6 +531,7 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
+ NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
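The new NPC_TCP_FLAGS extractor takes 2 bytes at offset 12 of the TCP header, the word carrying the data offset, reserved bits, and the nine flag bits. A standalone sketch of that span (userspace C, illustration only):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Bytes 12-13 of a TCP header: data offset in the high nibble,
	 * then reserved bits, flags (NS, CWR, ECE, URG, ACK, PSH, RST,
	 * SYN, FIN) in the low 9 bits.
	 */
	static uint16_t tcp_flags_word(const uint8_t *tcp)
	{
		uint16_t w;

		memcpy(&w, tcp + 12, sizeof(w)); /* same offset NPC scans */
		return ntohs(w);
	}

	/* e.g. a SYN-only check: (tcp_flags_word(h) & 0x01ff) == 0x0002 */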
@@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
- BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
+ BIT_ULL(NPC_TCP_FLAGS);
/* for tcp/udp/sctp corresponding layer type should be in the key */
if (*features & proto_flags) {
@@ -982,7 +985,8 @@ do { \
mask->icmp_type, 0);
NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
mask->icmp_code, 0);
-
+ NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
+ ntohs(mask->tcp_flags), 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 6f73ad9807f0..086f05c0376f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -439,6 +439,9 @@
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
+#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
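These GENMASK_ULL() fields carve up NIX_AF_CONST; the likely consumer is the nix_setup_bpids() call wired up in rvu_nix.c above. A sketch of the decode, assuming <linux/bitfield.h> (the helper name is hypothetical):

	#include <linux/bitfield.h>

	static void sketch_read_bp_const(struct rvu *rvu, int blkaddr,
					 u16 *max_bpids, u16 *sdp_chans)
	{
		u64 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);

		*max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
		*sdp_chans = FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
	}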
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index ae50d56258ec..38cfe148f4b7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc)
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
-bool is_sdp_vf(u16 pcifunc)
+#define RVU_SDP_VF_DEVID 0xA0F7
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
{
+ if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
+ return (rvu->vf_devid == RVU_SDP_VF_DEVID);
+
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
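is_sdp_vf() gains the rvu handle because a pcifunc whose PF bits are all zero (a VF attached to the AF) can be either an LBK or an SDP VF, and only the probed VF device ID distinguishes them. A sketch of the assumed pcifunc layout (the constants here are assumptions mirroring the RVU_PFVF_* values in rvu.h):

	/* Assumed layout: | PF (upper bits, shift 10) | FUNC (low 10 bits) | */
	#define SKETCH_FUNC_MASK	0x3ff
	#define SKETCH_PF_SHIFT		10

	static inline bool sketch_is_af_vf(u16 pcifunc)
	{
		/* PF part zero, FUNC non-zero: a VF under the AF (PF0);
		 * its flavour must come from rvu->vf_devid, as above.
		 */
		return !(pcifunc >> SKETCH_PF_SHIFT) &&
		       (pcifunc & SKETCH_FUNC_MASK);
	}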
@@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
+ if (rvu->fwdata->channel_data.valid) {
+ sdp_pf_num[0] = 0;
+ pfvf = &rvu->pf[sdp_pf_num[0]];
+ pfvf->sdp_info = &rvu->fwdata->channel_data.info;
+
+ return 0;
+ }
+
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 4fd44b6eecea..87bdb93cb066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -638,6 +638,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -857,6 +858,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+
+ flow_spec->tcp_flags = match.key->flags;
+ flow_mask->tcp_flags = match.mask->flags;
+ req->features |= BIT_ULL(NPC_TCP_FLAGS);
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
u8 bit;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index d58b07e7e123..7063c78bd35f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
int i;
for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
entry->buf = NULL;
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
-
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(buf);
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
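Both clean paths above fold the open-coded drain into the new page_frag_cache_drain() helper. Roughly, the helper packages the pattern being deleted (a sketch reconstructed from that pattern, not a verbatim copy of the mm implementation):

	static void sketch_page_frag_cache_drain(struct page_frag_cache *nc)
	{
		if (!nc->va)
			return;
		__page_frag_cache_drain(virt_to_head_page(nc->va),
					nc->pagecnt_bias);
		nc->va = NULL;
	}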
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f5b1f8c7834f..7f20813456e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2199,8 +2199,9 @@ reset_slave:
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd);
- /* Turn on internal error letting slave reset itself immeditaly,
- * otherwise it might take till timeout on command is passed
+ /* Turn on internal error letting slave reset itself
+ * immediately, otherwise it might take till timeout on
+ * command is passed
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
@@ -2954,7 +2955,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
dummy_admin.default_vlan = vlan;
/* VF wants to move to other VST state which is valid with current
- * rate limit. Either differnt default vlan in VST or other
+ * rate limit. Either different default vlan in VST or other
* supported QoS priority. Otherwise we don't allow this change when
* the TX rate is still configured.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 4d4f9cf9facb..e130e7259275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
@@ -137,7 +137,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 9e3b76182088..cd754cd76bde 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -96,8 +96,8 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
#define MLX4_EN_WRAP_AROUND_SEC 10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
- * good chance we wont miss a wrap around.
- * TOTO: Use a timer instead of a work queue to increase the guarantee.
+ * good chance we won't miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
*/
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 33bbcced8105..5d3fde63b273 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -42,6 +42,7 @@
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
+#include <net/rps.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -1072,7 +1073,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
- * change while HW is updated holding the command semaphor */
+ * change while HW is updated holding the command semaphore
+ */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
@@ -1817,7 +1819,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
- /* Attach rx QP to bradcast address */
+ /* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a09b6e05337d..eac49657bd07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -762,7 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 65cb63f6c465..1ddb11cb25f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -992,7 +992,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->ts_requested = 1;
}
- /* Prepare ctrl segement apart opcode+ownership, which depends on
+ /* Prepare ctrl segment apart opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 6598b10a9ff4..9572a45f6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -210,7 +210,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
- /* ensure all information is written before setting the ownersip bit */
+ /* ensure all information is written before setting the ownership bit */
dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
index 954b86faac29..40ca29bb928c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -44,7 +44,7 @@
/* Default supported priorities for VPP allocation */
#define MLX4_DEFAULT_QOS_PRIO (0)
-/* Derived from FW feature definition, 0 is the default vport fo all QPs */
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
#define MLX4_VPP_DEFAULT_VPORT (0)
struct mlx4_vport_qos_param {
@@ -98,7 +98,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
u16 *available_vpp, u8 *vpp_p_up);
/**
- * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
* The total number of VPPs assigned to all for a port must not exceed
* the value reported by available_vpp in mlx4_ALLOCATE_VPP_get.
* VPP allocation is allowed only after the port type has been set,
@@ -113,7 +113,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
/**
- * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
* Each priority allowed for the Vport is assigned with a share of the BW,
* and a BW limitation. This commands query the current QoS values.
*
@@ -128,7 +128,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *out_param);
/**
- * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
* QoS parameters can be modified at any time, but must be initialized
* before any QP is associated with the VPort.
*
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2581226836b5..7b02ff61126d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -129,7 +129,7 @@ static const struct mlx4_profile default_profile = {
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
- .num_mtt = 1 << 20, /* It is really num mtt segements */
+ .num_mtt = 1 << 20, /* It is really num mtt segments */
};
static const struct mlx4_profile low_mem_profile = {
@@ -1508,7 +1508,7 @@ static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
priv->v2p.port1 = port1;
priv->v2p.port2 = port2;
} else {
- mlx4_err(dev, "Failed to change port mape: %d\n", err);
+ mlx4_err(dev, "Failed to change port map: %d\n", err);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index e9cd4bb6f83d..d3d9ec042d2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -112,7 +112,7 @@ struct mlx4_en_stat_out_flow_control_mbox {
__be64 tx_pause_duration;
/* Number of transmitter transitions from XOFF state to XON state */
__be64 tx_pause_transition;
- /* Reserverd */
+ /* Reserved */
__be64 reserved[2];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 256a06b3c096..4e43f4a7d246 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -2118,7 +2118,7 @@ static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
* @data: output buffer to put the requested data into.
*
* Reads cable module eeprom data, puts the outcome data into
- * data pointer paramer.
+ * data pointer parameter.
* Returns num of read bytes on success or a negative error
* code.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c44870b175f9..76dc5a9b9648 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
- lib/crypto.o
+ lib/crypto.o lib/sd.o
#
# Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index cf0477f53dc4..47e7c2639774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
return false;
if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
- mlx5_core_warn(dev, "Missing SyncE capability\n");
+ mlx5_core_dbg(dev, "Missing SyncE capability\n");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index d74a5aaf4268..904e08de852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -41,6 +41,7 @@ struct mlx5_dpll_synce_status {
enum mlx5_msees_oper_status oper_status;
bool ho_acq;
bool oper_freq_measure;
+ enum mlx5_msees_failure_reason failure_reason;
s32 frequency_diff;
};
@@ -60,6 +61,7 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -99,6 +101,26 @@ mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
}
}
+static enum dpll_lock_status_error
+mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
+{
+ switch (synce_status->oper_status) {
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
+ switch (synce_status->failure_reason) {
+ case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
+ return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
+ case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
+ return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
+ default:
+ return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
+ }
+ default:
+ return DPLL_LOCK_STATUS_ERROR_NONE;
+ }
+}
+
static enum dpll_pin_state
mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
@@ -118,10 +140,11 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
return 0;
}
-static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
@@ -131,6 +154,7 @@ static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
if (err)
return err;
*status = mlx5_dpll_lock_status_get(&synce_status);
+ *status_error = mlx5_dpll_lock_status_error_get(&synce_status);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 55c6ace0acd5..84db05fb9389 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
#include "lib/clock.h"
#include "en/rx_res.h"
#include "en/selq.h"
+#include "lib/sd.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -791,6 +792,8 @@ struct mlx5e_channel {
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
+ int vec_ix;
+ int sd_ix;
int cpu;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
@@ -914,7 +917,7 @@ struct mlx5e_priv {
bool tx_ptp_opened;
bool rx_ptp_opened;
struct hwtstamp_config tstamp;
- u16 q_counter;
+ u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
@@ -1029,12 +1032,12 @@ struct mlx5e_xsk_param;
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
- struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 48581ea3adcb..874a1016623c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -23,20 +23,26 @@ bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
*rqn = c->rq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 637ca90daaa8..6715aa9383b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -10,8 +10,10 @@ struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 40c8df111754..e2d8d2754be0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -20,10 +20,8 @@
#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
-int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+static int mlx5e_monitor_counter_cap(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
-
if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
return false;
if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
@@ -36,24 +34,38 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (!mlx5e_monitor_counter_cap(pos))
+ return false;
+ return true;
+}
+
+static void mlx5e_monitor_counter_arm(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
+ mlx5_cmd_exec_in(mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
monitor_counters_work);
+ struct mlx5_core_dev *pos;
+ int i;
mutex_lock(&priv->state_lock);
mlx5e_stats_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ mlx5e_monitor_counter_arm(pos);
}
static int mlx5e_monitor_event_handler(struct notifier_block *nb,
@@ -97,15 +109,13 @@ static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
}
/* check if mlx5e_monitor_counter_supported before calling this function */
-static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+static void mlx5e_set_monitor_counter(struct mlx5_core_dev *mdev, int q_counter)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- int q_counter = priv->q_counter;
int cnt = 0;
if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
@@ -127,13 +137,17 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
/* check if mlx5e_monitor_counter_supported before calling this function */
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *pos;
+ int i;
+
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-
- mlx5e_set_monitor_counter(priv);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_eq_notifier_register(pos, &priv->monitor_counters_nb);
+ mlx5e_set_monitor_counter(pos, priv->q_counter[i]);
+ mlx5e_monitor_counter_arm(pos);
+ }
queue_work(priv->wq, &priv->update_stats_work);
}
@@ -141,11 +155,15 @@ void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_cmd_exec_in(pos, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(pos, &priv->monitor_counters_nb);
+ }
cancel_work_sync(&priv->monitor_counters_work);
}
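Setup and teardown now walk every device in the socket-direct (SD) group rather than priv->mdev alone, pairing each device with its own q_counter slot. A conceptual expansion of the iteration (sd_group_size()/sd_group_dev() are hypothetical stand-ins for the lib/sd.h accessors):

	static void sketch_set_monitor_counters(struct mlx5e_priv *priv)
	{
		struct mlx5_core_dev *pos;
		int i;

		for (i = 0; i < sd_group_size(priv->mdev); i++) {
			pos = sd_group_dev(priv->mdev, i); /* 0 == primary */
			mlx5e_set_monitor_counter(pos, priv->q_counter[i]);
		}
	}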
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5d213a9886f1..a3f31d9d527e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
- /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
- u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 headroom;
+
+ if (no_head_tail_room)
+ return SKB_DATA_ALIGN(hw_mtu);
+ headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+ /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+ * no_head_tail_room should be set in the case of XDP with Striding RQ
+ * when SKB is not linear. This is because another page is allocated for the linear part.
+ */
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
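For intuition, the two sizing branches of mlx5e_rx_get_linear_sz_skb() work out roughly as follows (a sketch; MLX5_SKB_FRAG_SZ is assumed to follow its usual en.h definition):

	/* Linear-SKB size formulas feeding the stride computation:
	 *
	 *   no_head_tail_room:  SKB_DATA_ALIGN(hw_mtu)
	 *   otherwise:          SKB_DATA_ALIGN(headroom + hw_mtu) +
	 *                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 *
	 * mlx5e_rx_get_linear_stride_sz() then rounds the result up with
	 * roundup_pow_of_two().
	 */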
@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
- /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+ * to exclude headroom and tailroom from calculations.
+ * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
+ * since packet data buffers don't have headroom and tailroom reserved for the SKB.
+ * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
@@ -674,7 +688,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
- .ix = c->ix,
+ .ix = c->vec_ix,
};
}
@@ -945,7 +959,6 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1007,7 +1020,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
@@ -1018,7 +1030,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1027,7 +1038,6 @@ void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
@@ -1292,13 +1302,12 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
- err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+ err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 6800949dafbc..9a781f18b57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -130,10 +130,8 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
@@ -149,7 +147,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam);
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index ca05b3252a1b..d0af7271da34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -646,7 +646,6 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- u16 q_counter,
struct mlx5e_ptp_params *ptp_params)
{
struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
@@ -655,7 +654,7 @@ static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = netdev->max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
+ mlx5e_build_rq_param(mdev, params, NULL, rq_params);
}
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
@@ -681,7 +680,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* RQ */
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
params->vlan_strip_disable = orig->vlan_strip_disable;
- mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
}
}
@@ -714,13 +713,16 @@ static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
int node = dev_to_node(c->mdev->device);
- int err;
+ int err, sd_ix;
+ u16 q_counter;
err = mlx5e_init_ptp_rq(c, params, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+ sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
+ q_counter = c->priv->q_counter[sd_ix];
+ return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
@@ -935,6 +937,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
mlx5e_trigger_napi_sched(&c->napi);
}
@@ -943,8 +946,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
mlx5e_deactivate_rq(&c->rq);
+ }
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 34adf8c3f81a..e87e26f2c669 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -122,8 +122,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
- mlx5e_build_sq_param(priv->mdev, params, &param_sq);
- mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
+ mlx5e_build_sq_param(c->mdev, params, &param_sq);
+ mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
@@ -176,7 +176,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
+ qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
return 0;
@@ -190,7 +190,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
if (!sq) /* Handle the case when the SQ failed to open. */
return;
- qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 4358798d6ce1..25d751eba99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -294,8 +294,8 @@ static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
- real_time = mlx5_is_real_time_rq(priv->mdev);
- rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+ real_time = mlx5_is_real_time_rq(rq->mdev);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL));
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 6b44ddce14e9..0ab9db319530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -219,7 +219,6 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
bool stopped = netif_xmit_stopped(sq->txq);
- struct mlx5e_priv *priv = sq->priv;
u8 state;
int err;
@@ -227,7 +226,7 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
if (!err)
devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index 7b8ff7a71003..bcafb4bf9415 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -4,6 +4,33 @@
#include "rqt.h"
#include <linux/mlx5/transobj.h>
+static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id);
+ int i;
+
+ /* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */
+ for (i = 0; i < size; i++)
+ if (vhca_ids[i] >= max_num_vhca_id)
+ return false;
+ return true;
+}
+
+static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ if (!vhca_ids)
+ return true;
+
+ if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt))
+ return false;
+ if (!verify_num_vhca_ids(mdev, vhca_ids, size))
+ return false;
+
+ return true;
+}
+
void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels)
{
@@ -13,19 +40,38 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
indir->table[i] = i % num_channels;
}
+static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size)
+{
+ unsigned int i;
+
+ if (vhca_ids) {
+ MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1);
+ for (i = 0; i < size; i++) {
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]);
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]);
+ }
+ } else {
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+ }
+}
static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u16 max_size, u32 *init_rqns, u16 init_size)
+ u16 max_size, u32 *init_rqns, u32 *init_vhca_ids, u16 init_size)
{
+ int entry_sz;
void *rqtc;
int inlen;
int err;
u32 *in;
- int i;
+
+ if (!rqt_verify_vhca_ids(mdev, init_vhca_ids, init_size))
+ return -EOPNOTSUPP;
rqt->mdev = mdev;
rqt->size = max_size;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ entry_sz = init_vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + entry_sz * init_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -33,10 +79,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
-
MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
- for (i = 0; i < init_size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ fill_rqn_list(rqtc, init_rqns, init_vhca_ids, init_size);
err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
@@ -49,7 +94,7 @@ int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
{
u16 max_size = indir_enabled ? indir_table_size : 1;
- return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, NULL, 1);
}
static int mlx5e_bits_invert(unsigned long a, int size)
@@ -63,7 +108,8 @@ static int mlx5e_bits_invert(unsigned long a, int size)
return inv;
}
-static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, u32 *rss_vhca_ids, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
unsigned int i;
@@ -82,30 +128,42 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
*/
return -EINVAL;
rss_rqns[i] = rqns[ix];
+ if (vhca_ids)
+ rss_vhca_ids[i] = vhca_ids[ix];
}
return 0;
}
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+ err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, rss_vhca_ids,
indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
@@ -126,15 +184,20 @@ void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
}
-static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int size)
{
- unsigned int i;
+ int entry_sz;
void *rqtc;
int inlen;
u32 *in;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, size))
+ return -EINVAL;
+
+ entry_sz = vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + entry_sz * size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -143,8 +206,8 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
- for (i = 0; i < size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ fill_rqn_list(rqtc, rqns, vhca_ids, size);
err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
@@ -152,17 +215,21 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
return err;
}
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id)
{
- return mlx5e_rqt_redirect(rqt, &rqn, 1);
+ return mlx5e_rqt_redirect(rqt, &rqn, vhca_id, 1);
}
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, num_rqns))
+ return -EINVAL;
+
if (WARN_ON(rqt->size != indir->max_table_size))
return -EINVAL;
@@ -170,13 +237,23 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, rss_vhca_ids, indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index 77fba3ebd18d..e0bc30308c77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -20,7 +20,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels);
struct mlx5e_rqt {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 rqtn;
u16 size;
};
@@ -28,7 +28,7 @@ struct mlx5e_rqt {
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
bool indir_enabled, u32 init_rqn, u32 indir_table_size);
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
@@ -38,8 +38,9 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
#endif /* __MLX5_EN_RQT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1545a2e8d6d..5f742f896600 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -74,7 +74,7 @@ struct mlx5e_rss {
struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 drop_rqn;
bool inner_ft_support;
bool enabled;
@@ -473,21 +473,22 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
int err;
- err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir);
+ err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc,
+ &rss->indir);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
return err;
}
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
rss->enabled = true;
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
}
void mlx5e_rss_disable(struct mlx5e_rss *rss)
@@ -495,7 +496,7 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
@@ -568,7 +569,7 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns)
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
bool changed_hash = false;
@@ -608,7 +609,7 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
}
if (changed_indir && rss->enabled) {
- err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
mlx5e_rss_copy(rss, old_rss);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d1d0bc350e92..d0df98963c8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -39,7 +39,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -47,7 +47,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns);
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index b23e224e3763..a86eade9a9e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -8,7 +8,7 @@
#define MLX5E_MAX_NUM_RSS 16
struct mlx5e_rx_res {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
unsigned int max_nch;
u32 drop_rqn;
@@ -19,6 +19,7 @@ struct mlx5e_rx_res {
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 *rss_rqns;
+ u32 *rss_vhca_ids;
unsigned int rss_nch;
struct {
@@ -34,6 +35,13 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
+static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
+{
+ bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
+
+ return multi_vhca ? res->rss_vhca_ids + offset : NULL;
+}
+
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
int i;
@@ -85,8 +93,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
- if (res->rss_active)
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ if (res->rss_active) {
+ u32 *vhca_ids = get_vhca_ids(res, 0);
+
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
+ }
res->rss[i] = rss;
*rss_idx = i;
@@ -153,10 +164,12 @@ static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
struct mlx5e_rss *rss = res->rss[i];
+ u32 *vhca_ids;
if (!rss)
continue;
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ vhca_ids = get_vhca_ids(res, 0);
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
}
@@ -200,6 +213,7 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
const u32 *indir, const u8 *key, const u8 *hfunc)
{
+ u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
if (rss_idx >= MLX5E_MAX_NUM_RSS)
@@ -209,7 +223,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
+ res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -280,11 +295,13 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
+ kvfree(res->rss_vhca_ids);
kvfree(res->rss_rqns);
kvfree(res);
}
-static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
+ bool multi_vhca)
{
struct mlx5e_rx_res *rx_res;
@@ -298,6 +315,15 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig
return NULL;
}
+ if (multi_vhca) {
+ rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
+ if (!rx_res->rss_vhca_ids) {
+ kvfree(rx_res->rss_rqns);
+ kvfree(rx_res);
+ return NULL;
+ }
+ }
+
return rx_res;
}
@@ -424,10 +450,11 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
+ bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
struct mlx5e_rx_res *res;
int err;
- res = mlx5e_rx_res_alloc(mdev, max_nch);
+ res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
if (!res)
return ERR_PTR(-ENOMEM);
@@ -504,10 +531,11 @@ static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
struct mlx5e_channels *chs,
unsigned int ix)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
u32 rqn = res->rss_rqns[ix];
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -519,7 +547,7 @@ static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
{
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -534,10 +562,12 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
nch = mlx5e_channels_get_num(chs);
for (ix = 0; ix < chs->num; ix++) {
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (mlx5e_channels_is_xsk(chs, ix))
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
}
res->rss_nch = chs->num;
@@ -554,7 +584,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -573,7 +603,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -584,10 +614,12 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
unsigned int ix, bool xsk)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (xsk)
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
mlx5e_rx_res_rss_enable(res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 82aaba8a82b3..7b1a9f0f1874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -18,6 +18,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
MLX5E_RX_RES_FEATURE_PTP = BIT(1),
+ MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2),
};
/* Setup */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index ac458a8d10e0..53ca16cb9c41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -63,10 +63,12 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
struct mlx5e_rq *rq = &t->rq;
+ u16 q_counter;
int node;
int err;
node = dev_to_node(mdev->device);
+ q_counter = priv->q_counter[0];
ccp.netdev = priv->netdev;
ccp.wq = priv->wq;
@@ -79,7 +81,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
return err;
mlx5e_init_trap_rq(t, &t->params, rq);
- err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
+ err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq);
if (err)
goto err_destroy_cq;
@@ -116,15 +118,14 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
- int max_mtu, u16 q_counter,
- struct mlx5e_trap *t)
+ int max_mtu, struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
+ mlx5e_build_rq_param(mdev, params, NULL, &t->rq_param);
}
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
@@ -138,7 +139,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
if (!t)
return ERR_PTR(-ENOMEM);
- mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
+ mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, t);
t->priv = priv;
t->mdev = priv->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index ebada0c5af3c..db776e515b6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -6,10 +6,10 @@
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+static int mlx5e_xsk_map_pool(struct mlx5_core_dev *mdev,
struct xsk_buff_pool *pool)
{
- struct device *dev = mlx5_core_dma_dev(priv->mdev);
+ struct device *dev = mlx5_core_dma_dev(mdev);
return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}
@@ -89,7 +89,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_pool(priv, pool);
+ err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);
if (unlikely(err))
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 82e6abbc1734..06592b9f0424 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -49,10 +49,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
- mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
+ mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
@@ -93,6 +92,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
struct mlx5e_rq *xskrq = &c->xskrq;
int err;
@@ -100,7 +100,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), xskrq);
+ err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq);
if (err)
return err;
@@ -125,7 +125,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;
- mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
+ mlx5e_build_xsk_cparam(priv->mdev, params, xsk, cparam);
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 05612d9c6080..c54fd01ea635 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -984,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+ x->stats.integrity_failed += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+ x->stats.replay += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ }
}
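
The accumulation pattern above only works if mlx5_fc_query_cached() reports the delta since the previous query rather than a running total; each call then folds the new hardware hits into x->curlft and x->stats. A small model of such a delta-reporting cached counter (the delta semantics are an assumption inferred from this code, not a statement about the mlx5 API):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of a cached flow counter that hands back the delta since the
     * last read, updating its bookmark as it goes. */
    struct fc { uint64_t hw, last; };

    static uint64_t fc_query_cached(struct fc *c)
    {
            uint64_t delta = c->hw - c->last;

            c->last = c->hw;
            return delta;
    }

    int main(void)
    {
            struct fc replay = { .hw = 0 };
            uint64_t total = 0;

            replay.hw = 5;  total += fc_query_cached(&replay); /* +5 */
            replay.hw = 12; total += fc_query_cached(&replay); /* +7 */
            printf("replay total=%llu\n", (unsigned long long)total); /* 12 */
            return 0;
    }
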
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
@@ -1156,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
- .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index adaea3493193..7d943e93cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
- atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 51a144246ea6..727fa7c18523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -304,12 +304,6 @@ drop:
return false;
}
-enum {
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
-
- switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
- xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
- break;
- default:
- atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
- }
+ xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
- *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
-
+ *metadata = ipsec_obj_id;
return 0;
}
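
With the syndrome gone, the 32-bit RX metadata now carries nothing but the IPsec object id; the marker bit and the 24-bit handle mask declared in ipsec_rxtx.h keep decoding it unchanged. A standalone illustration of that bit layout (the macros are re-declared locally so the snippet compiles on its own):

    #include <stdio.h>
    #include <stdint.h>

    /* Same layout as the MLX5_IPSEC_METADATA_* macros in ipsec_rxtx.h. */
    #define IPSEC_METADATA_MARKER(md) (((md) >> 31) & 0x1)
    #define IPSEC_METADATA_HANDLE(md) ((md) & 0xFFFFFF) /* GENMASK(23, 0) */

    int main(void)
    {
            uint32_t metadata = 0x00001234; /* now just the IPsec object id */

            printf("marker=%u handle=%#x\n",
                   (unsigned int)IPSEC_METADATA_MARKER(metadata),
                   (unsigned int)IPSEC_METADATA_HANDLE(metadata));
            return 0;
    }
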
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2ed99772f168..82064614846f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index e0e36a09721c..dd36b04e30a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 984fa04bd331..e3e57c849436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -96,7 +96,7 @@ bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx) || mlx5_get_sd(mdev))
return false;
/* Check the possibility to post the required ICOSQ WQEs. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index f11075e67658..adc6d8ea0960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,6 +11,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include "lib/crypto.h"
+#include "lib/mlx5.h"
struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
struct tls_crypto_info *crypto_info);
@@ -61,7 +62,8 @@ void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_
static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx) &&
+ !mlx5_get_sd(mdev);
}
bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 9b597cb24598..65ccb33edafb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -267,7 +267,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
goto err_out;
}
- pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
+ pdev = mlx5_core_dma_dev(sq->channel->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@@ -425,14 +425,12 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
- struct mlx5e_ktls_rx_resync_ctx *resync;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
- resync = &priv_rx->resync;
- dev = mlx5_core_dma_dev(resync->priv->mdev);
+ dev = mlx5_core_dma_dev(sq->channel->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index e66f486faafe..c7f542d0b8f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/rps.h>
#include "en.h"
#define ARFS_HASH_SHIFT BITS_PER_BYTE
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c8e8f512803e..91848eae4565 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -70,6 +70,7 @@
#include "qos.h"
#include "en/trap.h"
#include "lib/devcom.h"
+#include "lib/sd.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -1024,7 +1025,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
@@ -1051,6 +1052,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -1274,7 +1276,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
- struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1287,7 +1289,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
return err;
- err = mlx5e_create_rq(rq, param);
+ err = mlx5e_create_rq(rq, param, q_counter);
if (err)
goto err_free_rq;
@@ -1806,6 +1808,7 @@ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
}
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
@@ -1819,6 +1822,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
@@ -2333,13 +2337,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_params)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
+ return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
}
static int mlx5e_open_queues(struct mlx5e_channel *c,
@@ -2526,14 +2531,20 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
- int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
+ int vec_ix;
+ int cpu;
int err;
- err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
+ cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix);
+
+ err = mlx5_comp_irqn_get(mdev, vec_ix, &irq);
if (err)
return err;
@@ -2546,20 +2557,23 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return -ENOMEM;
c->priv = priv;
- c->mdev = priv->mdev;
+ c->mdev = mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
+ c->vec_ix = vec_ix;
+ c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
c->cpu = cpu;
- c->pdev = mlx5_core_dma_dev(priv->mdev);
+ c->pdev = mlx5_core_dma_dev(mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
+ c->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
- c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
+ c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
+ netif_napi_set_irq(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2602,12 +2616,16 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
+
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL);
+
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
else
@@ -2647,7 +2665,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (!chs->c || !cparam)
goto err_free;
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+ err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
if (err)
goto err_free;
@@ -2935,15 +2953,18 @@ static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- int num_comp_vectors, ix, irq;
-
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
+ int ix;
for (ix = 0; ix < params->num_channels; ix++) {
+ int num_comp_vectors, irq, vec_ix;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
cpumask_clear(priv->scratchpad.cpumask);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
- for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
@@ -3335,7 +3356,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
+ mlx5e_build_drop_rq_param(mdev, &rq_param);
err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
@@ -3349,7 +3370,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
if (err)
goto err_destroy_cq;
- err = mlx5e_create_rq(drop_rq, &rq_param);
+ err = mlx5e_create_rq(drop_rq, &rq_param, priv->drop_rq_q_counter);
if (err)
goto err_free_rq;
@@ -5264,13 +5285,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv)
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- int err;
+ struct mlx5_core_dev *pos;
+ int err, i;
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
- if (!err)
- priv->q_counter =
- MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+
+ mlx5_sd_for_each_dev(i, mdev, pos) {
+ err = mlx5_cmd_exec_inout(pos, alloc_q_counter, in, out);
+ if (!err)
+ priv->q_counter[i] =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ }
err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
if (!err)
@@ -5281,13 +5306,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv)
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- if (priv->q_counter) {
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
- priv->q_counter);
- mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ if (priv->q_counter[i]) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->q_counter[i]);
+ mlx5_cmd_exec_in(pos, dealloc_q_counter, in);
+ }
}
if (priv->drop_rq_q_counter) {
@@ -5371,6 +5400,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_PTP;
if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ if (mlx5_get_sd(priv->mdev))
+ features |= MLX5E_RX_RES_FEATURE_MULTI_VHCA;
priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
@@ -5980,28 +6011,52 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
free_netdev(netdev);
}
-static int mlx5e_resume(struct auxiliary_device *adev)
+static int _mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
- int err;
+ struct mlx5_core_dev *pos, *to;
+ int err, i;
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev, true);
- if (err)
- return err;
+ mlx5_sd_for_each_dev(i, mdev, pos) {
+ err = mlx5e_create_mdev_resources(pos, true);
+ if (err)
+ goto err_destroy_mdev_res;
+ }
err = mlx5e_attach_netdev(priv);
- if (err) {
- mlx5e_destroy_mdev_resources(mdev);
+ if (err)
+ goto err_destroy_mdev_res;
+
+ return 0;
+
+err_destroy_mdev_res:
+ to = pos;
+ mlx5_sd_for_each_dev_to(i, mdev, to, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ return err;
+}
+
+static int mlx5e_resume(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err;
+
+ err = mlx5_sd_init(mdev);
+ if (err)
return err;
- }
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ return _mlx5e_resume(actual_adev);
return 0;
}
@@ -6011,21 +6066,36 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *pos;
+ int i;
if (!netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
return -ENODEV;
}
mlx5e_detach_netdev(priv);
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+
return 0;
}
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- return _mlx5e_suspend(adev);
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err = 0;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ err = _mlx5e_suspend(actual_adev);
+
+ mlx5_sd_cleanup(mdev);
+ return err;
}
static int _mlx5e_probe(struct auxiliary_device *adev)
@@ -6071,9 +6141,9 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_destroy_netdev;
}
- err = mlx5e_resume(adev);
+ err = _mlx5e_resume(adev);
if (err) {
- mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
+ mlx5_core_err(mdev, "_mlx5e_resume failed, %d\n", err);
goto err_profile_cleanup;
}
@@ -6104,15 +6174,29 @@ err_devlink_unregister:
static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
- return _mlx5e_probe(adev);
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err;
+
+ err = mlx5_sd_init(mdev);
+ if (err)
+ return err;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ return _mlx5e_probe(actual_adev);
+ return 0;
}
-static void mlx5e_remove(struct auxiliary_device *adev)
+static void _mlx5e_remove(struct auxiliary_device *adev)
{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
+ struct mlx5_core_dev *mdev = edev->mdev;
- mlx5_core_uplink_netdev_set(priv->mdev, NULL);
+ mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev);
@@ -6122,6 +6206,19 @@ static void mlx5e_remove(struct auxiliary_device *adev)
mlx5e_destroy_devlink(mlx5e_dev);
}
+static void mlx5e_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ _mlx5e_remove(actual_adev);
+
+ mlx5_sd_cleanup(mdev);
+}
+
static const struct auxiliary_device_id mlx5e_id_table[] = {
{ .name = MLX5_ADEV_NAME ".eth", },
{},
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 4b96ad657145..f3d0898bdbc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -561,11 +561,23 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
+static bool q_counter_any(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (priv->q_counter[i])
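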
+ return true;
+
+ return false;
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
- if (priv->q_counter)
+ if (q_counter_any(priv))
num_stats += NUM_Q_COUNTERS;
if (priv->drop_rq_q_counter)
@@ -578,7 +590,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
- for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
q_stats_desc[i].format);
@@ -593,7 +605,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
- for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
q_stats_desc, i);
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
@@ -607,18 +619,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
- int ret;
+ struct mlx5_core_dev *pos;
+ u32 rx_out_of_buffer = 0;
+ int ret, i;
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- if (priv->q_counter) {
- MLX5_SET(query_q_counter_in, in, counter_set_id,
- priv->q_counter);
- ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
- if (!ret)
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
- out, out_of_buffer);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ if (priv->q_counter[i]) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->q_counter[i]);
+ ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
+ if (!ret)
+ rx_out_of_buffer += MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
}
+ qcnt->rx_out_of_buffer = rx_out_of_buffer;
if (priv->drop_rq_q_counter) {
MLX5_SET(query_q_counter_in, in, counter_set_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fb2c057bd78..31ed26cac9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -766,7 +766,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
return err;
mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
- err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
+ err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, NULL, hp->num_channels,
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
&indir);
@@ -1169,7 +1169,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.q_counter = priv->q_counter;
+ params.q_counter = priv->q_counter[0];
err = devl_param_driverinit_value_get(
devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 58f4c0d0fafa..e7faf7e73ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
return -EIO;
}
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);
/* Loop until device state turns to disable */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
cond_resched();
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index b5c709bba155..ad38e31822df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
return MLX5_SENSOR_PCI_COMM_ERR;
if (pci_channel_offline(dev->pdev))
return MLX5_SENSOR_PCI_ERR;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
return MLX5_SENSOR_NIC_DISABLED;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
return MLX5_SENSOR_NIC_SW_RESET;
if (sensor_fw_synd_rfr(dev))
return MLX5_SENSOR_FW_SYND_RFR;
@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);
return true;
}
@@ -246,13 +246,13 @@ recover_from_sw_reset:
/* Recover from SW reset */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
msleep(20);
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
- case MLX5_NIC_IFC_FULL:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
break;
- case MLX5_NIC_IFC_DISABLED:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
mlx5_core_warn(dev, "starting teardown\n");
break;
- case MLX5_NIC_IFC_NO_DRAM_NIC:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
- case MLX5_NIC_IFC_SW_RESET:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
* 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
* and this is a VF), this is not recoverable by SW reset.
* Logging of this is handled elsewhere.
* 2. FW reset has been issued by another function, driver can
* be reloaded to recover after the mode switches to
- * MLX5_NIC_IFC_DISABLED.
+ * MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset in progress\n");
@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
&fw_reporter_ctx);
}
-static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
.dump = mlx5_fw_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = mlx5_fw_reporter_diagnose,
+};
+
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
-static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+};
+
#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
+ const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
+ const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
u64 grace_period;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
+ fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
} else if (mlx5_core_is_pf(dev)) {
@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
} else {
/* VF or SF */
grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
+ fw_ops = &mlx5_fw_reporter_ops;
}
health->fw_reporter =
- devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, dev);
+ devl_health_reporter_create(devlink, fw_ops, 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
devl_health_reporter_create(devlink,
- &mlx5_fw_fatal_reporter_ops,
+ fw_fatal_ops,
grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index ec32b686f586..d58032dd0df7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -10,6 +10,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
+ MLX5_DEVCOM_SD_GROUP,
MLX5_DEVCOM_NUM_COMPONENTS,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2b5826a785c4..37d5f445598c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -54,4 +54,16 @@ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *md
{
return mdev->mlx5e_res.uplink_netdev;
}
+
+struct mlx5_sd;
+
+static inline struct mlx5_sd *mlx5_get_sd(struct mlx5_core_dev *dev)
+{
+ return dev->sd;
+}
+
+static inline void mlx5_set_sd(struct mlx5_core_dev *dev, struct mlx5_sd *sd)
+{
+ dev->sd = sd;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
new file mode 100644
index 000000000000..5b28084e8a03
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "lib/sd.h"
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+#include "fs_cmd.h"
+#include <linux/mlx5/vport.h>
+#include <linux/debugfs.h>
+
+#define sd_info(__dev, format, ...) \
+ dev_info((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
+#define sd_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
+
+struct mlx5_sd {
+ u32 group_id;
+ u8 host_buses;
+ struct mlx5_devcom_comp_dev *devcom;
+ struct dentry *dfs;
+ bool primary;
+ union {
+ struct { /* primary */
+ struct mlx5_core_dev *secondaries[MLX5_SD_MAX_GROUP_SZ - 1];
+ struct mlx5_flow_table *tx_ft;
+ };
+ struct { /* secondary */
+ struct mlx5_core_dev *primary_dev;
+ u32 alias_obj_id;
+ };
+ };
+};
+
+static int mlx5_sd_get_host_buses(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ if (!sd)
+ return 1;
+
+ return sd->host_buses;
+}
+
+static struct mlx5_core_dev *mlx5_sd_get_primary(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ if (!sd)
+ return dev;
+
+ return sd->primary ? dev : sd->primary_dev;
+}
+
+struct mlx5_core_dev *
+mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx)
+{
+ struct mlx5_sd *sd;
+
+ if (idx == 0)
+ return primary;
+
+ if (idx >= mlx5_sd_get_host_buses(primary))
+ return NULL;
+
+ sd = mlx5_get_sd(primary);
+ return sd->secondaries[idx - 1];
+}
+
+int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix)
+{
+ return ch_ix % mlx5_sd_get_host_buses(dev);
+}
+
+int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix)
+{
+ return ch_ix / mlx5_sd_get_host_buses(dev);
+}
+
+struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix)
+{
+ int mdev_idx = mlx5_sd_ch_ix_get_dev_ix(primary, ch_ix);
+
+ return mlx5_sd_primary_get_peer(primary, mdev_idx);
+}
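
These two helpers define the channel placement policy: channels are interleaved round-robin across the PFs in the group (device index = ch_ix % host_buses), while each PF sees a dense range of completion vectors (vec_ix = ch_ix / host_buses). A worked example, assuming 2 host buses and 8 channels:

    #include <stdio.h>

    int main(void)
    {
            int host_buses = 2, num_channels = 8;

            /* Mirrors mlx5_sd_ch_ix_get_dev_ix()/_get_vec_ix(): channels
             * alternate between group members, each member getting a
             * contiguous completion-vector range. */
            for (int ch = 0; ch < num_channels; ch++)
                    printf("channel %d -> dev %d, vector %d\n",
                           ch, ch % host_buses, ch / host_buses);
            return 0;
    }

    /* channel 0 -> dev 0, vector 0; channel 1 -> dev 1, vector 0;
     * channel 2 -> dev 0, vector 1; ...; channel 7 -> dev 1, vector 3. */
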
+
+static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
+{
+ u64 obj_allowed = MLX5_CAP_GEN_2_64(dev, allowed_object_for_other_vhca_access);
+ u32 obj_supp = MLX5_CAP_GEN_2(dev, cross_vhca_object_to_object_supported);
+
+ if (!(obj_supp &
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE))
+ return false;
+
+ if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
+ return false;
+
+ return true;
+}
+
+static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
+{
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return false;
+
+ /* Honor the SW implementation limit */
+ if (host_buses > MLX5_SD_MAX_GROUP_SZ)
+ return false;
+
+ /* Disconnect secondaries from the network */
+ if (!MLX5_CAP_GEN(dev, eswitch_manager))
+ return false;
+ if (!MLX5_CAP_GEN(dev, silent_mode))
+ return false;
+
+ /* RX steering from primary to secondaries */
+ if (!MLX5_CAP_GEN(dev, cross_vhca_rqt))
+ return false;
+ if (host_buses > MLX5_CAP_GEN_2(dev, max_rqt_vhca_id))
+ return false;
+
+ /* TX steering from secondaries to primary */
+ if (!ft_create_alias_supported(dev))
+ return false;
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ return false;
+
+ return true;
+}
+
+static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
+ u8 *host_buses, u8 *sd_group)
+{
+ u32 out[MLX5_ST_SZ_DW(mpir_reg)];
+ int err;
+
+ err = mlx5_query_mpir_reg(dev, out);
+ if (err)
+ return err;
+
+ err = mlx5_query_nic_vport_sd_group(dev, sd_group);
+ if (err)
+ return err;
+
+ *sdm = MLX5_GET(mpir_reg, out, sdm);
+ *host_buses = MLX5_GET(mpir_reg, out, host_buses);
+
+ return 0;
+}
+
+static u32 mlx5_sd_group_id(struct mlx5_core_dev *dev, u8 sd_group)
+{
+ return (u32)((MLX5_CAP_GEN(dev, native_port_num) << 8) | sd_group);
+}
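
The devcom key packs the physical port number above the firmware-reported sd_group, so only PFs that share both the port and the SDM group can pair up. For example:

    #include <stdio.h>

    int main(void)
    {
            unsigned int native_port_num = 1, sd_group = 2; /* sample values */

            /* Same packing as mlx5_sd_group_id(): port in bits 8 and up,
             * group in bits 0-7. */
            printf("group_id = %#x\n", (native_port_num << 8) | sd_group);
            return 0; /* prints group_id = 0x102 */
    }
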
+
+static int sd_init(struct mlx5_core_dev *dev)
+{
+ u8 host_buses, sd_group;
+ struct mlx5_sd *sd;
+ u32 group_id;
+ bool sdm;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ return 0;
+
+ err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ if (err)
+ return err;
+
+ if (!sdm)
+ return 0;
+
+ if (!sd_group)
+ return 0;
+
+ group_id = mlx5_sd_group_id(dev, sd_group);
+
+ if (!mlx5_sd_is_supported(dev, host_buses)) {
+ sd_warn(dev, "can't support requested netdev combining for group id 0x%x, skipping\n",
+ group_id);
+ return 0;
+ }
+
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ sd->host_buses = host_buses;
+ sd->group_id = group_id;
+
+ mlx5_set_sd(dev, sd);
+
+ return 0;
+}
+
+static void sd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ mlx5_set_sd(dev, NULL);
+ kfree(sd);
+}
+
+static int sd_register(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_core_dev *peer, *primary;
+ struct mlx5_sd *sd, *primary_sd;
+ int err, i;
+
+ sd = mlx5_get_sd(dev);
+ devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
+ sd->group_id, NULL, dev);
+ if (!devcom)
+ return -ENOMEM;
+
+ sd->devcom = devcom;
+
+ if (mlx5_devcom_comp_get_size(devcom) != sd->host_buses)
+ return 0;
+
+ mlx5_devcom_comp_lock(devcom);
+ mlx5_devcom_comp_set_ready(devcom, true);
+ mlx5_devcom_comp_unlock(devcom);
+
+ if (!mlx5_devcom_for_each_peer_begin(devcom)) {
+ err = -ENODEV;
+ goto err_devcom_unreg;
+ }
+
+ primary = dev;
+ mlx5_devcom_for_each_peer_entry(devcom, peer, pos)
+ if (peer->pdev->bus->number < primary->pdev->bus->number)
+ primary = peer;
+
+ primary_sd = mlx5_get_sd(primary);
+ primary_sd->primary = true;
+ i = 0;
+ /* loop the secondaries */
+ mlx5_devcom_for_each_peer_entry(primary_sd->devcom, peer, pos) {
+ struct mlx5_sd *peer_sd = mlx5_get_sd(peer);
+
+ primary_sd->secondaries[i++] = peer;
+ peer_sd->primary = false;
+ peer_sd->primary_dev = primary;
+ }
+
+ mlx5_devcom_for_each_peer_end(devcom);
+ return 0;
+
+err_devcom_unreg:
+ mlx5_devcom_comp_lock(sd->devcom);
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
+ mlx5_devcom_unregister_component(sd->devcom);
+ return err;
+}
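
Once all host_buses members have registered with devcom, sd_register() elects the member on the lowest PCI bus number as primary. A toy model of that election:

    #include <stdio.h>
    #include <stddef.h>

    struct pf { const char *name; int bus; };

    int main(void)
    {
            /* Two PFs of one Socket-Direct group; sample names and buses. */
            struct pf group[] = {
                    { "0000:c1:00.0", 0xc1 },
                    { "0000:81:00.0", 0x81 },
            };
            struct pf *primary = &group[0];

            for (size_t i = 1; i < sizeof(group) / sizeof(group[0]); i++)
                    if (group[i].bus < primary->bus)
                            primary = &group[i]; /* lowest bus number wins */

            printf("primary: %s\n", primary->name); /* 0000:81:00.0 */
            return 0;
    }
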
+
+static void sd_unregister(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ mlx5_devcom_comp_lock(sd->devcom);
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
+ mlx5_devcom_unregister_component(sd->devcom);
+}
+
+static int sd_cmd_set_primary(struct mlx5_core_dev *primary, u8 *alias_key)
+{
+ struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *nic_ns;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ nic_ns = mlx5_get_flow_namespace(primary, MLX5_FLOW_NAMESPACE_EGRESS);
+ if (!nic_ns)
+ return -EOPNOTSUPP;
+
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ return err;
+ }
+ sd->tx_ft = ft;
+ memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ allow_attr.obj_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+
+ err = mlx5_cmd_allow_other_vhca_access(primary, &allow_attr);
+ if (err) {
+ mlx5_core_err(primary, "Failed to allow other vhca access err=%d\n",
+ err);
+ mlx5_destroy_flow_table(ft);
+ return err;
+ }
+
+ return 0;
+}
+
+static void sd_cmd_unset_primary(struct mlx5_core_dev *primary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+
+ mlx5_destroy_flow_table(sd->tx_ft);
+}
+
+static int sd_secondary_create_alias_ft(struct mlx5_core_dev *secondary,
+ struct mlx5_core_dev *primary,
+ struct mlx5_flow_table *ft,
+ u32 *obj_id, u8 *alias_key)
+{
+ u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+ u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(primary, vhca_id);
+ struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
+ int ret;
+
+ memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ alias_attr.obj_id = aliased_object_id;
+ alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ alias_attr.vhca_id = vhca_id_to_be_accessed;
+ ret = mlx5_cmd_alias_obj_create(secondary, &alias_attr, obj_id);
+ if (ret) {
+ mlx5_core_err(secondary, "Failed to create alias object err=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sd_secondary_destroy_alias_ft(struct mlx5_core_dev *secondary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(secondary);
+
+ mlx5_cmd_alias_obj_destroy(secondary, sd->alias_obj_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+}
+
+static int sd_cmd_set_secondary(struct mlx5_core_dev *secondary,
+ struct mlx5_core_dev *primary,
+ u8 *alias_key)
+{
+ struct mlx5_sd *primary_sd = mlx5_get_sd(primary);
+ struct mlx5_sd *sd = mlx5_get_sd(secondary);
+ int err;
+
+ err = mlx5_fs_cmd_set_l2table_entry_silent(secondary, 1);
+ if (err)
+ return err;
+
+ err = sd_secondary_create_alias_ft(secondary, primary, primary_sd->tx_ft,
+ &sd->alias_obj_id, alias_key);
+ if (err)
+ goto err_unset_silent;
+
+ err = mlx5_fs_cmd_set_tx_flow_table_root(secondary, sd->alias_obj_id, false);
+ if (err)
+ goto err_destroy_alias_ft;
+
+ return 0;
+
+err_destroy_alias_ft:
+ sd_secondary_destroy_alias_ft(secondary);
+err_unset_silent:
+ mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
+ return err;
+}
+
+static void sd_cmd_unset_secondary(struct mlx5_core_dev *secondary)
+{
+ mlx5_fs_cmd_set_tx_flow_table_root(secondary, 0, true);
+ sd_secondary_destroy_alias_ft(secondary);
+ mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
+}
+
+static void sd_print_group(struct mlx5_core_dev *primary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+ struct mlx5_core_dev *pos;
+ int i;
+
+ sd_info(primary, "group id %#x, primary %s, vhca %#x\n",
+ sd->group_id, pci_name(primary->pdev),
+ MLX5_CAP_GEN(primary, vhca_id));
+ mlx5_sd_for_each_secondary(i, primary, pos)
+ sd_info(primary, "group id %#x, secondary_%d %s, vhca %#x\n",
+ sd->group_id, i - 1, pci_name(pos->pdev),
+ MLX5_CAP_GEN(pos, vhca_id));
+}
+
+static ssize_t dev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_core_dev *dev;
+ char tbuf[32];
+ int ret;
+
+ dev = filp->private_data;
+ ret = snprintf(tbuf, sizeof(tbuf), "%s vhca %#x\n", pci_name(dev->pdev),
+ MLX5_CAP_GEN(dev, vhca_id));
+
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations dev_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = dev_read,
+};
+
+int mlx5_sd_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_dev *primary, *pos, *to;
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ u8 alias_key[ACCESS_KEY_LEN];
+ int err, i;
+
+ err = sd_init(dev);
+ if (err)
+ return err;
+
+ sd = mlx5_get_sd(dev);
+ if (!sd)
+ return 0;
+
+ err = sd_register(dev);
+ if (err)
+ goto err_sd_cleanup;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ return 0;
+
+ primary = mlx5_sd_get_primary(dev);
+
+ for (i = 0; i < ACCESS_KEY_LEN; i++)
+ alias_key[i] = get_random_u8();
+
+ err = sd_cmd_set_primary(primary, alias_key);
+ if (err)
+ goto err_sd_unregister;
+
+ sd->dfs = debugfs_create_dir("multi-pf", mlx5_debugfs_get_dev_root(primary));
+ debugfs_create_x32("group_id", 0400, sd->dfs, &sd->group_id);
+ debugfs_create_file("primary", 0400, sd->dfs, primary, &dev_fops);
+
+ mlx5_sd_for_each_secondary(i, primary, pos) {
+ char name[32];
+
+ err = sd_cmd_set_secondary(pos, primary, alias_key);
+ if (err)
+ goto err_unset_secondaries;
+
+ snprintf(name, sizeof(name), "secondary_%d", i - 1);
+ debugfs_create_file(name, 0400, sd->dfs, pos, &dev_fops);
+ }
+
+ sd_info(primary, "group id %#x, size %d, combined\n",
+ sd->group_id, mlx5_devcom_comp_get_size(sd->devcom));
+ sd_print_group(primary);
+
+ return 0;
+
+err_unset_secondaries:
+ to = pos;
+ mlx5_sd_for_each_secondary_to(i, primary, to, pos)
+ sd_cmd_unset_secondary(pos);
+ sd_cmd_unset_primary(primary);
+ debugfs_remove_recursive(sd->dfs);
+err_sd_unregister:
+ sd_unregister(dev);
+err_sd_cleanup:
+ sd_cleanup(dev);
+ return err;
+}
+
+void mlx5_sd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ struct mlx5_core_dev *primary, *pos;
+ int i;
+
+ if (!sd)
+ return;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ goto out;
+
+ primary = mlx5_sd_get_primary(dev);
+ mlx5_sd_for_each_secondary(i, primary, pos)
+ sd_cmd_unset_secondary(pos);
+ sd_cmd_unset_primary(primary);
+ debugfs_remove_recursive(sd->dfs);
+
+ sd_info(primary, "group id %#x, uncombined\n", sd->group_id);
+out:
+ sd_unregister(dev);
+ sd_cleanup(dev);
+}
+
+struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
+ struct auxiliary_device *adev,
+ int idx)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ struct mlx5_core_dev *primary;
+
+ if (!sd)
+ return adev;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ return NULL;
+
+ primary = mlx5_sd_get_primary(dev);
+ if (dev == primary)
+ return adev;
+
+ return &primary->priv.adev[idx]->adev;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
new file mode 100644
index 000000000000..137efaf9aabc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_SD_H__
+#define __MLX5_LIB_SD_H__
+
+#define MLX5_SD_MAX_GROUP_SZ 2
+
+struct mlx5_sd;
+
+struct mlx5_core_dev *mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx);
+int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix);
+int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix);
+struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix);
+struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
+ struct auxiliary_device *adev,
+ int idx);
+
+int mlx5_sd_init(struct mlx5_core_dev *dev);
+void mlx5_sd_cleanup(struct mlx5_core_dev *dev);
+
+#define mlx5_sd_for_each_dev_from_to(i, primary, ix_from, to, pos) \
+ for (i = ix_from; \
+ (pos = mlx5_sd_primary_get_peer(primary, i)) && pos != (to); i++)
+
+#define mlx5_sd_for_each_dev(i, primary, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 0, NULL, pos)
+
+#define mlx5_sd_for_each_dev_to(i, primary, to, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 0, to, pos)
+
+#define mlx5_sd_for_each_secondary(i, primary, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 1, NULL, pos)
+
+#define mlx5_sd_for_each_secondary_to(i, primary, to, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 1, to, pos)
+
+#endif /* __MLX5_LIB_SD_H__ */
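The _to iterator variants exist so that error unwinding can stop exactly at the element that failed, which is how mlx5_sd_init() uses them above. A hedged usage sketch, assuming hypothetical setup_one()/teardown_one() helpers:

/* Sketch only: setup_one()/teardown_one() are stand-ins, not real APIs. */
static int setup_all_secondaries(struct mlx5_core_dev *primary)
{
	struct mlx5_core_dev *pos, *to;
	int err, i;

	mlx5_sd_for_each_secondary(i, primary, pos) {
		err = setup_one(pos);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* Tear down only the secondaries set up before the failure. */
	to = pos;
	mlx5_sd_for_each_secondary_to(i, primary, to, pos)
		teardown_one(pos);
	return err;
}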
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bccf6e53556c..c2593625c09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
- u32 warn_time_mili)
+ u32 warn_time_mili, const char *init_state)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
u32 fw_initializing;
- int err = 0;
do {
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
- if (time_after(jiffies, end) ||
- test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
- err = -EBUSY;
- break;
+ if (time_after(jiffies, end)) {
+ mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
+ max_wait_mili, init_state);
+ return -ETIMEDOUT;
+ }
+ if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
+ init_state);
+ return -ENODEV;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
- jiffies_to_msecs(end - warn) / 1000, fw_initializing);
+ mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
+ init_state, jiffies_to_msecs(end - warn) / 1000,
+ fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
} while (true);
- return err;
+ return 0;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
@@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
/* wait for firmware to accept initialization segment configurations
 */
err = wait_fw_init(dev, timeout,
- mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
- timeout);
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
+ "pre-initializing");
+ if (err)
return err;
- }
err = mlx5_cmd_enable(dev);
if (err) {
@@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_tout_query_iseg(dev);
- err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
- mlx5_tout_ms(dev, FW_INIT));
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
+ if (err)
goto err_cmd_cleanup;
- }
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
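The reworked wait_fw_init() is an instance of the common jiffies-based poll loop: a hard deadline that returns -ETIMEDOUT, an early-exit flag that returns -ENODEV, and a periodic warning while waiting. The generic shape, as a sketch with hypothetical my_* names and an assumed poll_done() helper:

struct my_dev {
	struct device *dev;
	unsigned long flags;
};
#define MY_DEV_REMOVING 0

static int wait_for_condition(struct my_dev *mydev, u32 max_wait_ms,
			      u32 warn_ms)
{
	unsigned long warn = jiffies + msecs_to_jiffies(warn_ms);
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_ms);

	while (!poll_done(mydev)) {		/* poll_done() is assumed */
		if (time_after(jiffies, end))
			return -ETIMEDOUT;	/* hard deadline hit */
		if (test_bit(MY_DEV_REMOVING, &mydev->flags))
			return -ENODEV;		/* teardown broke the wait */
		if (warn_ms && time_after(jiffies, warn)) {
			dev_warn(mydev->dev, "still waiting\n");
			warn = jiffies + msecs_to_jiffies(warn_ms);
		}
		msleep(20);
	}
	return 0;
}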
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a79b7959361b..58732f44940f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
return ret;
}
-enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_SW_RESET = 7
-};
-
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index c93492b67788..99219ea52c4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
+ struct mlx5_sf_dev *sf_dev)
{
int id;
@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
return;
xa_err:
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
}
static int
@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(table->dev, sf_dev);
+ mlx5_sf_dev_remove_aux(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 169c2c68ed5c..bc863e1f062e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -95,24 +95,29 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+ struct devlink *devlink;
- mlx5_drain_health_wq(sf_dev->mdev);
+ devlink = priv_to_devlink(mdev);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
devlink_unregister(devlink);
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
+ if (mlx5_dev_is_lightweight(mdev))
+ mlx5_uninit_one_light(mdev);
else
- mlx5_uninit_one(sf_dev->mdev);
- iounmap(sf_dev->mdev->iseg);
- mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_uninit_one(mdev);
+ iounmap(mdev->iseg);
+ mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
- mlx5_unload_one(sf_dev->mdev, false);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
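Setting MLX5_BREAK_FW_WAIT before draining and unloading pairs with the wait_fw_init() change earlier in this series: a waiter polling firmware state sees the bit and returns -ENODEV instead of sleeping out the full timeout while the device is being removed. The handshake reduced to its two halves, with hypothetical names:

/* Remove path: signal waiters first, then tear down. */
static void my_remove(struct my_dev *mydev)
{
	set_bit(MY_BREAK_WAIT, &mydev->flags);	/* waiters observe this... */
	my_unload(mydev);			/* ...so unload is not blocked */
}

/* Wait path: re-check the flag on every poll iteration. */
static int my_wait(struct my_dev *mydev)
{
	while (!my_ready(mydev)) {
		if (test_bit(MY_BREAK_WAIT, &mydev->flags))
			return -ENODEV;
		msleep(20);
	}
	return 0;
}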
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7e36e1062139..64f4cc284aea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -54,6 +54,107 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
+static struct mlx5dr_dbg_dump_buff *
+mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *new_buff;
+
+ new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
+ if (!new_buff)
+ return NULL;
+
+ new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
+ if (!new_buff->buff) {
+ kfree(new_buff);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&new_buff->node);
+ list_add_tail(&new_buff->node, &dump_data->buff_list);
+
+ return new_buff;
+}
+
+static struct mlx5dr_dbg_dump_data *
+mlx5dr_dbg_create_dump_data(void)
+{
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
+ if (!dump_data)
+ return NULL;
+
+ INIT_LIST_HEAD(&dump_data->buff_list);
+
+ if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
+ kfree(dump_data);
+ return NULL;
+ }
+
+ return dump_data;
+}
+
+static void
+mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
+
+ if (!dump_data)
+ return;
+
+ list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
+ kvfree(dump_buff->buff);
+ list_del(&dump_buff->node);
+ kfree(dump_buff);
+ }
+
+ kfree(dump_data);
+}
+
+static int
+mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ struct mlx5dr_dbg_dump_buff *buff;
+ u32 buff_capacity, write_size;
+ int remain_size, ret;
+
+ if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
+ return -EINVAL;
+
+ dump_data = dmn->dump_info.dump_data;
+ buff = list_last_entry(&dump_data->buff_list,
+ struct mlx5dr_dbg_dump_buff, node);
+
+ buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
+ remain_size = buff_capacity - size;
+ write_size = (remain_size > 0) ? size : buff_capacity;
+
+ if (likely(write_size)) {
+ ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
+ if (ret < 0)
+ return ret;
+
+ buff->index += write_size;
+ }
+
+ if (remain_size < 0) {
+ remain_size *= -1;
+ buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
+ if (!buff)
+ return -ENOMEM;
+
+ ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
+ if (ret < 0)
+ return ret;
+
+ buff->index += remain_size;
+ }
+
+ return 0;
+}
+
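mlx5dr_dbg_dump_data_print() appends a record to the tail buffer and, when the record does not fit, allocates a fresh buffer for the remainder; the "+ 1" on the snprintf() size budgets for the terminating NUL so that exactly write_size payload bytes land in the buffer. The split-write arithmetic in isolation, as a simplified sketch (BUF_SZ and the struct are hypothetical):

#define BUF_SZ 4096

struct buf {
	char data[BUF_SZ];
	u32 index;
};

/* Returns how many bytes still need a new buffer (0 if str fit). */
static u32 buf_append(struct buf *b, const char *str, u32 len)
{
	u32 cap = (BUF_SZ - 1) - b->index;	/* reserve room for '\0' */
	u32 n = min(len, cap);

	memcpy(b->data + b->index, str, n);
	b->index += n;
	b->data[b->index] = '\0';
	return len - n;		/* remainder goes into the next buffer */
}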
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
@@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
+ int ret;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id,
- -1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id,
- DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_CTR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
- action->ctr->ctr_id + action->ctr->offset);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TAG:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
- action->flow_tag->flow_tag);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_MODIFY_HDR:
{
@@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
- DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
- rule_id, action->rewrite->index,
- action->rewrite->single_action_opt,
- ptrn_arg ? action->rewrite->num_of_actions : 0,
- ptrn_arg ? ptrn->index : 0,
- ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index,
+ action->rewrite->single_action_opt,
+ ptrn_arg ? action->rewrite->num_of_actions : 0,
+ ptrn_arg ? ptrn->index : 0,
+ ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (ptrn_arg) {
for (i = 0; i < action->rewrite->num_of_actions; i++) {
- seq_printf(file, ",0x%016llx",
- be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ ",0x%016llx",
+ be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
}
- seq_puts(file, "\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
}
case DR_ACTION_TYP_VPORT:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
- action->vport->caps->num);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
- rule_id,
- (action->rewrite->ptrn && action->rewrite->arg) ?
- mlx5dr_arg_get_obj_id(action->rewrite->arg) :
- action->rewrite->index);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id,
+ (action->rewrite->ptrn && action->rewrite->arg) ?
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+ action->rewrite->index);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L3:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_POP_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
- rule_id, action->push_vlan->vlan_hdr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_INSERT_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_REMOVE_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_SAMPLER:
- seq_printf(file,
- "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
- 0, 0, action->sampler->sampler_id,
- action->sampler->rx_icm_addr,
- action->sampler->tx_icm_addr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
+ rule_id, 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
@@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
- hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
- action->range->definer_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
+ rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
+ miss_tbl_ptr, action->range->definer_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
default:
return 0;
@@ -263,8 +491,10 @@ static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
+ int ret;
if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
@@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
DR_STE_SIZE_REDUCED);
- seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
- dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
- hw_ste_dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
+ rule_id, hw_ste_dump);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_rule_rx_tx *rx = &rule->rx;
struct mlx5dr_rule_rx_tx *tx = &rule->tx;
u8 format_ver;
@@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
- seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
- DR_DBG_PTR_TO_ID(rule->matcher));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
+ rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_matcher) {
ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
@@ -344,46 +589,94 @@ static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
+ int ret;
- seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
- matcher_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
+ DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (criteria & DR_MATCHER_CRITERIA_OUTER) {
dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_INNER) {
dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC) {
dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC2) {
dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC3) {
dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
- seq_printf(file, "%s\n", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s\n", dump);
} else {
- seq_puts(file, ",\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -391,9 +684,19 @@ static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
- DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
- builder->lu_type);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
+ is_rx, builder->lu_type);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
- rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
- matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(s_icm_addr),
- dr_dump_icm_to_idx(e_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
+
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 matcher_id;
int ret;
matcher_id = DR_DBG_PTR_TO_ID(matcher);
- seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
- matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
+ matcher->prio);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_matcher_mask(file, &matcher->mask,
matcher->match_criteria, matcher_id);
@@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
+ int ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(s_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(s_icm_addr));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
- seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
- DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
- table->table_type, table->level);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_dmn) {
ret = dr_dump_table_rx_tx(file, true, rx,
@@ -546,46 +884,86 @@ static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
- domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
+ DR_DBG_PTR_TO_ID(ring), domain_id,
+ ring->cq->mcq.cqn, ring->qp->qpn);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_flex_parser(struct seq_file *file,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,%s,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
- flex_parser_name, flex_parser_value);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
+ int ret;
xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
; /* count the number of vports in xarray */
- seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
- caps->nic_rx_drop_address, caps->nic_tx_drop_address,
- caps->flex_protocols, vports_num, caps->eswitch_manager);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
- seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
- vport_caps->vport_gvmi, vport_caps->icm_address_rx,
- vport_caps->icm_address_tx);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
+ domain_id, i, vport_caps->vport_gvmi,
+ vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
int ret;
- seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
- DR_DUMP_REC_TYPE_DOMAIN,
- domain_id, dmn->type, dmn->info.caps.gvmi,
- dmn->info.supp_sw_steering,
- /* package version */
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL,
- pci_name(dmn->mdev->pdev),
- 0, /* domain flags */
- dmn->num_buddies[DR_ICM_TYPE_STE],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
+ DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering,
+ /* package version */
+ LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL,
+ pci_name(dmn->mdev->pdev),
+ 0, /* domain flags */
+ dmn->num_buddies[DR_ICM_TYPE_STE],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_domain_info(file, &dmn->info, domain_id);
if (ret < 0)
@@ -683,11 +1069,91 @@ unlock_mutex:
return ret;
}
-static int dr_dump_show(struct seq_file *file, void *priv)
+static void *
+dr_dump_start(struct seq_file *file, loff_t *pos)
{
- return dr_dump_domain_all(file, file->private);
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
+ mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
+ dump_data = dmn->dump_info.dump_data;
+
+ if (dump_data) {
+ return seq_list_start(&dump_data->buff_list, *pos);
+ } else if (*pos == 0) {
+ dump_data = mlx5dr_dbg_create_dump_data();
+ if (!dump_data)
+ goto exit;
+
+ dmn->dump_info.dump_data = dump_data;
+ if (dr_dump_domain_all(file, dmn)) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ goto exit;
+ }
+
+ return seq_list_start(&dump_data->buff_list, *pos);
+ }
+
+exit:
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+ return NULL;
}
-DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
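dr_dump_start() claims single ownership of the dump through the state atomic. For comparison only, the same guard can be collapsed into one atomic step with atomic_cmpxchg(); this is an illustrative alternative, not what the patch does:

enum { MY_FREE, MY_BUSY };	/* hypothetical state values */

static int my_try_claim(atomic_t *state)
{
	/* Atomically: if *state == MY_FREE, replace it with MY_BUSY. */
	if (atomic_cmpxchg(state, MY_FREE, MY_BUSY) != MY_FREE)
		return -EBUSY;	/* another dump is in progress */
	return 0;
}

static void my_release(atomic_t *state)
{
	atomic_set(state, MY_FREE);
}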
+static void *
+dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = dmn->dump_info.dump_data;
+
+ return seq_list_next(v, &dump_data->buff_list, pos);
+}
+
+static void
+dr_dump_stop(struct seq_file *file, void *v)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (v && IS_ERR(v))
+ return;
+
+ if (!v) {
+ dump_data = dmn->dump_info.dump_data;
+ if (dump_data) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ }
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+}
+
+static int
+dr_dump_show(struct seq_file *file, void *v)
+{
+ struct mlx5dr_dbg_dump_buff *entry;
+
+ entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
+ seq_printf(file, "%s", entry->buff);
+
+ return 0;
+}
+
+static const struct seq_operations dr_dump_sops = {
+ .start = dr_dump_start,
+ .next = dr_dump_next,
+ .stop = dr_dump_stop,
+ .show = dr_dump_show,
+};
+DEFINE_SEQ_ATTRIBUTE(dr_dump);
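DEFINE_SEQ_ATTRIBUTE(dr_dump) generates dr_dump_fops from dr_dump_sops, replacing the single-shot DEFINE_SHOW_ATTRIBUTE() used before; the seq_list_start()/seq_list_next() pair is the standard way to walk a list under the seq_file protocol. Minimal shape, with a hypothetical my_item list that is not part of this patch:

#include <linux/list.h>
#include <linux/seq_file.h>

static LIST_HEAD(my_list);

struct my_item {
	struct list_head node;
	char text[64];
};

static void *my_start(struct seq_file *s, loff_t *pos)
{
	return seq_list_start(&my_list, *pos);	/* NULL ends the walk */
}

static void *my_next(struct seq_file *s, void *v, loff_t *pos)
{
	return seq_list_next(v, &my_list, pos);
}

static void my_stop(struct seq_file *s, void *v)
{
}

static int my_show(struct seq_file *s, void *v)
{
	struct my_item *it = list_entry(v, struct my_item, node);

	seq_printf(s, "%s\n", it->text);
	return 0;
}

static const struct seq_operations my_sops = {
	.start = my_start,
	.next = my_next,
	.stop = my_stop,
	.show = my_show,
};
DEFINE_SEQ_ATTRIBUTE(my);	/* emits my_fops wired to my_sops */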
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
index def6cf853eea..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -1,10 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+enum {
+ MLX5DR_DEBUG_DUMP_STATE_FREE,
+ MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+struct mlx5dr_dbg_dump_buff {
+ char *buff;
+ u32 index;
+ struct list_head node;
+};
+
+struct mlx5dr_dbg_dump_data {
+ struct list_head buff_list;
+};
+
struct mlx5dr_dbg_dump_info {
struct mutex dbg_mutex; /* protect dbg lists */
struct dentry *steering_debugfs;
struct dentry *fdb_debugfs;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ atomic_t state;
};
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 253d7ad9b809..8b63968bbee9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -124,6 +124,41 @@ static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
}
+static bool mlxbf_gige_llu_counters_enabled(struct mlxbf_gige *priv)
+{
+ u32 data;
+
+ if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF2_LLU_COUNTERS_EN)
+ return true;
+ } else {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF3_LLU_COUNTERS_EN)
+ return true;
+ }
+
+ return false;
+}
+
+static void mlxbf_gige_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ u64 data_lo, data_hi;
+
+ /* Read LLU counters to provide stats only if counters are enabled */
+ if (mlxbf_gige_llu_counters_enabled(priv)) {
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_HI);
+ pause_stats->tx_pause_frames = (data_hi << 32) | data_lo;
+
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_HI);
+ pause_stats->rx_pause_frames = (data_hi << 32) | data_lo;
+ }
+}
+
const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = mlxbf_gige_get_ringparam,
@@ -134,6 +169,7 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_ethtool_stats = mlxbf_gige_get_ethtool_stats,
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
+ .get_pause_stats = mlxbf_gige_get_pause_stats,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
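The pause counters are 64-bit values exposed as a hi/lo register pair, and data_hi is declared u64 so the 32-bit shift above is well defined. When a counter may tick between the two readl() calls, the usual hedge is the hi/lo/hi sequence; a sketch with hypothetical REG_CNT_* offsets:

/* Torn-read-safe 64-bit counter read; REG_CNT_HI/LO are hypothetical. */
static u64 read_cnt64(void __iomem *base)
{
	u32 hi, lo, hi2;

	do {
		hi = readl(base + REG_CNT_HI);
		lo = readl(base + REG_CNT_LO);
		hi2 = readl(base + REG_CNT_HI);
	} while (hi != hi2);	/* lo wrapped between reads; retry */

	return ((u64)hi << 32) | lo;
}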
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index cd0973229c9b..98a8681c21b9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -99,4 +99,34 @@
#define MLXBF_GIGE_100M_IPG_SIZE 119
#define MLXBF_GIGE_10M_IPG_SIZE 1199
+/* Offsets into OOB LLU block for pause frame counters */
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI 0x33d8
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO 0x33dc
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI 0x3210
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO 0x3214
+
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI 0x3a88
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO 0x3a8c
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI 0x38c0
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO 0x38c4
+
+#define MLXBF_GIGE_TX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_TX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO)
+#define MLXBF_GIGE_RX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_RX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO)
+
+#define MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG 0x2110
+#define MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG 0x2030
+
+#define MLXBF_GIGE_BF2_LLU_COUNTERS_EN BIT(0)
+#define MLXBF_GIGE_BF3_LLU_COUNTERS_EN BIT(4)
+
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index faa63ea9b83e..1915fa41c622 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -95,7 +95,7 @@ struct mlxsw_afa_set {
*/
has_trap:1,
has_police:1;
- unsigned int ref_count;
+ refcount_t ref_count;
struct mlxsw_afa_set *next; /* Pointer to the next set. */
struct mlxsw_afa_set *prev; /* Pointer to the previous set,
* note that set may have multiple
@@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
struct rhash_head ht_node;
struct mlxsw_afa_fwd_entry_ht_key ht_key;
u32 kvdl_index;
- unsigned int ref_count;
+ refcount_t ref_count;
};
static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
@@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
/* Need to initialize the set to pass by default */
mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
set->ht_key.is_first = is_first;
- set->ref_count = 1;
+ refcount_set(&set->ref_count, 1);
return set;
}
@@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_set *set)
{
- if (--set->ref_count)
+ if (!refcount_dec_and_test(&set->ref_count))
return;
if (set->shared)
mlxsw_afa_set_unshare(mlxsw_afa, set);
@@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
mlxsw_afa_set_ht_params);
if (set) {
- set->ref_count++;
+ refcount_inc(&set->ref_count);
mlxsw_afa_set_put(mlxsw_afa, orig_set);
} else {
set = orig_set;
@@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
if (!fwd_entry)
return ERR_PTR(-ENOMEM);
fwd_entry->ht_key.local_port = local_port;
- fwd_entry->ref_count = 1;
+ refcount_set(&fwd_entry->ref_count, 1);
err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
&fwd_entry->ht_node,
@@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
mlxsw_afa_fwd_entry_ht_params);
if (fwd_entry) {
- fwd_entry->ref_count++;
+ refcount_inc(&fwd_entry->ref_count);
return fwd_entry;
}
return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
@@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_fwd_entry *fwd_entry)
{
- if (--fwd_entry->ref_count)
+ if (!refcount_dec_and_test(&fwd_entry->ref_count))
return;
mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}
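The unsigned int to refcount_t conversions in this file are mechanical, but refcount_t saturates instead of wrapping and WARNs on underflow and on zero-to-one revival. The canonical get/put shape after such a conversion, with hypothetical my_obj names:

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t ref_count;
};

static void my_obj_init(struct my_obj *obj)
{
	refcount_set(&obj->ref_count, 1);	/* creator holds one reference */
}

static void my_obj_get(struct my_obj *obj)
{
	refcount_inc(&obj->ref_count);		/* WARNs if count was zero */
}

static void my_obj_put(struct my_obj *obj)
{
	if (!refcount_dec_and_test(&obj->ref_count))
		return;
	kfree(obj);				/* last reference dropped */
}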
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 0d5e6f9b466e..947500f8ed71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include "item.h"
#include "core_acl_flex_keys.h"
@@ -107,7 +108,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
struct mlxsw_afk_key_info {
struct list_head list;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int blocks_count;
int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
* is index inside "blocks"
@@ -334,7 +335,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
if (err)
goto err_picker;
list_add(&key_info->list, &mlxsw_afk->key_info_list);
- key_info->ref_count = 1;
+ refcount_set(&key_info->ref_count, 1);
return key_info;
err_picker:
@@ -356,7 +357,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
if (key_info) {
- key_info->ref_count++;
+ refcount_inc(&key_info->ref_count);
return key_info;
}
return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
@@ -365,7 +366,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
{
- if (--key_info->ref_count)
+ if (!refcount_dec_and_test(&key_info->ref_count))
return;
mlxsw_afk_key_info_destroy(key_info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 6b98c3287b49..f0ceb196a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -708,7 +708,6 @@ static const struct i2c_device_id mlxsw_m_i2c_id[] = {
static struct i2c_driver mlxsw_m_i2c_driver = {
.driver.name = "mlxsw_minimal",
- .class = I2C_CLASS_HWMON,
.id_table = mlxsw_m_i2c_id,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5d3413636a62..bb642e9bb6cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -176,13 +176,15 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes)
+ unsigned int counter_index, bool clear,
+ u64 *packets, u64 *bytes)
{
+ enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
+ MLXSW_REG_MGPC_OPCODE_NOP;
char mgpc_pl[MLXSW_REG_MGPC_LEN];
int err;
- mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
if (err)
@@ -2695,23 +2697,18 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
char sgcr_pl[MLXSW_REG_SGCR_LEN];
- u16 max_lag;
int err;
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return 0;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
-
/* In DDD mode, which we use by default, each LAG entry is 8 PGT
* entries. The LAG table address needs to be 8-aligned, but that ought
* to be the case, since the LAG table is allocated first.
*/
err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
if (err)
return err;
if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
@@ -2728,33 +2725,31 @@ static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
err_mid_alloc_range:
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
return err;
}
static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
- u16 max_lag;
- int err;
-
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return;
-
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
}
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
+struct mlxsw_sp_lag {
+ struct net_device *dev;
+ refcount_t ref_count;
+ u16 lag_id;
+};
+
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
- u16 max_lag;
u32 seed;
int err;
@@ -2773,7 +2768,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
if (err)
return err;
@@ -2784,7 +2779,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
GFP_KERNEL);
if (!mlxsw_sp->lags) {
err = -ENOMEM;
@@ -4269,19 +4264,48 @@ mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
+ struct mlxsw_sp_lag *lag;
+ u16 lag_id;
+ int i, err;
+
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ break;
+ }
+
+ if (i == mlxsw_sp->max_lag) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported LAG devices");
+ return ERR_PTR(-EBUSY);
+ }
+ lag_id = i;
mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ if (err)
+ return ERR_PTR(err);
+
+ lag = &mlxsw_sp->lags[lag_id];
+ lag->lag_id = lag_id;
+ lag->dev = lag_dev;
+ refcount_set(&lag->ref_count, 1);
+
+ return lag;
}
-static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static int
+mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
- mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+ lag->dev = NULL;
+
+ mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
@@ -4329,34 +4353,44 @@ static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
-static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *lag_dev,
- u16 *p_lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
{
- struct mlxsw_sp_upper *lag;
- int free_lag_id = -1;
- u16 max_lag;
- int err, i;
+ int i;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ continue;
- for (i = 0; i < max_lag; i++) {
- lag = mlxsw_sp_lag_get(mlxsw_sp, i);
- if (lag->ref_count) {
- if (lag->dev == lag_dev) {
- *p_lag_id = i;
- return 0;
- }
- } else if (free_lag_id < 0) {
- free_lag_id = i;
- }
+ if (mlxsw_sp->lags[i].dev == lag_dev)
+ return &mlxsw_sp->lags[i];
}
- if (free_lag_id < 0)
- return -EBUSY;
- *p_lag_id = free_lag_id;
- return 0;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_lag *lag;
+
+ lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
+ if (lag) {
+ refcount_inc(&lag->ref_count);
+ return lag;
+ }
+
+ return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
+}
+
+static void
+mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
+{
+ if (!refcount_dec_and_test(&lag->ref_count))
+ return;
+
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag);
}
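mlxsw_sp_lag_get() is now a standard find-or-create entry point: take a reference on an existing LAG or create one, reporting failure through ERR_PTR() so callers can use IS_ERR()/PTR_ERR(). The shape in miniature, with hypothetical names:

/* Find-or-create returning ERR_PTR() on failure; names are hypothetical. */
static struct my_obj *my_obj_get(struct my_table *tbl, void *key)
{
	struct my_obj *obj;

	obj = my_obj_find(tbl, key);	/* NULL when not present */
	if (obj) {
		refcount_inc(&obj->ref_count);
		return obj;
	}
	return my_obj_create(tbl, key);	/* ERR_PTR(-ENOMEM) etc. on error */
}

static int my_user_bind(struct my_table *tbl, void *key)
{
	struct my_obj *obj = my_obj_get(tbl, key);

	if (IS_ERR(obj))
		return PTR_ERR(obj);
	return 0;
}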
static bool
@@ -4365,12 +4399,6 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
struct netdev_lag_upper_info *lag_upper_info,
struct netlink_ext_ack *extack)
{
- u16 lag_id;
-
- if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
- return false;
- }
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
@@ -4482,22 +4510,16 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
u16 lag_id;
u8 port_index;
int err;
- err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
- if (err)
- return err;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- if (!lag->ref_count) {
- err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
- if (err)
- return err;
- lag->dev = lag_dev;
- }
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
+ if (IS_ERR(lag))
+ return PTR_ERR(lag);
+ lag_id = lag->lag_id;
err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
if (err)
return err;
@@ -4515,7 +4537,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
- lag->ref_count++;
err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
if (err)
@@ -4542,7 +4563,6 @@ err_replay:
err_router_join:
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
err_fid_port_join_lag:
- lag->ref_count--;
mlxsw_sp_port->lagged = 0;
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
@@ -4550,8 +4570,7 @@ err_fid_port_join_lag:
err_col_port_add:
mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
- if (!lag->ref_count)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
return err;
}
@@ -4560,12 +4579,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 lag_id = mlxsw_sp_port->lag_id;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
if (!mlxsw_sp_port->lagged)
return;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- WARN_ON(lag->ref_count == 0);
+ lag = &mlxsw_sp->lags[lag_id];
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
@@ -4579,13 +4597,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
- if (lag->ref_count == 1)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
- lag->ref_count--;
/* Make sure untagged frames are allowed to ingress */
mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0c9775fa955..3beb5d0847ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -78,11 +78,6 @@ struct mlxsw_sp_span_entry;
enum mlxsw_sp_l3proto;
union mlxsw_sp_l3addr;
-struct mlxsw_sp_upper {
- struct net_device *dev;
- unsigned int ref_count;
-};
-
enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_SUBPORT,
MLXSW_SP_RIF_TYPE_VLAN,
@@ -136,6 +131,7 @@ struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
struct mlxsw_sp_pgt;
+struct mlxsw_sp_lag;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -164,7 +160,8 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
- struct mlxsw_sp_upper *lags;
+ struct mlxsw_sp_lag *lags;
+ u16 max_lag;
struct mlxsw_sp_port_mapping *port_mapping;
struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
@@ -257,12 +254,6 @@ struct mlxsw_sp_fid_core_ops {
void (*fini)(struct mlxsw_sp *mlxsw_sp);
};
-static inline struct mlxsw_sp_upper *
-mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
-{
- return &mlxsw_sp->lags[lag_id];
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -715,8 +706,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes);
+ unsigned int counter_index, bool clear,
+ u64 *packets, u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 7c59c8a13584..3e70cee4d2f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
struct rhash_head ht_node; /* Member of acl HT */
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int min_prio;
unsigned int max_prio;
unsigned long priv[];
@@ -99,7 +100,7 @@ static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
/* We hold a reference on ruleset ourselves */
- return ruleset->ref_count == 2;
+ return refcount_read(&ruleset->ref_count) == 2;
}
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
@@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
ruleset = kzalloc(alloc_size, GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
- ruleset->ref_count = 1;
+ refcount_set(&ruleset->ref_count, 1);
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
@@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
- ruleset->ref_count++;
+ refcount_inc(&ruleset->ref_count);
}
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
- if (--ruleset->ref_count)
+ if (!refcount_dec_and_test(&ruleset->ref_count))
return;
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
@@ -1023,7 +1024,7 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
rulei = mlxsw_sp_acl_rule_rulei(rule);
if (rulei->counter_valid) {
err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
- &current_packets,
+ false, &current_packets,
&current_bytes);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50ea1eff02b2..f20052776b3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -155,7 +156,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@@ -176,7 +177,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
@@ -769,7 +770,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
- vregion->ref_count = 1;
+ refcount_set(&vregion->ref_count, 1);
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@@ -856,7 +857,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
*/
return ERR_PTR(-EOPNOTSUPP);
}
- vregion->ref_count++;
+ refcount_inc(&vregion->ref_count);
return vregion;
}
@@ -871,7 +872,7 @@ static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- if (--vregion->ref_count)
+ if (!refcount_dec_and_test(&vregion->ref_count))
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
@@ -924,7 +925,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->vgroup = vgroup;
- vchunk->ref_count = 1;
+ refcount_set(&vchunk->ref_count, 1);
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
@@ -1008,7 +1009,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
- vchunk->ref_count++;
+ refcount_inc(&vchunk->ref_count);
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
@@ -1019,7 +1020,7 @@ static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- if (--vchunk->ref_count)
+ if (!refcount_dec_and_test(&vchunk->ref_count))
return;
mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index c8a356accdf8..ca80af06465f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -1181,9 +1181,11 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
char ratr_pl[MLXSW_REG_RATR_LEN];
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_nexthop *nh;
+ unsigned int n_done = 0;
u32 adj_hash_index = 0;
u32 adj_index = 0;
u32 adj_size = 0;
+ int err;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_is_forward(nh) ||
@@ -1192,15 +1194,27 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
&adj_hash_index);
- if (enable)
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
- else
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ if (enable) {
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ goto err_counter_enable;
+ } else {
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ }
mlxsw_sp_nexthop_eth_update(mlxsw_sp,
adj_index + adj_hash_index, nh,
true, ratr_pl);
+ n_done++;
}
return 0;
+
+err_counter_enable:
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!n_done--)
+ break;
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ }
+ return err;
}
static u64
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 221aa6a474eb..01d81ae3662a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -361,7 +361,7 @@ static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_route *route = route_priv;
return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
- packets, bytes);
+ false, packets, bytes);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7164f9e6370f..40ba314fbc72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -19,6 +19,7 @@
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
+#include <linux/xarray.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -501,7 +502,7 @@ struct mlxsw_sp_rt6 {
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
- unsigned int ref_count;
+ refcount_t ref_count;
enum mlxsw_sp_l3proto proto;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
@@ -578,7 +579,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count == 0)
+ if (refcount_read(&lpm_tree->ref_count) == 0)
return lpm_tree;
}
return NULL;
@@ -654,7 +655,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
sizeof(lpm_tree->prefix_usage));
memset(&lpm_tree->prefix_ref_count, 0,
sizeof(lpm_tree->prefix_ref_count));
- lpm_tree->ref_count = 1;
+ refcount_set(&lpm_tree->ref_count, 1);
return lpm_tree;
err_left_struct_set:
@@ -678,7 +679,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count != 0 &&
+ if (refcount_read(&lpm_tree->ref_count) &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage)) {
@@ -691,14 +692,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
- lpm_tree->ref_count++;
+ refcount_inc(&lpm_tree->ref_count);
}
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ if (!refcount_dec_and_test(&lpm_tree->ref_count))
+ return;
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -2250,7 +2252,7 @@ int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
- p_counter, NULL);
+ false, p_counter, NULL);
}
static struct mlxsw_sp_neigh_entry *
@@ -3048,6 +3050,8 @@ struct mlxsw_sp_nexthop_key {
struct fib_nh *fib_nh;
};
+struct mlxsw_sp_nexthop_counter;
+
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head crif_list_node;
@@ -3079,8 +3083,8 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
};
- unsigned int counter_index;
- bool counter_valid;
+ struct mlxsw_sp_nexthop_counter *counter;
+ u32 id; /* NH ID for members of a NH object group. */
};
static struct net_device *
@@ -3105,8 +3109,10 @@ struct mlxsw_sp_nexthop_group_info {
int sum_norm_weight;
u8 adj_index_valid:1,
gateway:1, /* routes using the group use a gateway */
- is_resilient:1;
+ is_resilient:1,
+ hw_stats:1;
struct list_head list; /* member in nh_res_grp_list */
+ struct xarray nexthop_counters;
struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
};
@@ -3150,39 +3156,148 @@ struct mlxsw_sp_nexthop_group {
bool can_destroy;
};
-void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+struct mlxsw_sp_nexthop_counter {
+ unsigned int counter_index;
+ refcount_t ref_count;
+};
+
+static struct mlxsw_sp_nexthop_counter *
+mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nexthop_counter *nhct;
+ int err;
+
+ nhct = kzalloc(sizeof(*nhct), GFP_KERNEL);
+ if (!nhct)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nhct->counter_index);
+ if (err)
+ goto err_counter_alloc;
+
+ refcount_set(&nhct->ref_count, 1);
+ return nhct;
+
+err_counter_alloc:
+ kfree(nhct);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_counter *nhct)
+{
+ mlxsw_sp_flow_counter_free(mlxsw_sp, nhct->counter_index);
+ kfree(nhct);
+}
+
+static struct mlxsw_sp_nexthop_counter *
+mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ struct mlxsw_sp_nexthop_counter *nhct;
+ void *ptr;
+ int err;
+
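+ /* Members of a nexthop object group share one counter per nexthop
+ * ID; reuse an existing entry from the group's xarray before
+ * allocating a new one.
+ */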
+ nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
+ if (nhct) {
+ refcount_inc(&nhct->ref_count);
+ return nhct;
+ }
+
+ nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
+ if (IS_ERR(nhct))
+ return nhct;
+
+ ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
+ GFP_KERNEL);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_store;
+ }
+
+ return nhct;
+
+err_store:
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_nexthop_sh_counter_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ struct mlxsw_sp_nexthop_counter *nhct;
+
+ nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
+ if (WARN_ON(!nhct))
+ return;
+
+ if (!refcount_dec_and_test(&nhct->ref_count))
+ return;
+
+ xa_erase(&nh_grp->nhgi->nexthop_counters, nh->id);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
+}
+
+int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
+ const char *table_adj = MLXSW_SP_DPIPE_TABLE_NAME_ADJ;
+ struct mlxsw_sp_nexthop_counter *nhct;
struct devlink *devlink;
+ bool dpipe_stats;
+
+ if (nh->counter)
+ return 0;
devlink = priv_to_devlink(mlxsw_sp->core);
- if (!devlink_dpipe_table_counter_enabled(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
- return;
+ dpipe_stats = devlink_dpipe_table_counter_enabled(devlink, table_adj);
+ if (!(nh->nhgi->hw_stats || dpipe_stats))
+ return 0;
- if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
- return;
+ if (nh->id)
+ nhct = mlxsw_sp_nexthop_sh_counter_get(mlxsw_sp, nh);
+ else
+ nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
+ if (IS_ERR(nhct))
+ return PTR_ERR(nhct);
- nh->counter_valid = true;
+ nh->counter = nhct;
+ return 0;
}
-void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
- if (!nh->counter_valid)
+ if (!nh->counter)
return;
- mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
- nh->counter_valid = false;
+
+ if (nh->id)
+ mlxsw_sp_nexthop_sh_counter_put(mlxsw_sp, nh);
+ else
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh->counter);
+ nh->counter = NULL;
+}
+
+static int mlxsw_sp_nexthop_counter_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->nhgi->hw_stats)
+ return mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ return 0;
}
int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
- if (!nh->counter_valid)
+ if (!nh->counter)
return -EINVAL;
- return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
- p_counter, NULL);
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter->counter_index,
+ true, p_counter, NULL);
}
struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
@@ -3655,8 +3770,9 @@ static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(1);
return -EINVAL;
}
- if (nh->counter_valid)
- mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
+ if (nh->counter)
+ mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter->counter_index,
+ true);
else
mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
@@ -3743,6 +3859,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
nh = &nhgi->nexthops[i];
if (!nh->should_offload) {
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
nh->offloaded = 0;
continue;
}
@@ -3750,6 +3867,10 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
int err = 0;
+ err = mlxsw_sp_nexthop_counter_update(mlxsw_sp, nh);
+ if (err)
+ return err;
+
err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
true, ratr_pl);
if (err)
@@ -4506,7 +4627,10 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ goto err_counter_enable;
+
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
if (!dev)
@@ -4530,7 +4654,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
err_nexthop_neigh_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+err_counter_enable:
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -4540,7 +4665,7 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
@@ -5005,9 +5130,9 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
break;
}
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
nh->ifindex = dev->ifindex;
+ nh->id = nh_obj->id;
err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
if (err)
@@ -5029,7 +5154,6 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
err_type_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
return err;
}
@@ -5040,7 +5164,7 @@ static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
nh->should_offload = 0;
}
@@ -5052,6 +5176,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
bool is_resilient = false;
+ bool hw_stats = false;
unsigned int nhs;
int err, i;
@@ -5061,9 +5186,11 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
break;
case NH_NOTIFIER_INFO_TYPE_GRP:
nhs = info->nh_grp->num_nh;
+ hw_stats = info->nh_grp->hw_stats;
break;
case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
nhs = info->nh_res_table->num_nh_buckets;
+ hw_stats = info->nh_res_table->hw_stats;
is_resilient = true;
break;
default:
@@ -5078,6 +5205,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
nhgi->is_resilient = is_resilient;
nhgi->count = nhs;
+ nhgi->hw_stats = hw_stats;
+
+ xa_init_flags(&nhgi->nexthop_counters, XA_FLAGS_ALLOC1);
+
for (i = 0; i < nhgi->count; i++) {
struct nh_notifier_single_info *nh_obj;
int weight;
@@ -5160,6 +5291,8 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
WARN_ON_ONCE(nhgi->adj_index_valid);
+ WARN_ON(!xa_empty(&nhgi->nexthop_counters));
+ xa_destroy(&nhgi->nexthop_counters);
kfree(nhgi);
}
@@ -5299,6 +5432,43 @@ err_out:
return err;
}
+static int mlxsw_sp_nexthop_obj_res_group_pre(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct nh_notifier_grp_info *grp_info = info->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+ int i;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return 0;
+ nhgi = nh_grp->nhgi;
+
+ if (nhgi->hw_stats == grp_info->hw_stats)
+ return 0;
+
+ nhgi->hw_stats = grp_info->hw_stats;
+
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ if (nh->offloaded)
+ nh->update = 1;
+ }
+
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ nhgi->hw_stats = !grp_info->hw_stats;
+ return err;
+}
+
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
struct nh_notifier_info *info)
{
@@ -5475,6 +5645,79 @@ err_nexthop_obj_init:
return err;
}
+static void
+mlxsw_sp_nexthop_obj_mp_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ struct nh_notifier_grp_hw_stats_info *info)
+{
+ int nhi;
+
+ for (nhi = 0; nhi < info->num_nh; nhi++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[nhi];
+ u64 packets;
+ int err;
+
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
+ if (err)
+ continue;
+
+ nh_grp_hw_stats_report_delta(info, nhi, packets);
+ }
+}
+
+static void
+mlxsw_sp_nexthop_obj_res_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ struct nh_notifier_grp_hw_stats_info *info)
+{
+ int nhi = -1;
+ int bucket;
+
+ for (bucket = 0; bucket < nhgi->count; bucket++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[bucket];
+ u64 packets;
+ int err;
+
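+ /* Consecutive buckets often resolve to the same nexthop, so the
+ * stats index is cached and info->stats is re-scanned only when
+ * the cached ID stops matching.
+ */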
+ if (nhi == -1 || info->stats[nhi].id != nh->id) {
+ for (nhi = 0; nhi < info->num_nh; nhi++)
+ if (info->stats[nhi].id == nh->id)
+ break;
+ if (WARN_ON_ONCE(nhi == info->num_nh)) {
+ nhi = -1;
+ continue;
+ }
+ }
+
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
+ if (err)
+ continue;
+
+ nh_grp_hw_stats_report_delta(info, nhi, packets);
+ }
+}
+
+static void mlxsw_sp_nexthop_obj_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ if (info->type != NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS)
+ return;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return;
+ nhgi = nh_grp->nhgi;
+
+ if (nhgi->is_resilient)
+ mlxsw_sp_nexthop_obj_res_hw_stats_get(mlxsw_sp, nhgi,
+ info->nh_grp_hw_stats);
+ else
+ mlxsw_sp_nexthop_obj_mp_hw_stats_get(mlxsw_sp, nhgi,
+ info->nh_grp_hw_stats);
+}
+
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -5490,6 +5733,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
mutex_lock(&router->lock);
switch (event) {
+ case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
+ err = mlxsw_sp_nexthop_obj_res_group_pre(router->mlxsw_sp,
+ info);
+ break;
case NEXTHOP_EVENT_REPLACE:
err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
break;
@@ -5500,6 +5747,9 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
info);
break;
+ case NEXTHOP_EVENT_HW_STATS_REPORT_DELTA:
+ mlxsw_sp_nexthop_obj_hw_stats_get(router->mlxsw_sp, info);
+ break;
default:
break;
}
@@ -6733,7 +6983,10 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
#if IS_ENABLED(CONFIG_IPV6)
nh->neigh_tbl = &nd_tbl;
#endif
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ return err;
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -6749,7 +7002,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
err_nexthop_type_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
return err;
}
@@ -6758,7 +7011,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index ed3b628caafe..0432c7cc6b07 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -156,10 +156,10 @@ int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_nexthop *nh, bool force,
char *ratr_pl);
-void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh);
-void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
const union mlxsw_sp_l3addr *addr2)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6c749c148148..6397ff0dc951 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
- unsigned int ref_count;
+ refcount_t ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
@@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
- bridge_port->ref_count = 1;
+ refcount_set(&bridge_port->ref_count, 1);
err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
NULL, NULL, NULL, false, extack);
@@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
- bridge_port->ref_count++;
+ refcount_inc(&bridge_port->ref_count);
return bridge_port;
}
@@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- if (--bridge_port->ref_count != 0)
+ if (!refcount_dec_and_test(&bridge_port->ref_count))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 5693784eec5b..443128adbcb6 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -464,7 +464,7 @@ static struct regmap_config regcfg = {
.val_bits = 16,
.max_register = 0xee,
.reg_stride = 2,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_regmap_readable,
.writeable_reg = encx24j600_regmap_writeable,
@@ -485,7 +485,7 @@ static struct regmap_config phycfg = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0x1f,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_phymap_readable,
.writeable_reg = encx24j600_phymap_writeable,
@@ -513,4 +513,5 @@ int devm_regmap_init_encx24j600(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+MODULE_DESCRIPTION("Microchip ENCX24J600 helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index a2b3f4433ca8..8a6ae171e375 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1055,7 +1055,7 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
}
static int lan743x_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
@@ -1092,7 +1092,7 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter;
struct phy_device *phydev;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 45e209a7d083..bd8aa83b47e5 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1196,7 +1196,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
if (ret < 0) {
netif_err(adapter, drv, adapter->netdev,
- "erro %d SGMII get mode failed\n", ret);
+ "error %d SGMII get mode failed\n", ret);
return ret;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2f04bc77a118..2801f08bf1c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1712,13 +1712,13 @@ bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
bool result = false;
- spin_lock_bh(&ptp->tx_ts_lock);
+ spin_lock(&ptp->tx_ts_lock);
if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
/* request granted */
ptp->pending_tx_timestamps++;
result = true;
}
- spin_unlock_bh(&ptp->tx_ts_lock);
+ spin_unlock(&ptp->tx_ts_lock);
return result;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
index ac525ff1503e..3a01e13bd10b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -25,6 +25,8 @@ static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
for (int l = 0; l < admin->lookups; ++l) {
out->prf(out->dst, "\n Lookup %d: ", l);
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l));
+
out->prf(out->dst, "\n other: ");
switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
case VCAP_IS1_PS_OTHER_NORMAL:
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index d33b27214539..1332db9a08eb 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1249,15 +1249,47 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
+static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+{
+ const struct cpumask *next, *prev = cpu_none_mask;
+ cpumask_var_t cpus __free(free_cpumask_var);
+ int cpu, weight;
+
+ if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ rcu_read_lock();
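+ /* Spread IRQs over CPUs in order of increasing NUMA distance from
+ * @node, binding each IRQ to one core's sibling mask; once every
+ * core in the current hop has been used, the walk wraps around and
+ * reuses them.
+ */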
+ for_each_numa_hop_mask(next, node) {
+ weight = cpumask_weight_andnot(next, prev);
+ while (weight > 0) {
+ cpumask_andnot(cpus, next, prev);
+ for_each_cpu(cpu, cpus) {
+ if (len-- == 0)
+ goto done;
+ irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+ }
+ }
+ prev = next;
+ }
+done:
+ rcu_read_unlock();
+ return 0;
+}
+
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
- unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
unsigned int max_irqs, cpu;
- int nvec, irq;
+ int start_irq_index = 1;
+ int nvec, *irqs, irq;
int err, i = 0, j;
+ cpus_read_lock();
+ max_queues_per_port = num_online_cpus();
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1265,8 +1297,18 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
max_irqs = max_queues_per_port + 1;
nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0)
+ if (nvec < 0) {
+ cpus_read_unlock();
return nvec;
+ }
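+ /* With no more IRQs than online CPUs, IRQ0 (the hwc IRQ) is
+ * spread together with the rest; otherwise it keeps its own CPU
+ * assignment in the request loop below.
+ */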
+ if (nvec <= num_online_cpus())
+ start_irq_index = 0;
+
+ irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
+ if (!irqs) {
+ err = -ENOMEM;
+ goto free_irq_vector;
+ }
gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
GFP_KERNEL);
@@ -1294,17 +1336,41 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq;
}
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ if (!i) {
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+
+ /* If the number of IRQs is one more than the number of
+ * online CPUs, then IRQ0 (the hwc IRQ) and IRQ1 must be
+ * assigned to the same CPU.
+ * Otherwise IRQ0 and IRQ1 use different CPUs.
+ * Also, cpumask_local_spread is used instead of cpumask_first
+ * for the node, because the node can be memory-only.
+ */
+ if (start_irq_index) {
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ } else {
+ irqs[start_irq_index] = irq;
+ }
+ } else {
+ irqs[i - start_irq_index] = irq;
+ err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
+ gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
}
+ err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
+ if (err)
+ goto free_irq;
+
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
-
+ cpus_read_unlock();
return 0;
free_irq:
@@ -1317,8 +1383,10 @@ free_irq:
}
kfree(gc->irq_contexts);
+ kfree(irqs);
gc->irq_contexts = NULL;
free_irq_vector:
+ cpus_read_unlock();
pci_free_irq_vectors(pdev);
return err;
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 56ccbd4c37fe..ed2fb44500b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -3078,4 +3078,5 @@ void ocelot_deinit_port(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_deinit_port);
+MODULE_DESCRIPTION("Microsemi Ocelot switch family library");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2b383d92d7f5..2c3f62907958 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -460,7 +460,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip6_dst_hoplimit(dst);
dst_release(dst);
} else {
- set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit);
}
#endif
} else {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 361d7c495e2d..2c7bd6e80d99 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -337,6 +337,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
acti_netdevs = kmalloc_array(entry->slave_cnt,
sizeof(*acti_netdevs), GFP_KERNEL);
+ if (!acti_netdevs) {
+ schedule_delayed_work(&lag->work,
+ NFP_FL_LAG_DELAY);
+ continue;
+ }
/* Include sanity check in the loop. It may be that a bond has
* changed between processing the last notification and the
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7a549b834e97..31f896c4aa26 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1761,7 +1761,7 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
- * Called with read_lock(&dev_base_lock) held for read -
+ * Called with rcu_read_lock() held -
* only synchronized against unregister_netdevice.
*/
static void
@@ -3090,7 +3090,7 @@ static void set_bufsize(struct net_device *dev)
/*
* nv_change_mtu: dev->change_mtu function
- * Called with dev_base_lock held for read.
+ * Called with RTNL held for read.
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 9ffef2e06885..2ccc2c2a06e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -76,6 +76,8 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
+bool ionic_notifyq_service(struct ionic_cq *cq);
+bool ionic_adminq_service(struct ionic_cq *cq);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 91327ef670c7..c3ae11a48024 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -113,8 +113,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
+ struct dentry *intr_dentry, *stats_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
struct debugfs_blob_wrapper *desc_blob;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 746072b4dbd0..874499337132 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -629,43 +629,25 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->desc_size = desc_size;
cq->tail_idx = 0;
cq->done_color = 1;
+ cq->idev = &lif->ionic->idev;
return 0;
}
-void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
-{
- struct ionic_cq_info *cur;
- unsigned int i;
-
- cq->base = base;
- cq->base_pa = base_pa;
-
- for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
- cur->cq_desc = base + (i * cq->desc_size);
-}
-
-void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-{
- cq->bound_q = q;
-}
-
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
- struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
- cq_info = &cq->info[cq->tail_idx];
- while (cb(cq, cq_info)) {
+ while (cb(cq)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
+
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
- cq_info = &cq->info[cq->tail_idx];
if (++work_done >= work_to_do)
break;
@@ -692,7 +674,6 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return -EINVAL;
q->lif = lif;
- q->idev = idev;
q->index = index;
q->num_descs = num_descs;
q->desc_size = desc_size;
@@ -706,53 +687,11 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return 0;
}
-void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->base = base;
- q->base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->desc = base + (i * q->desc_size);
-}
-
-void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->cmb_base = base;
- q->cmb_base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->cmb_desc = base + (i * q->desc_size);
-}
-
-void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->sg_base = base;
- q->sg_base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->sg_desc = base + (i * q->sg_desc_size);
-}
-
-void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
- void *cb_arg)
-{
- struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
struct device *dev = q->dev;
- desc_info = &q->info[q->head_idx];
- desc_info->cb = cb;
- desc_info->cb_arg = cb_arg;
-
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
@@ -771,7 +710,7 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
}
}
-static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
+bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
{
unsigned int mask, tail, head;
@@ -781,37 +720,3 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
return ((pos - tail) & mask) < ((head - tail) & mask);
}
-
-void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
- unsigned int stop_index)
-{
- struct ionic_desc_info *desc_info;
- ionic_desc_cb cb;
- void *cb_arg;
- u16 index;
-
- /* check for empty queue */
- if (q->tail_idx == q->head_idx)
- return;
-
- /* stop index must be for a descriptor that is not yet completed */
- if (unlikely(!ionic_q_is_posted(q, stop_index)))
- dev_err(q->dev,
- "ionic stop is not posted %s stop %u tail %u head %u\n",
- q->name, stop_index, q->tail_idx, q->head_idx);
-
- do {
- desc_info = &q->info[q->tail_idx];
- index = q->tail_idx;
- q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
-
- cb = desc_info->cb;
- cb_arg = desc_info->cb_arg;
-
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
-
- if (cb)
- cb(q, desc_info, cq_info, cb_arg);
- } while (index != stop_index);
-}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2667e1cde16b..f30eee4a5a80 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
#include "ionic_if.h"
#include "ionic_regs.h"
@@ -15,9 +16,10 @@
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 64
-#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_DEF_TXRX_DESC 1024
#define IONIC_RX_FILL_THRESHOLD 16
#define IONIC_RX_FILL_DIV 8
+#define IONIC_TSO_DESCS_NEEDED 44 /* 64K TSO @1500B */
#define IONIC_LIFS_MAX 1024
#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
@@ -120,11 +122,13 @@ static_assert(sizeof(struct ionic_log_event) == 64);
/* I/O */
static_assert(sizeof(struct ionic_txq_desc) == 16);
static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
+static_assert(sizeof(struct ionic_txq_sg_desc_v1) == 256);
static_assert(sizeof(struct ionic_txq_comp) == 16);
static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);
+static_assert(sizeof(struct ionic_rxq_comp) == sizeof(struct ionic_txq_comp));
/* SR/IOV */
static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
@@ -173,21 +177,8 @@ struct ionic_dev {
struct ionic_devinfo dev_info;
};
-struct ionic_cq_info {
- union {
- void *cq_desc;
- struct ionic_admin_comp *admincq;
- struct ionic_notifyq_event *notifyq;
- };
-};
-
struct ionic_queue;
struct ionic_qcq;
-struct ionic_desc_info;
-
-typedef void (*ionic_desc_cb)(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
#define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE
@@ -195,6 +186,11 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
__GFP_COMP | __GFP_MEMALLOC)
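+/* Largest MTU for which the Ethernet frame, XDP headroom, and trailing
+ * skb_shared_info still fit in a single page.
+ */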
+#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
+ (VLAN_ETH_HLEN + \
+ XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
struct ionic_buf_info {
struct page *page;
dma_addr_t dma_addr;
@@ -202,26 +198,25 @@ struct ionic_buf_info {
u32 len;
};
-#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+#define IONIC_TX_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+#define IONIC_RX_MAX_FRAGS (1 + IONIC_RX_MAX_SG_ELEMS)
-struct ionic_desc_info {
- union {
- void *desc;
- struct ionic_txq_desc *txq_desc;
- struct ionic_rxq_desc *rxq_desc;
- struct ionic_admin_cmd *adminq_desc;
- };
- void __iomem *cmb_desc;
- union {
- void *sg_desc;
- struct ionic_txq_sg_desc *txq_sg_desc;
- struct ionic_rxq_sg_desc *rxq_sgl_desc;
- };
+struct ionic_tx_desc_info {
unsigned int bytes;
unsigned int nbufs;
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ enum xdp_action act;
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
- ionic_desc_cb cb;
- void *cb_arg;
+};
+
+struct ionic_rx_desc_info {
+ unsigned int nbufs;
+ struct ionic_buf_info bufs[IONIC_RX_MAX_FRAGS];
+};
+
+struct ionic_admin_desc_info {
+ void *ctx;
};
#define IONIC_QUEUE_NAME_MAX_SZ 16
@@ -229,7 +224,12 @@ struct ionic_desc_info {
struct ionic_queue {
struct device *dev;
struct ionic_lif *lif;
- struct ionic_desc_info *info;
+ union {
+ void *info;
+ struct ionic_tx_desc_info *tx_info;
+ struct ionic_rx_desc_info *rx_info;
+ struct ionic_admin_desc_info *admin_info;
+ };
u64 dbval;
unsigned long dbell_deadline;
unsigned long dbell_jiffies;
@@ -239,26 +239,33 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 drop;
- struct ionic_dev *idev;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
+ bool xdp_flush;
union {
void *base;
struct ionic_txq_desc *txq;
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
- void __iomem *cmb_base;
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
+ struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
+ struct xdp_rxq_info *xdp_rxq_info;
+ struct ionic_queue *partner;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
+ u64 drop;
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
@@ -280,7 +287,6 @@ struct ionic_intr_info {
struct ionic_cq {
struct ionic_lif *lif;
- struct ionic_cq_info *info;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
u16 tail_idx;
@@ -289,6 +295,7 @@ struct ionic_cq {
unsigned int desc_size;
void *base;
dma_addr_t base_pa;
+ struct ionic_dev *idev;
} ____cacheline_aligned_in_smp;
struct ionic;
@@ -363,23 +370,20 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
unsigned int num_descs, size_t desc_size);
void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa);
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
-typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+typedef bool (*ionic_cq_cb)(struct ionic_cq *cq);
typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
-void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
-void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
-void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
-void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
- void *cb_arg);
-void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
- unsigned int stop_index);
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell);
+bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
+
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 0ffc9c4904ac..91183965a6b7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -726,6 +726,11 @@ static int ionic_set_channels(struct net_device *netdev,
ionic_init_queue_params(lif, &qparam);
+ if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) {
+ netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n");
+ return -EOPNOTSUPP;
+ }
+
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index fcb44ceeb6aa..7f0c6cdc375e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -46,18 +46,26 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
+static int ionic_xdp_queues_config(struct ionic_lif *lif);
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+
static void ionic_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct ionic_intr_info *intr;
struct dim_cq_moder cur_moder;
+ struct ionic_intr_info *intr;
struct ionic_qcq *qcq;
struct ionic_lif *lif;
+ struct ionic_queue *q;
u32 new_coal;
- cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
- lif = qcq->q.lif;
+ q = &qcq->q;
+ if (q->type == IONIC_QTYPE_RXQ)
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ else
+ cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ lif = q->lif;
new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
new_coal = new_coal ? new_coal : 1;
@@ -422,10 +430,9 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
+ ionic_xdp_unregister_rxq_info(&qcq->q);
ionic_qcq_intr_free(lif, qcq);
- vfree(qcq->cq.info);
- qcq->cq.info = NULL;
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -529,14 +536,11 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int num_descs, unsigned int desc_size,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
+ unsigned int desc_info_size,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
- void *q_base, *cq_base, *sg_base;
- dma_addr_t cq_base_pa = 0;
- dma_addr_t sg_base_pa = 0;
- dma_addr_t q_base_pa = 0;
struct ionic_qcq *new;
int err;
@@ -552,7 +556,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev;
new->flags = flags;
- new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
+ new->q.info = vcalloc(num_descs, desc_info_size);
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
@@ -571,19 +575,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out;
-
- new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
- if (!new->cq.info) {
- netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
- err = -ENOMEM;
- goto err_out_free_irq;
- }
+ goto err_out_free_q_info;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
@@ -601,16 +598,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
err = -ENOMEM;
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
- q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
- q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
- ionic_q_map(&new->q, q_base, q_base_pa);
-
- cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
- cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+
+ /* Base the NotifyQ cq.base off the ALIGNed q.base */
+ new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
+ new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
+ new->cq.bound_q = &new->q;
} else {
/* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size);
@@ -619,11 +615,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
- q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
- q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
- ionic_q_map(&new->q, q_base, q_base_pa);
+ new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
if (flags & IONIC_QCQ_F_CMB_RINGS) {
/* on-chip CMB q descriptors */
@@ -648,7 +643,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
new->cmb_q_base_pa -= idev->phy_cmb_pages;
- ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
+ new->q.cmb_base = new->cmb_q_base;
+ new->q.cmb_base_pa = new->cmb_q_base_pa;
}
/* cq DMA descriptors */
@@ -660,10 +656,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM;
goto err_out_free_q;
}
- cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
- cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
+ new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
+ new->cq.bound_q = &new->q;
}
if (flags & IONIC_QCQ_F_SG) {
@@ -675,13 +670,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM;
goto err_out_free_cq;
}
- sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
- sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
- ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
+ new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
+ new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
}
INIT_WORK(&new->dim.work, ionic_dim_work);
- new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
*qcq = new;
@@ -695,8 +689,6 @@ err_out_free_q:
ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
}
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
-err_out_free_cq_info:
- vfree(new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
@@ -722,7 +714,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
IONIC_ADMINQ_LENGTH,
sizeof(struct ionic_admin_cmd),
sizeof(struct ionic_admin_comp),
- 0, lif->kern_pid, &lif->adminqcq);
+ 0,
+ sizeof(struct ionic_admin_desc_info),
+ lif->kern_pid, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -733,7 +727,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
flags, IONIC_NOTIFYQ_LENGTH,
sizeof(struct ionic_notifyq_cmd),
sizeof(union ionic_notifyq_comp),
- 0, lif->kern_pid, &lif->notifyqcq);
+ 0,
+ sizeof(struct ionic_admin_desc_info),
+ lif->kern_pid, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -862,8 +858,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.type = q->type,
.ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
- IONIC_QINIT_F_SG),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
@@ -875,6 +870,13 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
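+ /* Pair the rx queue with its same-index tx queue so XDP transmit
+ * paths can find a tx ring.
+ */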
+ q->partner = &lif->txqcqs[q->index]->q;
+ q->partner->partner = q;
+
+ if (!lif->xdp_prog ||
+ (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
+
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
@@ -945,6 +947,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &txq);
if (err)
goto err_qcq_alloc;
@@ -1004,6 +1007,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1157,71 +1161,6 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}
-static bool ionic_notifyq_service(struct ionic_cq *cq,
- struct ionic_cq_info *cq_info)
-{
- union ionic_notifyq_comp *comp = cq_info->cq_desc;
- struct ionic_deferred_work *work;
- struct net_device *netdev;
- struct ionic_queue *q;
- struct ionic_lif *lif;
- u64 eid;
-
- q = cq->bound_q;
- lif = q->info[0].cb_arg;
- netdev = lif->netdev;
- eid = le64_to_cpu(comp->event.eid);
-
- /* Have we run out of new completions to process? */
- if ((s64)(eid - lif->last_eid) <= 0)
- return false;
-
- lif->last_eid = eid;
-
- dev_dbg(lif->ionic->dev, "notifyq event:\n");
- dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
- comp, sizeof(*comp), true);
-
- switch (le16_to_cpu(comp->event.ecode)) {
- case IONIC_EVENT_LINK_CHANGE:
- ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
- break;
- case IONIC_EVENT_RESET:
- if (lif->ionic->idev.fw_status_ready &&
- !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
- !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "Reset event dropped\n");
- clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
- } else {
- work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- }
- }
- break;
- default:
- netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
- comp->event.ecode, eid);
- break;
- }
-
- return true;
-}
-
-static bool ionic_adminq_service(struct ionic_cq *cq,
- struct ionic_cq_info *cq_info)
-{
- struct ionic_admin_comp *comp = cq_info->cq_desc;
-
- if (!color_match(comp->color, cq->done_color))
- return false;
-
- ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
-
- return true;
-}
-
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
@@ -1252,8 +1191,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
- ionic_tx_service, NULL, NULL);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1640,6 +1578,12 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->priv_flags |= IFF_UNICAST_FLT |
IFF_LIVE_ADDR_CHANGE;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
}
@@ -1777,6 +1721,21 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
return err;
}
+static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
+ struct bpf_prog *xdp_prog)
+{
+ if (!xdp_prog)
+ return true;
+
+ if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
+ return true;
+
+ if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
+ return true;
+
+ return false;
+}
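In effect, a classic (non-frags) XDP program caps the usable MTU at IONIC_XDP_MAX_LINEAR_MTU, while a frags-aware program, or no program at all, lifts the cap. A rough sketch of the outcomes, assuming the linear limit works out to roughly a page minus XDP headroom and tailroom:

	ionic_xdp_is_valid_mtu(lif, 1500, prog);  /* true for any prog */
	ionic_xdp_is_valid_mtu(lif, 9000, prog);  /* true only if prog is NULL
						   * or xdp_has_frags is set */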
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1789,8 +1748,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
.mtu = cpu_to_le32(new_mtu),
},
};
+ struct bpf_prog *xdp_prog;
int err;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
+ if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
+ return -EINVAL;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -2070,6 +2034,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2101,6 +2066,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2166,6 +2132,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
+ err = ionic_xdp_queues_config(lif);
+ if (err)
+ return err;
+
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
@@ -2211,6 +2181,8 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
+ ionic_xdp_queues_config(lif);
+
return err;
}
@@ -2668,11 +2640,151 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+{
+ struct xdp_rxq_info *xi;
+
+ if (!q->xdp_rxq_info)
+ return;
+
+ xi = q->xdp_rxq_info;
+ q->xdp_rxq_info = NULL;
+
+ xdp_rxq_info_unreg(xi);
+ kfree(xi);
+}
+
+static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+{
+ struct xdp_rxq_info *rxq_info;
+ int err;
+
+ rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+ if (!rxq_info)
+ return -ENOMEM;
+
+ err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
+ goto err_out;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
+ xdp_rxq_info_unreg(rxq_info);
+ goto err_out;
+ }
+
+ q->xdp_rxq_info = rxq_info;
+
+ return 0;
+
+err_out:
+ kfree(rxq_info);
+ return err;
+}
+
+static int ionic_xdp_queues_config(struct ionic_lif *lif)
+{
+ unsigned int i;
+ int err;
+
+ if (!lif->rxqcqs)
+ return 0;
+
+ /* There's no need to rework memory if not going to/from NULL program.
+ * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info.
+ * This way we don't need to keep an *xdp_prog in every queue struct.
+ */
+ if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
+ return 0;
+
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ struct ionic_queue *q = &lif->rxqcqs[i]->q;
+
+ if (q->xdp_rxq_info) {
+ ionic_xdp_unregister_rxq_info(q);
+ continue;
+ }
+
+ err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
+ if (err) {
+ dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
+ i, err);
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
+ ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
+
+ return err;
+}
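The early return above is a cheap consistency check: !lif->xdp_prog == !...xdp_rxq_info is true when both are NULL or both are set, so the rework loop only runs when the state flips. As a sketch:

	bool has_prog = !!lif->xdp_prog;                  /* program attached?  */
	bool has_info = !!lif->rxqcqs[0]->q.xdp_rxq_info; /* queues registered? */

	if (has_prog == has_info)
		return 0;    /* already consistent, nothing to rework */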
+
+static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ u32 maxfs;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
+ netdev_info(lif->netdev, XDP_ERR_SPLIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
+#define XDP_ERR_MTU "MTU is too large for XDP without frags support"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
+ netdev_info(lif->netdev, XDP_ERR_MTU);
+ return -EINVAL;
+ }
+
+ maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
+ if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
+ maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
+ netdev->max_mtu = maxfs;
+
+ if (!netif_running(netdev)) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else {
+ mutex_lock(&lif->queue_lock);
+ ionic_stop_queues_reconfig(lif);
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
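Note the max_mtu bookkeeping above: the device's max_frame_size includes the Ethernet and VLAN headers, so VLAN_ETH_HLEN (18 bytes) is subtracted to get an MTU, which a non-frags program then clamps further. For example, assuming a device max_frame_size of 9216:

	maxfs = 9216 - VLAN_ETH_HLEN;    /* 9198 */
	if (prog && !(prog->aux && prog->aux->xdp_has_frags))
		maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);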
+
+static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ionic_xdp_config(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
.ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
+ .ndo_bpf = ionic_xdp,
+ .ndo_xdp_xmit = ionic_xdp_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
.ndo_set_features = ionic_set_features,
@@ -2755,6 +2867,8 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
+ swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
swap(a->q_base_pa, b->q_base_pa);
swap(a->q_size, b->q_size);
@@ -2770,7 +2884,6 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->cq.desc_size, b->cq.desc_size);
swap(a->cq.base, b->cq.base);
swap(a->cq.base_pa, b->cq.base_pa);
- swap(a->cq.info, b->cq.info);
swap(a->cq_base, b->cq_base);
swap(a->cq_base_pa, b->cq_base_pa);
swap(a->cq_size, b->cq_size);
@@ -2834,6 +2947,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2842,6 +2956,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &tx_qcqs[i]);
if (err)
goto err_out;
@@ -2863,6 +2978,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2871,6 +2987,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rx_qcqs[i]);
if (err)
goto err_out;
@@ -3391,9 +3508,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
napi_enable(&qcq->napi);
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3442,7 +3562,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
/* preset the callback info */
- q->info[0].cb_arg = lif;
+ q->admin_info[0].ctx = lif;
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3694,6 +3814,7 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
struct ionic_dev *idev;
+ u16 max_frags;
int qtype;
int err;
@@ -3761,17 +3882,16 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride);
- if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
- qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
- dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
- qtype, qti->max_sg_elems);
- }
+ if (qtype == IONIC_QTYPE_TXQ)
+ max_frags = IONIC_TX_MAX_FRAGS;
+ else if (qtype == IONIC_QTYPE_RXQ)
+ max_frags = IONIC_RX_MAX_FRAGS;
+ else
+ max_frags = 1;
- if (qti->max_sg_elems > MAX_SKB_FRAGS) {
- qti->max_sg_elems = MAX_SKB_FRAGS;
- dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
- qtype, qti->max_sg_elems);
- }
+ qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS);
+ dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n",
+ qtype, qti->max_sg_elems);
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 61548b3eea93..08f4266fe2aa 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_tx_stats {
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_frames;
};
struct ionic_rx_stats {
@@ -51,6 +52,11 @@ struct ionic_rx_stats {
u64 alloc_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -65,25 +71,25 @@ struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
u32 q_size;
+ u32 cq_size;
void *cq_base;
dma_addr_t cq_base_pa;
- u32 cq_size;
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
+ unsigned int flags;
void __iomem *cmb_q_base;
phys_addr_t cmb_q_base_pa;
u32 cmb_q_size;
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
+ struct timer_list napi_deadline;
struct ionic_queue q;
struct ionic_cq cq;
- struct ionic_intr_info intr;
- struct timer_list napi_deadline;
struct napi_struct napi;
- unsigned int flags;
struct ionic_qcq *napi_qcq;
+ struct ionic_intr_info intr;
struct dentry *dentry;
};
@@ -135,6 +141,12 @@ struct ionic_lif_sw_stats {
u64 hw_rx_over_errors;
u64 hw_rx_missed_errors;
u64 hw_tx_aborted_errors;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ u64 xdp_frames;
};
enum ionic_lif_state_flags {
@@ -230,6 +242,7 @@ struct ionic_lif {
struct ionic_phc *phc;
struct dentry *dentry;
+ struct bpf_prog *xdp_prog;
};
struct ionic_phc {
@@ -314,7 +327,7 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
{
- return unlikely(q->features & IONIC_TXQ_F_HWSTAMP);
+ return q->features & IONIC_TXQ_F_HWSTAMP;
}
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 2f479de329fe..c1259324b0be 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -15,7 +15,7 @@
#include "ionic_debugfs.h"
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
-MODULE_AUTHOR("Pensando Systems, Inc");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
MODULE_LICENSE("GPL");
static const char *ionic_error_to_str(enum ionic_status_code code)
@@ -190,7 +190,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_admin_cmd *desc;
unsigned long irqflags;
struct ionic_queue *q;
@@ -203,10 +204,10 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
q = &lif->adminqcq->q;
while (q->tail_idx != q->head_idx) {
- desc_info = &q->info[q->tail_idx];
- memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
+ desc = &q->adminq[q->tail_idx];
+ desc_info = &q->admin_info[q->tail_idx];
+ memset(desc, 0, sizeof(union ionic_adminq_cmd));
+ desc_info->ctx = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
@@ -246,25 +247,93 @@ static int ionic_adminq_check_err(struct ionic_lif *lif,
return err;
}
-static void ionic_adminq_cb(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+bool ionic_notifyq_service(struct ionic_cq *cq)
{
- struct ionic_admin_ctx *ctx = cb_arg;
+ struct ionic_deferred_work *work;
+ union ionic_notifyq_comp *comp;
+ struct net_device *netdev;
+ struct ionic_queue *q;
+ struct ionic_lif *lif;
+ u64 eid;
+
+ comp = &((union ionic_notifyq_comp *)cq->base)[cq->tail_idx];
+
+ q = cq->bound_q;
+ lif = q->admin_info[0].ctx;
+ netdev = lif->netdev;
+ eid = le64_to_cpu(comp->event.eid);
+
+ /* Have we run out of new completions to process? */
+ if ((s64)(eid - lif->last_eid) <= 0)
+ return false;
+
+ lif->last_eid = eid;
+
+ dev_dbg(lif->ionic->dev, "notifyq event:\n");
+ dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ switch (le16_to_cpu(comp->event.ecode)) {
+ case IONIC_EVENT_LINK_CHANGE:
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
+ break;
+ case IONIC_EVENT_RESET:
+ if (lif->ionic->idev.fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "Reset event dropped\n");
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
+ }
+ break;
+ default:
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
+ comp->event.ecode, eid);
+ break;
+ }
+
+ return true;
+}
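The (s64)(eid - lif->last_eid) <= 0 test above is a wraparound-safe "is this event new?" check: the unsigned difference is reinterpreted as signed, so ordering stays correct even if the 64-bit EID counter ever wraps. A minimal sketch:

	static bool eid_is_new(u64 eid, u64 last)
	{
		return (s64)(eid - last) > 0;  /* signed distance, wrap-safe */
	}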
+
+bool ionic_adminq_service(struct ionic_cq *cq)
+{
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_queue *q = cq->bound_q;
struct ionic_admin_comp *comp;
+ u16 index;
- if (!ctx)
- return;
+ comp = &((struct ionic_admin_comp *)cq->base)[cq->tail_idx];
+
+ if (!color_match(comp->color, cq->done_color))
+ return false;
+
+ /* check for empty queue */
+ if (q->tail_idx == q->head_idx)
+ return false;
- comp = cq_info->cq_desc;
+ do {
+ desc_info = &q->admin_info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ if (likely(desc_info->ctx)) {
+ struct ionic_admin_ctx *ctx = desc_info->ctx;
- memcpy(&ctx->comp, comp, sizeof(*comp));
+ memcpy(&ctx->comp, comp, sizeof(*comp));
- dev_dbg(q->dev, "comp admin queue command:\n");
- dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
- &ctx->comp, sizeof(ctx->comp), true);
+ dev_dbg(q->dev, "comp admin queue command:\n");
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx->comp, sizeof(ctx->comp), true);
+ complete_all(&ctx->work);
+ desc_info->ctx = NULL;
+ }
+ } while (index != le16_to_cpu(comp->comp_index));
- complete_all(&ctx->work);
+ return true;
}
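One admin completion can retire a whole batch of descriptors, so the loop above walks q->tail_idx forward until it consumes the slot named by comp->comp_index. The & (q->num_descs - 1) step relies on the ring size being a power of two, making the mask a cheap modulo; for example with num_descs = 16:

	(15 + 1) & 15 == 0    /* wraps from the last slot back to slot 0 */
	( 7 + 1) & 15 == 8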
bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
@@ -298,7 +367,8 @@ bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_admin_cmd *desc;
unsigned long irqflags;
struct ionic_queue *q;
int err = 0;
@@ -320,14 +390,17 @@ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
- desc_info = &q->info[q->head_idx];
- memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
+ desc_info = &q->admin_info[q->head_idx];
+ desc_info->ctx = ctx;
+
+ desc = &q->adminq[q->head_idx];
+ memcpy(desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
- ionic_q_post(q, true, ionic_adminq_cb, ctx);
+ ionic_q_post(q, true);
err_out:
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 1f6022fb7679..0107599a9dd4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -27,6 +27,12 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(hw_rx_over_errors),
IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+ IONIC_LIF_STAT_DESC(xdp_drop),
+ IONIC_LIF_STAT_DESC(xdp_aborted),
+ IONIC_LIF_STAT_DESC(xdp_pass),
+ IONIC_LIF_STAT_DESC(xdp_tx),
+ IONIC_LIF_STAT_DESC(xdp_redirect),
+ IONIC_LIF_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_port_stats_desc[] = {
@@ -135,6 +141,7 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(csum_none),
IONIC_TX_STAT_DESC(csum),
IONIC_TX_STAT_DESC(vlan_inserted),
+ IONIC_TX_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -149,6 +156,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(hwstamp_invalid),
IONIC_RX_STAT_DESC(dropped),
IONIC_RX_STAT_DESC(vlan_stripped),
+ IONIC_RX_STAT_DESC(xdp_drop),
+ IONIC_RX_STAT_DESC(xdp_aborted),
+ IONIC_RX_STAT_DESC(xdp_pass),
+ IONIC_RX_STAT_DESC(xdp_tx),
+ IONIC_RX_STAT_DESC(xdp_redirect),
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
@@ -171,6 +183,7 @@ static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
stats->tx_csum += txstats->csum;
stats->tx_hwstamp_valid += txstats->hwstamp_valid;
stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
+ stats->xdp_frames += txstats->xdp_frames;
}
static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
@@ -185,6 +198,11 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
stats->rx_csum_error += rxstats->csum_error;
stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
+ stats->xdp_drop += rxstats->xdp_drop;
+ stats->xdp_aborted += rxstats->xdp_aborted;
+ stats->xdp_pass += rxstats->xdp_pass;
+ stats->xdp_tx += rxstats->xdp_tx;
+ stats->xdp_redirect += rxstats->xdp_redirect;
}
static void ionic_get_lif_stats(struct ionic_lif *lif,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 6f4776759863..5dba6d2d633c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -5,27 +5,40 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
- ionic_desc_cb cb_func, void *cb_arg)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len);
+
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
+ size_t offset, size_t len);
+
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info);
+
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_comp *comp);
+
+static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
- ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ ionic_q_post(q, ring_dbell);
}
-static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
- ionic_desc_cb cb_func, void *cb_arg)
+static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
{
- ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ ionic_q_post(q, ring_dbell);
}
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
- unsigned long now, then, dif;
struct netdev_queue *netdev_txq;
+ unsigned long now, then, dif;
struct net_device *netdev;
netdev = q->lif->netdev;
@@ -83,46 +96,61 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
return true;
}
-static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
+static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
+{
+ if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
+ return q->txq_sgl_v1[q->head_idx].elems;
+ else
+ return q->txq_sgl[q->head_idx].elems;
+}
+
+static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
+ struct ionic_queue *q)
{
- return netdev_get_tx_queue(q->lif->netdev, q->index);
+ return netdev_get_tx_queue(netdev, q->index);
+}
+
+static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
+{
+ return page_address(buf_info->page) + buf_info->page_offset;
+}
+
+static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
+{
+ return buf_info->dma_addr + buf_info->page_offset;
+}
+
+static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+{
+ return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
}
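These helpers treat an Rx buffer as a (page, offset) pair, with the usable length clamped by both the page boundary and the descriptor maximum. For example, assuming a 4 KiB IONIC_PAGE_SIZE and a buffer sitting at page_offset 2048:

	ionic_rx_buf_va(bi);    /* page_address(bi->page) + 2048       */
	ionic_rx_buf_pa(bi);    /* bi->dma_addr + 2048                 */
	ionic_rx_buf_size(bi);  /* min(IONIC_MAX_BUF_LEN, 4096 - 2048) */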
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
- struct ionic_rx_stats *stats;
- struct device *dev;
+ struct device *dev = q->dev;
+ dma_addr_t dma_addr;
struct page *page;
- dev = q->dev;
- stats = q_to_rx_stats(q);
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
- netdev->name, q->name);
- return -EINVAL;
- }
-
page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return -ENOMEM;
}
- buf_info->dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
+ dma_addr = dma_map_page(dev, page, 0,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
__free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
- netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->dma_map_err++;
return -EIO;
}
+ buf_info->dma_addr = dma_addr;
buf_info->page = page;
buf_info->page_offset = 0;
@@ -132,12 +160,11 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
static void ionic_rx_page_free(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->dev;
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in free\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
return;
}
@@ -150,7 +177,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 used)
+ struct ionic_buf_info *buf_info, u32 len)
{
u32 size;
@@ -162,7 +189,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
if (page_to_nid(buf_info->page) != numa_mem_id())
return false;
- size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
+ size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
buf_info->page_offset += size;
if (buf_info->page_offset >= IONIC_PAGE_SIZE)
return false;
@@ -172,88 +199,96 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
return true;
}
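With an XDP program attached, the recycle size above is aligned to the full page, so page_offset immediately hits IONIC_PAGE_SIZE and the buffer is never split: each packet owns its page, as the XDP memory model expects. Without XDP the page is carved into IONIC_PAGE_SPLIT_SZ chunks. A worked example, assuming a 4 KiB page and a 2 KiB split size:

	non-XDP, len 1000:  ALIGN(1000, 2048) = 2048  /* second half reusable */
	XDP,     len 1000:  ALIGN(1000, 4096) = 4096  /* page retired */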
-static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+static void ionic_rx_add_skb_frag(struct ionic_queue *q,
+ struct sk_buff *skb,
+ struct ionic_buf_info *buf_info,
+ u32 off, u32 len,
+ bool synced)
+{
+ if (!synced)
+ dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
+ off, len, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf_info->page, buf_info->page_offset + off,
+ len,
+ IONIC_PAGE_SIZE);
+
+ if (!ionic_rx_buf_recycle(q, buf_info, len)) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ }
+}
+
+static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
+ struct ionic_rx_desc_info *desc_info,
+ unsigned int headroom,
+ unsigned int len,
+ unsigned int num_sg_elems,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
- struct ionic_rx_stats *stats;
- struct device *dev = q->dev;
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
- u16 len;
-
- stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
-
prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(q->dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return NULL;
}
- i = comp->num_sg_elems + 1;
- do {
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
-
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
- len -= frag_len;
-
- dma_sync_single_for_cpu(dev,
- buf_info->dma_addr + buf_info->page_offset,
- frag_len, DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset, frag_len,
- IONIC_PAGE_SIZE);
-
- if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
- dma_unmap_page(dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ if (headroom)
+ frag_len = min_t(u16, len,
+ IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ else
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
- buf_info++;
+ if (unlikely(!buf_info->page))
+ goto err_bad_buf_page;
+ ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
+ len -= frag_len;
+ buf_info++;
- i--;
- } while (i > 0);
+ for (i = 0; i < num_sg_elems; i++, buf_info++) {
+ if (unlikely(!buf_info->page))
+ goto err_bad_buf_page;
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
+ len -= frag_len;
+ }
return skb;
+
+err_bad_buf_page:
+ dev_kfree_skb(skb);
+ return NULL;
}
-static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
+ struct ionic_queue *q,
+ struct ionic_rx_desc_info *desc_info,
+ unsigned int headroom,
+ unsigned int len,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
- struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb;
- u16 len;
-
- stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return NULL;
}
@@ -262,30 +297,343 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return NULL;
}
- dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
- dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
+ if (!synced)
+ dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
+ dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, q->lif->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
return skb;
}
+static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info)
+{
+ unsigned int nbufs = desc_info->nbufs;
+ struct ionic_buf_info *buf_info;
+ struct device *dev = q->dev;
+ int i;
+
+ if (!nbufs)
+ return;
+
+ buf_info = desc_info->bufs;
+ dma_unmap_single(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+
+ buf_info++;
+ for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
+ dma_unmap_page(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+ }
+
+ if (desc_info->act == XDP_REDIRECT)
+ xdp_return_frame(desc_info->xdpf);
+
+ desc_info->nbufs = 0;
+ desc_info->xdpf = NULL;
+ desc_info->act = 0;
+}
+
+static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+ enum xdp_action act, struct page *page, int off,
+ bool ring_doorbell)
+{
+ struct ionic_tx_desc_info *desc_info;
+ struct ionic_buf_info *buf_info;
+ struct ionic_tx_stats *stats;
+ struct ionic_txq_desc *desc;
+ size_t len = frame->len;
+ dma_addr_t dma_addr;
+ u64 cmd;
+
+ desc_info = &q->tx_info[q->head_idx];
+ desc = &q->txq[q->head_idx];
+ buf_info = desc_info->bufs;
+ stats = q_to_tx_stats(q);
+
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (!dma_addr)
+ return -EIO;
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = len;
+ buf_info->page = page;
+ buf_info->page_offset = off;
+
+ desc_info->nbufs = 1;
+ desc_info->xdpf = frame;
+ desc_info->act = act;
+
+ if (xdp_frame_has_frags(frame)) {
+ struct ionic_txq_sg_elem *elem;
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+ int i;
+
+ bi = &buf_info[1];
+ sinfo = xdp_get_shared_info_from_frame(frame);
+ frag = sinfo->frags;
+ elem = ionic_tx_sg_elems(q);
+ for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
+ dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+ if (!dma_addr) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
+ bi->dma_addr = dma_addr;
+ bi->len = skb_frag_size(frag);
+ bi->page = skb_frag_page(frag);
+
+ elem->addr = cpu_to_le64(bi->dma_addr);
+ elem->len = cpu_to_le16(bi->len);
+ elem++;
+
+ desc_info->nbufs++;
+ }
+ }
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
+ 0, (desc_info->nbufs - 1), buf_info->dma_addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(len);
+ desc->csum_start = 0;
+ desc->csum_offset = 0;
+
+ stats->xdp_frames++;
+ stats->pkts++;
+ stats->bytes += len;
+
+ ionic_txq_post(q, ring_doorbell);
+
+ return 0;
+}
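Each posted XDP frame becomes one Tx descriptor with checksum offload disabled; the head buffer rides in the descriptor itself and any frags land in SG elements, which is why the command word encodes nbufs - 1 SG entries:

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  0,                     /* no offload flags */
				  desc_info->nbufs - 1,  /* SG elems only    */
				  buf_info->dma_addr);   /* head buffer      */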
+
+int ionic_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **xdp_frames, u32 flags)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ int nxmit;
+ int space;
+ int cpu;
+ int qi;
+
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* AdminQ is assumed on cpu 0, while we attempt to affinitize the
+ * TxRx queue pairs 0..n-1 on cpus 1..n. We try to keep to that
+ * affinitization here, but of course irqbalance and friends might
+ * have juggled things anyway, so we have to check for the 0 case.
+ */
+ cpu = smp_processor_id();
+ qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
+
+ txq = &lif->txqcqs[qi]->q;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, cpu);
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ return -EIO;
+ }
+
+ space = min_t(int, n, ionic_q_space_avail(txq));
+ for (nxmit = 0; nxmit < space ; nxmit++) {
+ if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
+ XDP_REDIRECT,
+ virt_to_page(xdp_frames[nxmit]->data),
+ 0, false)) {
+ nxmit--;
+ break;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
+ txq->dbval | txq->head_idx);
+
+ netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 4, 4);
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
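The queue selection above follows the affinity convention described in the comment: CPU c maps to queue (c - 1) % nxqs, with CPU 0 (the AdminQ's CPU) falling back to queue 0. For example, with nxqs = 4:

	cpu 0 -> qi 0    /* adminq cpu, fall back to queue 0 */
	cpu 1 -> qi 0
	cpu 4 -> qi 3
	cpu 5 -> qi 0    /* (5 - 1) % 4 wraps around */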
+
+static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ struct net_device *netdev,
+ struct bpf_prog *xdp_prog,
+ struct ionic_queue *rxq,
+ struct ionic_buf_info *buf_info,
+ int len)
+{
+ u32 xdp_action = XDP_ABORTED;
+ struct xdp_buff xdp_buf;
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int remain_len;
+ int frag_len;
+ int err = 0;
+
+ xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
+ frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
+ XDP_PACKET_HEADROOM, frag_len, false);
+
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
+ XDP_PACKET_HEADROOM, len,
+ DMA_FROM_DEVICE);
+
+ prefetchw(&xdp_buf.data_hard_start);
+
+ /* We limit the MTU to one buffer's worth if !xdp_has_frags, so
+ * if the received len is bigger than one buffer
+ * then we know we have frag info to gather.
+ */
+ remain_len = len - frag_len;
+ if (remain_len) {
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+
+ bi = buf_info;
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp_buf);
+
+ do {
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+ err = -ENOSPC;
+ goto out_xdp_abort;
+ }
+
+ frag = &sinfo->frags[sinfo->nr_frags];
+ sinfo->nr_frags++;
+ bi++;
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
+ 0, frag_len, DMA_FROM_DEVICE);
+ skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
+ sinfo->xdp_frags_size += frag_len;
+ remain_len -= frag_len;
+
+ if (page_is_pfmemalloc(bi->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp_buf);
+ } while (remain_len > 0);
+ }
+
+ xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
+
+ switch (xdp_action) {
+ case XDP_PASS:
+ stats->xdp_pass++;
+ return false; /* false = we didn't consume the packet */
+
+ case XDP_DROP:
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_drop++;
+ break;
+
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp_buf);
+ if (!xdpf)
+ goto out_xdp_abort;
+
+ txq = rxq->partner;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, smp_processor_id());
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ goto out_xdp_abort;
+ }
+
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
+ buf_info->page,
+ buf_info->page_offset,
+ true);
+ __netif_tx_unlock(nq);
+ if (err) {
+ netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+ goto out_xdp_abort;
+ }
+ stats->xdp_tx++;
+
+ /* the Tx completion will free the buffers */
+ break;
+
+ case XDP_REDIRECT:
+ /* unmap the pages before handing them to a different device */
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
+ if (err) {
+ netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
+ goto out_xdp_abort;
+ }
+ buf_info->page = NULL;
+ rxq->xdp_flush = true;
+ stats->xdp_redirect++;
+ break;
+
+ case XDP_ABORTED:
+ default:
+ goto out_xdp_abort;
+ }
+
+ return true;
+
+out_xdp_abort:
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_aborted++;
+
+ return true;
+}
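The boolean returned here means "consumed": XDP_PASS is the only verdict that returns false, letting the normal skb path continue with buffers already DMA-synced. The caller in ionic_rx_clean() below then looks roughly like:

	xdp_prog = READ_ONCE(q->lif->xdp_prog);
	if (xdp_prog) {
		if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
			return;      /* TX'd, redirected, dropped or aborted */
		synced = true;       /* PASS: sync already done for us */
	}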
+
static void ionic_rx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg)
+ struct ionic_rx_desc_info *desc_info,
+ struct ionic_rxq_comp *comp)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct ionic_rxq_comp *comp;
+ struct bpf_prog *xdp_prog;
+ unsigned int headroom;
struct sk_buff *skb;
-
- comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
+ bool synced = false;
+ bool use_copybreak;
+ u16 len;
stats = q_to_rx_stats(q);
@@ -294,13 +642,25 @@ static void ionic_rx_clean(struct ionic_queue *q,
return;
}
+ len = le16_to_cpu(comp->len);
stats->pkts++;
- stats->bytes += le16_to_cpu(comp->len);
+ stats->bytes += len;
+
+ xdp_prog = READ_ONCE(q->lif->xdp_prog);
+ if (xdp_prog) {
+ if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
+ return;
+ synced = true;
+ }
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
- skb = ionic_rx_copybreak(q, desc_info, comp);
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ use_copybreak = len <= q->lif->rx_copybreak;
+ if (use_copybreak)
+ skb = ionic_rx_copybreak(netdev, q, desc_info,
+ headroom, len, synced);
else
- skb = ionic_rx_frags(q, desc_info, comp);
+ skb = ionic_rx_build_skb(q, desc_info, headroom, len,
+ comp->num_sg_elems, synced);
if (unlikely(!skb)) {
stats->dropped++;
@@ -352,7 +712,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
u64 hwstamp;
cq_desc_hwstamp =
- cq_info->cq_desc +
+ (void *)comp +
qcq->cq.desc_size -
sizeof(struct ionic_rxq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -367,19 +727,19 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ if (use_copybreak)
napi_gro_receive(&qcq->napi, skb);
else
napi_gro_frags(&qcq->napi);
}
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_rx_service(struct ionic_cq *cq)
{
+ struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
- struct ionic_desc_info *desc_info;
struct ionic_rxq_comp *comp;
- comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+ comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->pkt_type_color, cq->done_color))
return false;
@@ -391,31 +751,29 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false;
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->rx_info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
- ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
-
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
+ ionic_rx_clean(q, desc_info, comp);
return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
- void __iomem *cmb_desc,
void *desc)
{
- if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
- memcpy_toio(cmb_desc, desc, q->desc_size);
+ /* Since Rx and Tx descriptors are the same size, we can
+ * save an instruction or two and skip the qtype check.
+ */
+ if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
+ memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
- struct ionic_desc_info *desc_info;
- struct ionic_rxq_sg_desc *sg_desc;
+ struct ionic_rx_desc_info *desc_info;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
@@ -424,8 +782,9 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int frag_len;
unsigned int nfrags;
unsigned int n_fill;
- unsigned int i, j;
unsigned int len;
+ unsigned int i;
+ unsigned int j;
n_fill = ionic_q_space_avail(q);
@@ -434,13 +793,16 @@ void ionic_rx_fill(struct ionic_queue *q)
if (n_fill < fill_threshold)
return;
- len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ len = netdev->mtu + VLAN_ETH_HLEN;
for (i = n_fill; i; i--) {
+ unsigned int headroom;
+ unsigned int buf_len;
+
nfrags = 0;
remain_len = len;
- desc_info = &q->info[q->head_idx];
- desc = desc_info->desc;
+ desc = &q->rxq[q->head_idx];
+ desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
if (!buf_info->page) { /* alloc a new buffer? */
@@ -451,19 +813,26 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- /* fill main descriptor - buf[0] */
- desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ /* fill main descriptor - buf[0]
+ * XDP uses space in the first buffer, so account for
+ * headroom, tailroom, and the IP header in the first frag size.
+ */
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ if (q->xdp_rxq_info)
+ buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+ else
+ buf_len = ionic_rx_buf_size(buf_info);
+ frag_len = min_t(u16, len, buf_len);
+
+ desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
nfrags++;
/* fill sg descriptors - buf[1..n] */
- sg_desc = desc_info->sg_desc;
- for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
- sg_elem = &sg_desc->elems[j];
+ sg_elem = q->rxq_sgl[q->head_idx].elems;
+ for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
if (!buf_info->page) { /* alloc a new sg buffer? */
if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
sg_elem->addr = 0;
@@ -472,10 +841,8 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE -
- buf_info->page_offset));
+ sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -483,18 +850,16 @@ void ionic_rx_fill(struct ionic_queue *q)
}
/* clear end sg element as a sentinel */
- if (j < q->max_sg_elems) {
- sg_elem = &sg_desc->elems[j];
+ if (j < q->max_sg_elems)
memset(sg_elem, 0, sizeof(*sg_elem));
- }
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
- ionic_rxq_post(q, false, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false);
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
@@ -509,21 +874,19 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_rx_desc_info *desc_info;
struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
- desc_info = &q->info[i];
- for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ desc_info = &q->rx_info[i];
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
buf_info = &desc_info->bufs[j];
if (buf_info->page)
ionic_rx_page_free(q, buf_info);
}
desc_info->nbufs = 0;
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
}
q->head_idx = 0;
@@ -568,16 +931,10 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi);
- struct ionic_dev *idev;
- struct ionic_lif *lif;
u32 work_done = 0;
u32 flags = 0;
- lif = cq->bound_q->lif;
- idev = &lif->ionic->idev;
-
- work_done = ionic_cq_service(cq, budget,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, budget);
if (unlikely(!budget))
return budget;
@@ -590,7 +947,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
+ ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index,
work_done, flags);
}
@@ -601,26 +958,30 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
return work_done;
}
+static void ionic_xdp_do_flush(struct ionic_cq *cq)
+{
+ if (cq->bound_q->xdp_flush) {
+ xdp_do_flush();
+ cq->bound_q->xdp_flush = false;
+ }
+}
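Redirected frames are only queued by xdp_do_redirect(); the per-queue xdp_flush flag defers the actual kick so a burst of redirects costs one xdp_do_flush() per NAPI poll rather than one per packet, as in the Rx poll loop below:

	work_done = ionic_cq_service(cq, budget, ionic_rx_service, NULL, NULL);
	ionic_rx_fill(cq->bound_q);
	ionic_xdp_do_flush(cq);    /* single flush for the whole batch */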
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi);
- struct ionic_dev *idev;
- struct ionic_lif *lif;
u32 work_done = 0;
u32 flags = 0;
if (unlikely(!budget))
return budget;
- lif = cq->bound_q->lif;
- idev = &lif->ionic->idev;
-
work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL);
ionic_rx_fill(cq->bound_q);
+ ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -629,7 +990,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
+ ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index,
work_done, flags);
}
@@ -646,7 +1007,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
struct ionic_qcq *txqcq;
- struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
bool resched = false;
@@ -655,12 +1015,10 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
u32 flags = 0;
lif = rxcq->bound_q->lif;
- idev = &lif->ionic->idev;
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
- ionic_tx_service, NULL, NULL);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
if (unlikely(!budget))
return budget;
@@ -670,6 +1028,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
+ ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -678,7 +1037,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (rx_work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+ ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
tx_work_done + rx_work_done, flags);
}
@@ -695,15 +1054,14 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
void *data, size_t len)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- q->lif->netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_tx_stats(q)->dma_map_err++;
return 0;
}
return dma_addr;
@@ -713,24 +1071,23 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
const skb_frag_t *frag,
size_t offset, size_t len)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
- q->lif->netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_tx_stats(q)->dma_map_err++;
+ return 0;
}
return dma_addr;
}
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
struct ionic_buf_info *buf_info = desc_info->bufs;
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
unsigned int nfrags;
@@ -738,10 +1095,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (dma_mapping_error(dev, dma_addr)) {
- stats->dma_map_err++;
+ if (!dma_addr)
return -EIO;
- }
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
buf_info++;
@@ -750,10 +1105,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (dma_mapping_error(dev, dma_addr)) {
- stats->dma_map_err++;
+ if (!dma_addr)
goto dma_fail;
- }
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
buf_info++;
@@ -771,12 +1124,13 @@ dma_fail:
dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
}
- dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
+ desc_info->bufs[0].len, DMA_TO_DEVICE);
return -EIO;
}
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
struct ionic_buf_info *buf_info = desc_info->bufs;
struct device *dev = q->dev;
@@ -785,41 +1139,48 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
if (!desc_info->nbufs)
return;
- dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ dma_unmap_single(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
buf_info++;
for (i = 1; i < desc_info->nbufs; i++, buf_info++)
- dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
desc_info->nbufs = 0;
}
static void ionic_tx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg)
+ struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_comp *comp)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
- struct sk_buff *skb = cb_arg;
- u16 qi;
+ struct sk_buff *skb;
+
+ if (desc_info->xdpf) {
+ ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ stats->clean++;
+
+ if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
+ netif_wake_subqueue(q->lif->netdev, q->index);
+
+ return;
+ }
ionic_tx_desc_unmap_bufs(q, desc_info);
+ skb = desc_info->skb;
if (!skb)
return;
- qi = skb_get_queue_mapping(skb);
-
- if (ionic_txq_hwstamp_enabled(q)) {
- if (cq_info) {
+ if (unlikely(ionic_txq_hwstamp_enabled(q))) {
+ if (comp) {
struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp;
u64 hwstamp;
cq_desc_hwstamp =
- cq_info->cq_desc +
+ (void *)comp +
qcq->cq.desc_size -
sizeof(struct ionic_txq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -837,27 +1198,25 @@ static void ionic_tx_clean(struct ionic_queue *q,
stats->hwstamp_invalid++;
}
}
-
- } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
- netif_wake_subqueue(q->lif->netdev, qi);
}
desc_info->bytes = skb->len;
stats->clean++;
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, 1);
}
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+static bool ionic_tx_service(struct ionic_cq *cq,
+ unsigned int *total_pkts, unsigned int *total_bytes)
{
+ struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
- struct ionic_desc_info *desc_info;
struct ionic_txq_comp *comp;
- int bytes = 0;
- int pkts = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
u16 index;
- comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+ comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->color, cq->done_color))
return false;
@@ -866,59 +1225,90 @@ bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
- if (desc_info->cb_arg) {
+ ionic_tx_clean(q, desc_info, comp);
+ if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
+ desc_info->skb = NULL;
}
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ (*total_pkts) += pkts;
+ (*total_bytes) += bytes;
return true;
}
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+{
+ unsigned int work_done = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
+
+ if (work_to_do == 0)
+ return 0;
+
+ while (ionic_tx_service(cq, &pkts, &bytes)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+
+ if (work_done) {
+ struct ionic_queue *q = cq->bound_q;
+
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
+ pkts, bytes,
+ ionic_q_space_avail(q),
+ IONIC_TSO_DESCS_NEEDED);
+ }
+
+ return work_done;
+}
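The done_color handling above is how new completions are detected without a device-written producer index: the hardware flips the color bit it writes each time it wraps the completion ring, so the consumer flips its expected color when its tail passes the last slot. In outline:

	if (cq->tail_idx == cq->num_descs - 1)
		cq->done_color = !cq->done_color;  /* device flips on wrap */
	cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);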
+
void ionic_tx_flush(struct ionic_cq *cq)
{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done;
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs);
if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+ ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_tx_desc_info *desc_info;
int bytes = 0;
int pkts = 0;
/* walk the not completed tx entries, if any */
while (q->head_idx != q->tail_idx) {
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
- if (desc_info->cb_arg) {
+ ionic_tx_clean(q, desc_info, NULL);
+ if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
+ desc_info->skb = NULL;
}
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
}
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);
+
+ netdev_tx_completed_queue(ndq, pkts, bytes);
+ netdev_tx_reset_queue(ndq);
+ }
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
@@ -966,8 +1356,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
+static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -975,7 +1365,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
- struct ionic_txq_desc *desc = desc_info->desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0;
u64 cmd;
@@ -991,22 +1381,23 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
if (start) {
skb_tx_timestamp(skb);
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, false, ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
+ ionic_txq_post(q, false);
} else {
- ionic_txq_post(q, done, NULL, NULL);
+ ionic_txq_post(q, done);
}
}
-static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *desc_info;
+ struct ionic_tx_desc_info *desc_info;
struct ionic_buf_info *buf_info;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
@@ -1028,8 +1419,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
bool encap;
int err;
- desc_info = &q->info[q->head_idx];
- buf_info = desc_info->bufs;
+ desc_info = &q->tx_info[q->head_idx];
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
@@ -1066,6 +1456,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
else
hdrlen = skb_tcp_all_headers(skb);
+ desc_info->skb = skb;
+ buf_info = desc_info->bufs;
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
@@ -1092,8 +1484,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
chunk_len = min(frag_rem, seg_rem);
if (!desc) {
/* fill main descriptor */
- desc = desc_info->txq_desc;
- elem = desc_info->txq_sg_desc->elems;
+ desc = &q->txq[q->head_idx];
+ elem = ionic_tx_sg_elems(q);
desc_addr = frag_addr;
desc_len = chunk_len;
} else {
@@ -1111,13 +1503,13 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(q, desc_info, skb,
+ ionic_tx_tso_post(netdev, q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
start = false;
/* Buffer information is stored with the first tso descriptor */
- desc_info = &q->info[q->head_idx];
+ desc_info = &q->tx_info[q->head_idx];
desc_info->nbufs = 0;
}
@@ -1130,9 +1522,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
}
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan;
@@ -1160,7 +1552,7 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
@@ -1169,9 +1561,9 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
}
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan;
@@ -1199,20 +1591,20 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
stats->csum_none++;
}
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1];
- struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_txq_sg_elem *elem;
unsigned int i;
+ elem = ionic_tx_sg_elems(q);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
elem->addr = cpu_to_le64(buf_info->dma_addr);
elem->len = cpu_to_le16(buf_info->len);
@@ -1221,14 +1613,18 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
stats->frags += skb_shinfo(skb)->nr_frags;
}
-static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
- struct ionic_desc_info *desc_info = &q->info[q->head_idx];
+ struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ bool ring_dbell = true;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
+ desc_info->skb = skb;
+
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
ionic_tx_calc_csum(q, skb, desc_info);
@@ -1242,16 +1638,22 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(netdev, q);
+
+ if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
+ netif_tx_stop_queue(ndq);
+ ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
+ netdev_xmit_more());
+ }
+ ionic_txq_post(q, ring_dbell);
return 0;
}
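The __netdev_tx_sent_queue() call above feeds Byte Queue Limits (BQL): every byte reported sent must eventually be reported back via netdev_tx_completed_queue(), which is why both ionic_tx_cq_service() and ionic_tx_empty() report completions, and why ionic_tx_empty() also calls netdev_tx_reset_queue() to rebalance the ledger when a queue is emptied with work still outstanding. A schematic of the contract, with hypothetical my_xmit()/my_clean() standing in for the driver paths:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct netdev_queue *ndq)
{
	bool ring_db;

	/* Returns true when the doorbell should be rung now; with
	 * xmit_more the stack may batch and defer the kick.
	 */
	ring_db = __netdev_tx_sent_queue(ndq, skb->len, netdev_xmit_more());
	/* ... post skb to hardware, kick the doorbell if ring_db ... */
	return NETDEV_TX_OK;
}

static void my_clean(struct netdev_queue *ndq,
		     unsigned int pkts, unsigned int bytes)
{
	/* Must balance the bytes accounted in my_xmit(). */
	netdev_tx_completed_queue(ndq, pkts, bytes);
}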
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
bool too_many_frags = false;
skb_frag_t *frag;
int desc_bufs;
@@ -1267,17 +1669,20 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
/* Each desc is mss long max, so a descriptor for each gso_seg */
if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
+ if (!nr_frags)
+ return ndescs;
} else {
ndescs = 1;
- if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+ if (!nr_frags)
+ return ndescs;
+
+ if (unlikely(nr_frags > q->max_sg_elems)) {
too_many_frags = true;
goto linearize;
}
- }
- /* If non-TSO, or no frags to check, we're done */
- if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
+ }
/* We need to scan the skb to be sure that none of the MTU sized
* packets in the TSO will require more sgs per descriptor than we
@@ -1328,36 +1733,17 @@ linearize:
err = skb_linearize(skb);
if (err)
return err;
- stats->linearize++;
+ q_to_tx_stats(q)->linearize++;
}
return ndescs;
}
-static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-{
- int stopped = 0;
-
- if (unlikely(!ionic_q_has_space(q, ndescs))) {
- netif_stop_subqueue(q->lif->netdev, q->index);
- stopped = 1;
-
- /* Might race with ionic_tx_clean, check again */
- smp_rmb();
- if (ionic_q_has_space(q, ndescs)) {
- netif_wake_subqueue(q->lif->netdev, q->index);
- stopped = 0;
- }
- }
-
- return stopped;
-}
-
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_queue *q = &lif->hwstamp_txq->q;
+ struct ionic_queue *q;
int err, ndescs;
/* Does not stop/start txq, because we post to a separate tx queue
@@ -1365,6 +1751,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
* the timestamping queue, it is dropped.
*/
+ q = &lif->hwstamp_txq->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (unlikely(ndescs < 0))
goto err_out_drop;
@@ -1374,9 +1761,9 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
@@ -1414,23 +1801,19 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (ndescs < 0)
goto err_out_drop;
- if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
+ if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
+ ionic_q_space_avail(q),
+ ndescs, ndescs))
return NETDEV_TX_BUSY;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
- /* Stop the queue if there aren't descriptors for the next packet.
- * Since our SG lists per descriptor take care of most of the possible
- * fragmentation, we don't need to have many descriptors available.
- */
- ionic_maybe_stop_tx(q, 4);
-
return NETDEV_TX_OK;
err_out_drop:
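The netif_txq_maybe_stop()/netif_txq_completed_wake() pair (from <net/netdev_queues.h>) replaces the hand-rolled ionic_maybe_stop_tx() deleted above, encapsulating the lockless stop/wake protocol: stop the queue, then re-check after a barrier in case the completion path freed descriptors in the window. A sketch of what the open-coded version has to get right, with a hypothetical space_avail() standing in for the driver's descriptor accounting:

static int maybe_stop(struct netdev_queue *ndq, unsigned int needed)
{
	if (likely(space_avail() >= needed))
		return 0;

	netif_tx_stop_queue(ndq);

	/* Pairs with a barrier on the completion side: after stopping,
	 * re-check in case descriptors were freed concurrently and the
	 * cleaner saw the queue still running (so didn't wake it).
	 */
	smp_mb();
	if (unlikely(space_avail() >= needed)) {
		netif_tx_start_queue(ndq);
		return 0;
	}
	return -EBUSY;
}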
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index d7cbaad8a6fb..9e73e324e7a1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -14,7 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget);
int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+bool ionic_rx_service(struct ionic_cq *cq);
+int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 35ec9aab3dc7..51fa880eaf6c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1186,7 +1186,6 @@ static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
- capability = 0;
/* NX2031 always had MN */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
@@ -1197,7 +1196,6 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
-
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT)
return 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5a5dbbb8d8aa..9a1660a12c57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1794,8 +1794,6 @@ qed_rdma_create_srq(void *rdma_cxt,
goto err;
opaque_fid = p_hwfn->hw_info.opaque_fid;
-
- opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 0e240b5ab8d4..ae3ebf0cf999 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1776,7 +1776,7 @@ static int qede_get_tunable(struct net_device *dev,
return 0;
}
-static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -1789,18 +1789,26 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
- edata->advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
- edata->advertised |= ADVERTISED_10000baseT_Full;
- if (current_link.sup_caps & QED_EEE_1G_ADV)
- edata->supported = ADVERTISED_1000baseT_Full;
- if (current_link.sup_caps & QED_EEE_10G_ADV)
- edata->supported |= ADVERTISED_10000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
- edata->lp_advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
- edata->lp_advertised |= ADVERTISED_10000baseT_Full;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_10G_ADV);
edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
edata->eee_enabled = current_link.eee.enable;
@@ -1810,11 +1818,14 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
+ bool unsupp;
if (!edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -1832,21 +1843,26 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
- if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) ||
- ((edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) !=
- edata->advertised)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ unsupp = linkmode_andnot(tmp, edata->advertised, supported);
+ if (unsupp) {
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Invalid advertised capabilities %d\n",
- edata->advertised);
+ "Invalid advertised capabilities %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, edata->advertised);
return -EINVAL;
}
- if (edata->advertised & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised))
params.eee.adv_caps = QED_EEE_1G_ADV;
- if (edata->advertised & ADVERTISED_10000baseT_Full)
- params.eee.adv_caps |= QED_EEE_10G_ADV;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised))
+ params.eee.adv_caps |= QED_EEE_10G_ADV;
+
params.eee.enable = edata->eee_enabled;
params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
params.eee.tx_lpi_timer = edata->tx_lpi_timer;
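With ethtool_keee, advertisement masks are ETHTOOL_LINK_MODE bitmaps rather than the old u32 ADVERTISED_* flags, so the if/else chains collapse into linkmode helpers. linkmode_mod_bit(nr, map, set) is simply a set-or-clear keyed on the boolean, i.e. each pair of lines in the get_eee conversion above is equivalent to:

if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 edata->advertised);
else
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			   edata->advertised);

On the set_eee side, linkmode_andnot(dst, a, b) computes a & ~b and returns whether any bit remains set, which is how the requested advertisement is rejected if it contains anything outside the supported 1G/10G pair.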
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index cb1746bc0e0c..847fa62c80df 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -215,7 +215,7 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((skb_transport_offset(skb) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
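This conversion is purely cosmetic: skb_transport_offset() is defined in include/linux/skbuff.h as exactly the expression being replaced:

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}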
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 41894d154013..b9dc0071c5de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -446,8 +446,7 @@ static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
encap_descr |= skb_network_offset(skb) << 10;
first_desc->encap_descr = cpu_to_le16(encap_descr);
- first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
- skb->data;
+ first_desc->tcp_hdr_offset = skb_inner_transport_offset(skb);
first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 3270df72541b..4c06f55878de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -771,5 +771,6 @@ static struct platform_driver emac_platform_driver = {
module_platform_driver(emac_platform_driver);
+MODULE_DESCRIPTION("Qualcomm EMAC Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index 4292c89bd35c..6263e4cf47fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -1,22 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- *
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* This module implements the Qualcomm Atheros SPI protocol for
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 356de8ec5d48..828ee9c27578 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* Qualcomm Atheros SPI register definition.
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c
index 6b511f05df61..5302da587620 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros ethernet framing. Every Ethernet frame is surrounded
@@ -162,5 +149,5 @@ EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h
index 928554f11e35..44ed66fdb407 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros
@@ -107,9 +94,6 @@ struct qcafrm_handle {
/* Offset in buffer (borrowed for length too) */
u16 offset;
-
- /* Frame length as kept by this module */
- u16 len;
};
u16 qcafrm_create_header(u8 *buf, u16 len);
@@ -128,17 +112,6 @@ static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
handle->state = handle->init;
}
-/* Gather received bytes and try to extract a full Ethernet frame
- * by following a simple state machine.
- *
- * Return: QCAFRM_GATHER No Ethernet frame fully received yet.
- * QCAFRM_NOHEAD Header expected but not found.
- * QCAFRM_INVLEN QCA7K frame length is invalid
- * QCAFRM_NOTAIL Footer expected but not found.
- * > 0 Number of byte in the fully received
- * Ethernet frame
- */
-
s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte);
#endif /* _QCA_FRAMING_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 1822f2ad8f0d..ff3b89e9028e 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
@@ -255,7 +242,7 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct qcaspi *qca = netdev_priv(dev);
ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
- ring->tx_max_pending = TX_RING_MAX_LEN;
+ ring->tx_max_pending = QCASPI_TX_RING_MAX_LEN;
ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@@ -275,8 +262,8 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
if (qca->spi_thread)
kthread_park(qca->spi_thread);
- qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
- qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+ qca->txr.count = max_t(u32, ring->tx_pending, QCASPI_TX_RING_MIN_LEN);
+ qca->txr.count = min_t(u16, qca->txr.count, QCASPI_TX_RING_MAX_LEN);
if (qca->spi_thread)
kthread_unpark(qca->spi_thread);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h
index 46a785844421..0d98cef3abc4 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.h
+++ b/drivers/net/ethernet/qualcomm/qca_debug.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5f3c11fb3fa2..5799ecc88a87 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros SPI protocol for
@@ -359,7 +346,7 @@ qcaspi_receive(struct qcaspi *qca)
/* Read the packet size. */
qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
- netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
+ netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %04x\n",
available);
if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
@@ -476,7 +463,7 @@ qcaspi_flush_tx_ring(struct qcaspi *qca)
* has been replaced by netif_tx_lock_bh() and so on.
*/
netif_tx_lock_bh(qca->net_dev);
- for (i = 0; i < TX_RING_MAX_LEN; i++) {
+ for (i = 0; i < QCASPI_TX_RING_MAX_LEN; i++) {
if (qca->txr.skb[i]) {
dev_kfree_skb(qca->txr.skb[i]);
qca->txr.skb[i] = NULL;
@@ -687,7 +674,7 @@ static int
qcaspi_netdev_open(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
- int ret = 0;
+ struct task_struct *thread;
if (!qca)
return -EINVAL;
@@ -697,23 +684,18 @@ qcaspi_netdev_open(struct net_device *dev)
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
- qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
- qca, "%s", dev->name);
+ thread = kthread_run((void *)qcaspi_spi_thread,
+ qca, "%s", dev->name);
- if (IS_ERR(qca->spi_thread)) {
+ if (IS_ERR(thread)) {
netdev_err(dev, "%s: unable to start kernel thread.\n",
QCASPI_DRV_NAME);
- return PTR_ERR(qca->spi_thread);
+ return PTR_ERR(thread);
}
- ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
- dev->name, qca);
- if (ret) {
- netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
- QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
- kthread_stop(qca->spi_thread);
- return ret;
- }
+ qca->spi_thread = thread;
+
+ enable_irq(qca->spi_dev->irq);
/* SPI thread takes care of TX queue */
@@ -728,10 +710,12 @@ qcaspi_netdev_close(struct net_device *dev)
netif_stop_queue(dev);
qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
- free_irq(qca->spi_dev->irq, qca);
+ disable_irq(qca->spi_dev->irq);
- kthread_stop(qca->spi_thread);
- qca->spi_thread = NULL;
+ if (qca->spi_thread) {
+ kthread_stop(qca->spi_thread);
+ qca->spi_thread = NULL;
+ }
qcaspi_flush_tx_ring(qca);
return 0;
@@ -831,8 +815,8 @@ qcaspi_netdev_init(struct net_device *dev)
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
- qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
- QCAFRM_FOOTER_LEN + 4) * 4;
+ qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+ QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN) * QCASPI_RX_MAX_FRAMES;
memset(&qca->stats, 0, sizeof(struct qcaspi_stats));
@@ -881,6 +865,8 @@ qcaspi_netdev_setup(struct net_device *dev)
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->needed_tailroom = ALIGN(QCAFRM_FOOTER_LEN + QCAFRM_MIN_LEN, 4);
+ dev->needed_headroom = ALIGN(QCAFRM_HEADER_LEN, 4);
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
@@ -891,7 +877,7 @@ qcaspi_netdev_setup(struct net_device *dev)
memset(qca, 0, sizeof(struct qcaspi));
memset(&qca->txr, 0, sizeof(qca->txr));
- qca->txr.count = TX_RING_MAX_LEN;
+ qca->txr.count = QCASPI_TX_RING_MAX_LEN;
}
static const struct of_device_id qca_spi_of_match[] = {
@@ -984,6 +970,15 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
+ ret = devm_request_irq(&spi->dev, spi->irq, qcaspi_intr_handler,
+ IRQF_NO_AUTOEN, qca->net_dev->name, qca);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get IRQ %d (irqval=%d).\n",
+ spi->irq, ret);
+ free_netdev(qcaspi_devs);
+ return ret;
+ }
+
ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
@@ -998,8 +993,8 @@ qca_spi_probe(struct spi_device *spi)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
- signature);
+ dev_err(&spi->dev, "Invalid signature (expected 0x%04x, read 0x%04x)\n",
+ QCASPI_GOOD_SIGNATURE, signature);
free_netdev(qcaspi_devs);
return -EFAULT;
}
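The shape of the change across qca_spi_probe()/qcaspi_netdev_open()/qcaspi_netdev_close(): the IRQ is now requested once at probe time with IRQF_NO_AUTOEN, which leaves the line masked until explicitly enabled, so open/close only toggle it instead of re-registering the handler each time. A sketch of the pattern (my_irq_handler and priv are placeholders):

/* probe: register once, line stays disabled */
ret = devm_request_irq(&spi->dev, spi->irq, my_irq_handler,
		       IRQF_NO_AUTOEN, dev_name(&spi->dev), priv);
if (ret)
	return ret;

/* ndo_open */
enable_irq(spi->irq);

/* ndo_close: disable_irq() waits for a running handler to finish */
disable_irq(spi->irq);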
@@ -1048,6 +1043,6 @@ module_spi_driver(qca_spi_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCASPI_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 3067356106f0..d59cb2352cee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Qualcomm Atheros SPI register definition.
@@ -39,8 +26,9 @@
#define QCASPI_GOOD_SIGNATURE 0xAA55
-#define TX_RING_MAX_LEN 10
-#define TX_RING_MIN_LEN 2
+#define QCASPI_TX_RING_MAX_LEN 10
+#define QCASPI_TX_RING_MIN_LEN 2
+#define QCASPI_RX_MAX_FRAMES 4
/* sync related constants */
#define QCASPI_SYNC_UNKNOWN 0
@@ -54,7 +42,7 @@
#define QCASPI_EVENT_CPUON 1
struct tx_ring {
- struct sk_buff *skb[TX_RING_MAX_LEN];
+ struct sk_buff *skb[QCASPI_TX_RING_MAX_LEN];
u16 head;
u16 tail;
u16 size;
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 223321897b96..321fd8d00730 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2017, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros UART protocol for
@@ -410,6 +397,6 @@ module_serdev_device_driver(qca_uart_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5b69b9268c75..f3bea196a8f9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -520,4 +520,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_ALIAS_RTNL_LINK("rmnet");
+MODULE_DESCRIPTION("Qualcomm RmNet MAP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 046b5f7d8e7c..9d2a9562c96f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -98,7 +98,7 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
- return priv->real_dev->ifindex;
+ return READ_ONCE(priv->real_dev->ifindex);
}
static int rmnet_vnd_init(struct net_device *dev)
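This annotation fits the lockless-rtnetlink theme of this pull: ndo_get_iflink() may now be called without holding RTNL, so the read of real_dev->ifindex is wrapped in READ_ONCE() to document the data race and prevent load tearing; presumably the writer side stores ifindex with WRITE_ONCE() to match. Schematically:

/* writer (under whatever lock guards reconfiguration) */
WRITE_ONCE(dev->ifindex, new_index);

/* lockless reader */
idx = READ_ONCE(priv->real_dev->ifindex);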
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 81567fcf3957..4c043052198d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -68,6 +68,7 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
+ RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_NONE
};
@@ -84,3 +85,6 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
void rtl8168_init_leds(struct net_device *ndev);
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
+void rtl8125_init_leds(struct net_device *ndev);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 007d077edcad..7c5dc9d0df85 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -18,12 +18,14 @@
#define RTL8168_LED_CTRL_LINK_100 BIT(1)
#define RTL8168_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_NUM_LEDS 3
+#define RTL8125_LED_CTRL_ACT BIT(9)
+#define RTL8125_LED_CTRL_LINK_2500 BIT(5)
+#define RTL8125_LED_CTRL_LINK_1000 BIT(3)
+#define RTL8125_LED_CTRL_LINK_100 BIT(1)
+#define RTL8125_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_SUPPORTED_MODES \
- (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \
- BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \
- BIT(TRIGGER_NETDEV_TX))
+#define RTL8168_NUM_LEDS 3
+#define RTL8125_NUM_LEDS 4
struct r8169_led_classdev {
struct led_classdev led;
@@ -33,28 +35,35 @@ struct r8169_led_classdev {
#define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led)
+static bool r8169_trigger_mode_is_valid(unsigned long flags)
+{
+ bool rx, tx;
+
+ if (flags & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ return false;
+ if (flags & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ return false;
+
+ rx = flags & BIT(TRIGGER_NETDEV_RX);
+ tx = flags & BIT(TRIGGER_NETDEV_TX);
+
+ return rx == tx;
+}
+
static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev,
unsigned long flags)
{
struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
struct rtl8169_private *tp = netdev_priv(ldev->ndev);
int shift = ldev->index * 4;
- bool rx, tx;
-
- if (flags & ~RTL8168_SUPPORTED_MODES)
- goto nosupp;
- rx = flags & BIT(TRIGGER_NETDEV_RX);
- tx = flags & BIT(TRIGGER_NETDEV_TX);
- if (rx != tx)
- goto nosupp;
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
+ return -EOPNOTSUPP;
+ }
return 0;
-
-nosupp:
- /* Switch LED off to indicate that mode isn't supported */
- rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
- return -EOPNOTSUPP;
}
static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev,
@@ -129,7 +138,6 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
led_cdev->name = led_name;
- led_cdev->default_trigger = "netdev";
led_cdev->hw_control_trigger = "netdev";
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported;
@@ -155,3 +163,102 @@ void rtl8168_init_leds(struct net_device *ndev)
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
}
+
+static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8125_set_led_mode(tp, ldev->index, 0);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtl8125_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ u16 mode = 0;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode |= RTL8125_LED_CTRL_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode |= RTL8125_LED_CTRL_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode |= RTL8125_LED_CTRL_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode |= RTL8125_LED_CTRL_LINK_2500;
+ if (flags & (BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX)))
+ mode |= RTL8125_LED_CTRL_ACT;
+
+ return rtl8125_set_led_mode(tp, ldev->index, mode);
+}
+
+static int rtl8125_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int mode;
+
+ mode = rtl8125_get_led_mode(tp, ldev->index);
+ if (mode < 0)
+ return mode;
+
+ if (mode & RTL8125_LED_CTRL_LINK_10)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_10);
+ if (mode & RTL8125_LED_CTRL_LINK_100)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_100);
+ if (mode & RTL8125_LED_CTRL_LINK_1000)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_1000);
+ if (mode & RTL8125_LED_CTRL_LINK_2500)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_2500);
+ if (mode & RTL8125_LED_CTRL_ACT)
+ *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+
+ return 0;
+}
+
+static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
+ struct net_device *ndev, int index)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->ndev = ndev;
+ ldev->index = index;
+
+ r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->hw_control_is_supported = rtl8125_led_hw_control_is_supported;
+ led_cdev->hw_control_set = rtl8125_led_hw_control_set;
+ led_cdev->hw_control_get = rtl8125_led_hw_control_get;
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+ devm_led_classdev_register(&ndev->dev, led_cdev);
+}
+
+void rtl8125_init_leds(struct net_device *ndev)
+{
+ /* bind resource mgmt to netdev */
+ struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+ leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return;
+
+ for (i = 0; i < RTL8125_NUM_LEDS; i++)
+ rtl8125_setup_led_ldev(leds + i, ndev, i);
+}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dd73df6b17b0..5c879a5c86d7 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -55,6 +55,7 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
+#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -136,6 +137,7 @@ static const struct {
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -158,6 +160,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
+ { PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
{}
};
@@ -327,13 +330,23 @@ enum rtl8168_registers {
};
enum rtl8125_registers {
+ LEDSEL0 = 0x18,
+ INT_CFG0_8125 = 0x34,
+#define INT_CFG0_ENABLE_8125 BIT(0)
+#define INT_CFG0_CLKREQEN BIT(3)
IntrMask_8125 = 0x38,
IntrStatus_8125 = 0x3c,
+ INT_CFG1_8125 = 0x7a,
+ LEDSEL2 = 0x84,
+ LEDSEL1 = 0x86,
TxPoll_8125 = 0x90,
+ LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
+#define LEDSEL_MASK_8125 0x23f
+
#define RX_VLAN_INNER_8125 BIT(22)
#define RX_VLAN_OUTER_8125 BIT(23)
#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
@@ -606,6 +619,7 @@ struct rtl8169_private {
struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
+ u16 tx_lpi_timer;
u32 irq_mask;
int irq;
struct clk *clk;
@@ -629,7 +643,6 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
- int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -663,6 +676,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_2);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -824,6 +838,51 @@ int rtl8168_get_led_mode(struct rtl8169_private *tp)
return ret;
}
+static int rtl8125_get_led_reg(int index)
+{
+ static const int led_regs[] = { LEDSEL0, LEDSEL1, LEDSEL2, LEDSEL3 };
+
+ return led_regs[index];
+}
+
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+ u16 val;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->led_lock);
+ val = RTL_R16(tp, reg) & ~LEDSEL_MASK_8125;
+ RTL_W16(tp, reg, val | mode);
+ mutex_unlock(&tp->led_lock);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = RTL_R16(tp, reg);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
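Both LED accessors bracket the register access with runtime PM, since the register window is only usable while the device is resumed. The resume-and-get form is preferred because it drops the usage count itself on failure:

ret = pm_runtime_resume_and_get(dev);  /* device powered from here */
if (ret < 0)
	return ret;                    /* count already dropped on error */

/* ... RTL_R16()/RTL_W16() register access ... */

pm_runtime_put_sync(dev);              /* may suspend immediately */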
void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len)
{
@@ -1140,7 +1199,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1155,7 +1214,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1341,7 +1400,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (enable)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
else
@@ -1508,7 +1567,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (wolopts)
rtl_mod_config2(tp, 0, PME_SIGNAL);
else
@@ -1974,30 +2033,64 @@ static int rtl_set_coalesce(struct net_device *dev,
return 0;
}
-static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
+static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
+{
+ unsigned int timer_val = READ_ONCE(tp->dev->mtu) + ETH_HLEN + 0x20;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_48:
+ tp->tx_lpi_timer = timer_val;
+ r8168_mac_ocp_write(tp, 0xe048, timer_val);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ tp->tx_lpi_timer = timer_val;
+ RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned int r8169_get_tx_lpi_timer_us(struct rtl8169_private *tp)
+{
+ unsigned int speed = tp->phydev->speed;
+ unsigned int timer = tp->tx_lpi_timer;
+
+ if (!timer || speed == SPEED_UNKNOWN)
+ return 0;
+
+ /* tx_lpi_timer value is in bytes */
+ return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed);
+}
+
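A quick sanity check of the unit conversion above: tx_lpi_timer holds a byte count and phylib reports speed numerically in Mb/s, so bytes * 8 / (Mb/s) comes out directly in microseconds. With the default timer value of MTU + ETH_HLEN + 0x20 set by rtl_set_eee_txidle_timer():

/* MTU 1500: timer = 1500 + 14 + 0x20 = 1546 bytes
 *   SPEED_1000: DIV_ROUND_CLOSEST(1546 * 8, 1000) = 12 us
 *   SPEED_100:  DIV_ROUND_CLOSEST(1546 * 8,  100) = 124 us
 */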
+static int rtl8169_get_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- return phy_ethtool_get_eee(tp->phydev, data);
+ ret = phy_ethtool_get_eee(tp->phydev, data);
+ if (ret)
+ return ret;
+
+ data->tx_lpi_timer = r8169_get_tx_lpi_timer_us(tp);
+
+ return 0;
}
-static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+static int rtl8169_set_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_eee(tp->phydev, data);
-
- if (!ret)
- tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
- MDIO_AN_EEE_ADV);
- return ret;
+ return phy_ethtool_set_eee(tp->phydev, data);
}
static void rtl8169_get_ringparam(struct net_device *dev,
@@ -2062,21 +2155,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_pauseparam = rtl8169_set_pauseparam,
};
-static void rtl_enable_eee(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int adv;
-
- /* respect EEE advertisement the user may have set */
- if (tp->eee_adv >= 0)
- adv = tp->eee_adv;
- else
- adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
-
- if (adv >= 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
-}
-
static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
@@ -2095,6 +2173,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
u16 val;
enum mac_version ver;
} mac_info[] = {
+ /* 8126A family. */
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+
/* 8125B family. */
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
@@ -2250,14 +2331,8 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
-{
- RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
-}
-
static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
{
- rtl8125_set_eee_txidle_timer(tp);
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
@@ -2313,9 +2388,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
- if (rtl_supports_eee(tp))
- rtl_enable_eee(tp);
-
genphy_soft_reset(tp->phydev);
}
@@ -2368,6 +2440,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2554,7 +2627,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2797,7 +2870,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2811,7 +2884,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2821,6 +2894,8 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
+ u8 val8;
+
if (tp->mac_version < RTL_GIGA_MAC_VER_32)
return;
@@ -2834,11 +2909,19 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
return;
rtl_mod_config5(tp, 0, ASPM_en);
- rtl_mod_config2(tp, 0, ClkReqEn);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, 0, ClkReqEn);
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2850,14 +2933,22 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
break;
}
- rtl_mod_config2(tp, ClkReqEn, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, ClkReqEn, 0);
+ break;
+ }
rtl_mod_config5(tp, ASPM_en, 0);
}
}
@@ -3570,10 +3661,15 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
else
- r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0300);
if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
@@ -3586,6 +3682,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
+ else
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
@@ -3600,10 +3700,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
- rtl8125b_config_eee_mac(tp);
- else
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
rtl8125a_config_eee_mac(tp);
+ else
+ rtl8125b_config_eee_mac(tp);
rtl_disable_rxdvgate(tp);
}
@@ -3647,6 +3747,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8126a(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -3689,6 +3795,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3699,9 +3806,23 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
{
int i;
+ RTL_W8(tp, INT_CFG0_8125, 0x00);
+
/* disable interrupt coalescing */
- for (i = 0xa00; i < 0xb00; i += 4)
- RTL_W32(tp, i, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61:
+ for (i = 0xa00; i < 0xb00; i += 4)
+ RTL_W32(tp, i, 0);
+ break;
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ for (i = 0xa00; i < 0xa80; i += 4)
+ RTL_W32(tp, i, 0);
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
+ break;
+ default:
+ break;
+ }
rtl_hw_config(tp);
}
@@ -3744,6 +3865,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ rtl_set_eee_txidle_timer(tp);
+
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
rtl_hw_start_8169(tp);
else if (rtl_is_8125(tp))
@@ -3777,15 +3900,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
netdev_update_features(dev);
rtl_jumbo_config(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
- rtl8125_set_eee_txidle_timer(tp);
- break;
- default:
- break;
- }
+ rtl_set_eee_txidle_timer(tp);
return 0;
}
@@ -3929,7 +4044,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4080,8 +4195,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
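The hunk above folds the RTL8125/8126 versions into one range: on the affected chips, frames shorter than ETH_ZLEN (60 bytes, FCS excluded) are padded in software before transmit. A minimal userspace sketch of the same max_t() logic, with the per-chip condition reduced to a flag for illustration:

#include <stdio.h>

#define ETH_ZLEN 60 /* minimum Ethernet frame length, FCS not included */

/* mirrors the max_t() in rtl_quirk_packet_padto(): raise the pad target
 * to ETH_ZLEN on affected chip versions, never lower it */
static unsigned int quirk_padto(unsigned int padto, int affected_chip)
{
    if (affected_chip && padto < ETH_ZLEN)
        return ETH_ZLEN;
    return padto;
}

int main(void)
{
    printf("%u\n", quirk_padto(0, 1));  /* 60: short frames get padded */
    printf("%u\n", quirk_padto(0, 0));  /* 0: unaffected chip, no change */
    return 0;
}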
@@ -5058,7 +5172,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
}
tp->phydev->mac_managed_pm = true;
-
+ if (rtl_supports_eee(tp))
+ phy_support_eee(tp->phydev);
phy_support_asym_pause(tp->phydev);
/* PHY will be woken up in rtl_open() */
@@ -5108,7 +5223,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
rtl_hw_init_8125(tp);
break;
default:
@@ -5193,7 +5308,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
- tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
raw_spin_lock_init(&tp->cfg9346_usage_lock);
@@ -5201,11 +5315,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
- dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
- struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
/* Get the *optional* external "ether_clk" used on some boards */
tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
if (IS_ERR(tp->clk))
@@ -5320,6 +5429,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
netdev_sw_irq_coalesce_default_on(dev);
/* configure chip for default features */
@@ -5356,10 +5467,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- if (IS_ENABLED(CONFIG_R8169_LEDS) &&
- tp->mac_version > RTL_GIGA_MAC_VER_06 &&
- tp->mac_version < RTL_GIGA_MAC_VER_61)
- rtl8168_init_leds(dev);
+ if (IS_ENABLED(CONFIG_R8169_LEDS)) {
+ if (rtl_is_8125(tp))
+ rtl8125_init_leds(dev);
+ else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ rtl8168_init_leds(dev);
+ }
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index b50f16786c24..1f74317beb88 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1102,6 +1102,12 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
rtl8125b_config_eee_phy(phydev);
}
+static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1152,6 +1158,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index d6136fe5c206..b03fae7a0f72 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -34,6 +34,7 @@ config RAVB
select MII
select MDIO_BITBANG
select PHYLIB
+ select RESET_CONTROLLER
help
Renesas Ethernet AVB device driver.
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cffed..b48935ec7e28 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -205,7 +205,11 @@ enum ravb_reg {
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
- CSR0 = 0x0800, /* RZ/G2L only */
+
+ /* TOE registers (RZ/G2L only) */
+ CSR0 = 0x0800,
+ CSR1 = 0x0804,
+ CSR2 = 0x0808,
};
@@ -978,16 +982,39 @@ enum CSR0_BIT {
CSR0_RPE = 0x00000020,
};
+enum CSR1_BIT {
+ CSR1_TIP4 = 0x00000001,
+ CSR1_TTCP4 = 0x00000010,
+ CSR1_TUDP4 = 0x00000020,
+ CSR1_TICMP4 = 0x00000040,
+ CSR1_TTCP6 = 0x00100000,
+ CSR1_TUDP6 = 0x00200000,
+ CSR1_TICMP6 = 0x00400000,
+ CSR1_THOP = 0x01000000,
+ CSR1_TROUT = 0x02000000,
+ CSR1_TAHD = 0x04000000,
+ CSR1_TDHD = 0x08000000,
+};
+
+enum CSR2_BIT {
+ CSR2_RIP4 = 0x00000001,
+ CSR2_RTCP4 = 0x00000010,
+ CSR2_RUDP4 = 0x00000020,
+ CSR2_RICMP4 = 0x00000040,
+ CSR2_RTCP6 = 0x00100000,
+ CSR2_RUDP6 = 0x00200000,
+ CSR2_RICMP6 = 0x00400000,
+ CSR2_RHOP = 0x01000000,
+ CSR2_RROUT = 0x02000000,
+ CSR2_RAHD = 0x04000000,
+ CSR2_RDHD = 0x08000000,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
-#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
-
-#define GBETH_RX_BUFF_MAX 8192
-#define GBETH_RX_DESC_DATA_SIZE 4080
-
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -1012,9 +1039,6 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- void (*rx_ring_free)(struct net_device *ndev, int q);
- void (*rx_ring_format)(struct net_device *ndev, int q);
- void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
@@ -1025,9 +1049,10 @@ struct ravb_hw_info {
netdev_features_t net_hw_features;
netdev_features_t net_features;
int stats_len;
- size_t max_rx_len;
u32 tccr_mask;
- u32 rx_max_buf_size;
+ u32 rx_max_frame_size;
+ u32 rx_max_desc_use;
+ u32 rx_desc_size;
unsigned aligned_tx: 1;
/* hardware features */
@@ -1060,8 +1085,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
- struct ravb_rx_desc *gbeth_rx_ring;
- struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ union {
+ struct ravb_rx_desc *desc;
+ struct ravb_ex_rx_desc *ex_desc;
+ void *raw;
+ } rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
@@ -1089,10 +1117,6 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
- int erra_irq;
- int mgmta_irq;
- int rx_irqs[NUM_RX_QUEUE];
- int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -1106,6 +1130,8 @@ struct ravb_private {
const struct ravb_hw_info *info;
struct reset_control *rstc;
+
+ u32 gti_tiv;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index f7566cfa45ca..d1be030c8848 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
+#include <net/ip.h>
#include "ravb.h"
@@ -38,16 +39,6 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
-static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
- "ch0", /* RAVB_BE */
- "ch1", /* RAVB_NC */
-};
-
-static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
- "ch18", /* RAVB_BE */
- "ch19", /* RAVB_NC */
-};
-
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -96,13 +87,13 @@ static void ravb_set_rate_gbeth(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
- case 10: /* 10BASE */
+ case 10: /* 10BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
break;
- case 100: /* 100BASE */
+ case 100: /* 100BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
break;
- case 1000: /* 1000BASE */
+ case 1000: /* 1000BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
break;
}
@@ -122,12 +113,23 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static void ravb_set_buffer_align(struct sk_buff *skb)
+static struct sk_buff *
+ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
+ gfp_t gfp_mask)
{
- u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+ struct sk_buff *skb;
+ u32 reserve;
+ skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
+ gfp_mask);
+ if (!skb)
+ return NULL;
+
+ reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
if (reserve)
skb_reserve(skb, RAVB_ALIGN - reserve);
+
+ return skb;
}
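ravb_alloc_skb() replaces the old ravb_set_buffer_align() helper: it over-allocates by RAVB_ALIGN - 1 bytes, then reserves just enough headroom to round skb->data up to the next RAVB_ALIGN boundary. A userspace sketch of the align-up arithmetic; the value 128 for RAVB_ALIGN is an assumption mirroring the driver header:

#include <stdint.h>
#include <stdio.h>

#define RAVB_ALIGN 128 /* assumed to match the definition in ravb.h */

/* round an address up to the next RAVB_ALIGN boundary, as the
 * skb_reserve() call in ravb_alloc_skb() effectively does */
static uintptr_t align_up(uintptr_t addr)
{
    uintptr_t reserve = addr & (RAVB_ALIGN - 1);

    return reserve ? addr + (RAVB_ALIGN - reserve) : addr;
}

int main(void)
{
    printf("%#lx\n", (unsigned long)align_up(0x1003)); /* 0x1080 */
    printf("%#lx\n", (unsigned long)align_up(0x1080)); /* unchanged */
    return 0;
}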
/* Get MAC address from the MAC address registers
@@ -200,6 +202,13 @@ static const struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+ unsigned int i)
+{
+ return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
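With the rx_ring union introduced in ravb.h, both descriptor layouts share a single allocation, and ravb_rx_get_desc() steps through it by the per-SoC rx_desc_size stride instead of typed pointer arithmetic. A userspace sketch of that byte-stride indexing; the two descriptor layouts are illustrative only:

#include <stddef.h>
#include <stdio.h>

struct small_desc { unsigned int ctrl; };               /* e.g. GbEth */
struct big_desc { unsigned int ctrl, ts_lo, ts_hi; };   /* e.g. R-Car */

union rx_ring {
    struct small_desc *desc;
    struct big_desc *ex_desc;
    void *raw;
};

/* same arithmetic as ravb_rx_get_desc(): base + stride * index */
static void *get_desc(union rx_ring ring, size_t desc_size, unsigned int i)
{
    return (char *)ring.raw + desc_size * i;
}

int main(void)
{
    struct big_desc descs[4];
    union rx_ring ring = { .raw = descs };

    /* indexing with the matching stride lands exactly on element 2 */
    printf("%d\n", get_desc(ring, sizeof(struct big_desc), 2) ==
                   (void *)&descs[2]);
    return 0;
}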
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
@@ -244,67 +253,40 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
- unsigned int i;
-
- if (!priv->gbeth_rx_ring)
- return;
-
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
- priv->rx_desc_dma[q]);
- priv->gbeth_rx_ring = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
unsigned int i;
- if (!priv->rx_ring[q])
+ if (!priv->rx_ring[q].raw)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+ struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
}
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
unsigned int i;
- info->rx_ring_free(ndev, q);
+ ravb_rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
@@ -335,7 +317,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_rx_desc *rx_desc;
@@ -343,45 +325,15 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
dma_addr_t dma_addr;
unsigned int i;
- rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- memset(priv->gbeth_rx_ring, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- /* We just set the data size to 0 for a failed mapping which
- * should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
- rx_desc->die_dt = DT_FEMPTY;
- }
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
-}
-
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc;
- unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- dma_addr_t dma_addr;
- unsigned int i;
-
- memset(priv->rx_ring[q], 0, rx_ring_size);
+ rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
- rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ rx_desc = ravb_rx_get_desc(priv, q, i);
+ rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -391,7 +343,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->rx_ring[q][i];
+ rx_desc = ravb_rx_get_desc(priv, q, i);
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@@ -400,7 +352,6 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
@@ -413,7 +364,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- info->rx_ring_format(ndev, q);
+ ravb_rx_ring_format(ndev, q);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -439,30 +390,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
- priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->gbeth_rx_ring;
-}
+ priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
-
- ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->rx_ring[q];
+ return priv->rx_ring[q].raw;
}
/* Init skb and descriptor buffer for Ethernet AVB */
@@ -484,10 +423,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
+ skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
if (!skb)
goto error;
- ravb_set_buffer_align(skb);
priv->rx_skb[q][i] = skb;
}
@@ -500,7 +438,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate all RX descriptors. */
- if (!info->alloc_rx_desc(ndev, q))
+ if (!ravb_alloc_rx_desc(ndev, q))
goto error;
priv->dirty_rx[q] = 0;
@@ -522,6 +460,36 @@ error:
return -ENOMEM;
}
+static void ravb_csum_init_gbeth(struct net_device *ndev)
+{
+ bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
+ bool rx_enable = ndev->features & NETIF_F_RXCSUM;
+
+ if (!(tx_enable || rx_enable))
+ goto done;
+
+ ravb_write(ndev, 0, CSR0);
+ if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
+ netdev_err(ndev, "Timeout enabling hardware checksum\n");
+
+ if (tx_enable)
+ ndev->features &= ~NETIF_F_HW_CSUM;
+
+ if (rx_enable)
+ ndev->features &= ~NETIF_F_RXCSUM;
+ } else {
+ if (tx_enable)
+ ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
+
+ if (rx_enable)
+ ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
+ CSR2);
+ }
+
+done:
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+}
+
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -536,7 +504,7 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
}
/* Receive frame limit set register */
- ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
@@ -553,7 +521,8 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
- ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ ravb_csum_init_gbeth(ndev);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
@@ -596,6 +565,7 @@ static void ravb_emac_init(struct net_device *ndev)
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_ring_init(ndev, RAVB_BE);
@@ -609,7 +579,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
ravb_write(ndev, 0x60000000, RCR);
/* Set Max Frame Length (RTC) */
- ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+ ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
/* Set FIFO size */
ravb_write(ndev, 0x00222200, TGC);
@@ -734,6 +704,30 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
}
}
+static void ravb_rx_csum_gbeth(struct sk_buff *skb)
+{
+ __wsum csum_ip_hdr, csum_proto;
+ u8 *hw_csum;
+
+ /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
+ * bytes appended to the packet data. The first 2 bytes are the IP
+ * header checksum and the last 2 bytes are the protocol checksum.
+ */
+ if (unlikely(skb->len < sizeof(__sum16) * 2))
+ return;
+
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+
+ hw_csum -= sizeof(__sum16);
+ csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ /* TODO: IPV6 Rx checksum */
+ if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
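A hedged userspace sketch of the trailer parsing done by ravb_rx_csum_gbeth(): the last 4 bytes of the buffer carry the two little-endian checksum results, which are read from the tail and then trimmed off:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
    /* 8 payload bytes followed by the 4-byte TOE trailer: the IP
     * header checksum comes first, the protocol checksum last */
    uint8_t frame[] = { 1, 2, 3, 4, 5, 6, 7, 8,
                        0x00, 0x00,   /* csum_ip_hdr == 0: validated */
                        0x00, 0x00 }; /* csum_proto == 0: validated */
    size_t len = sizeof(frame);

    uint16_t csum_proto = get_le16(&frame[len - 2]);
    uint16_t csum_ip_hdr = get_le16(&frame[len - 4]);

    len -= 4; /* mirrors the skb_trim() in the driver */
    printf("payload %zu bytes, ip=%#x proto=%#x -> %s\n", len,
           csum_ip_hdr, csum_proto,
           (!csum_ip_hdr && !csum_proto) ? "CHECKSUM_UNNECESSARY" : "none");
    return 0;
}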
static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
@@ -758,7 +752,8 @@ static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
skb = priv->rx_skb[RAVB_BE][entry];
priv->rx_skb[RAVB_BE][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+ ALIGN(priv->info->rx_max_frame_size, 16),
+ DMA_FROM_DEVICE);
return skb;
}
@@ -784,7 +779,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
@@ -815,6 +810,8 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
@@ -842,6 +839,8 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
@@ -851,23 +850,22 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ desc = &priv->rx_ring[q].desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break;
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent,
skb->data,
- GBETH_RX_BUFF_MAX,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -907,7 +905,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
@@ -941,7 +939,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -967,22 +965,21 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break; /* Better luck next round. */
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- le16_to_cpu(desc->ds_cc),
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -1088,11 +1085,23 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ irqreturn_t result = IRQ_HANDLED;
+
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev))) {
+ result = IRQ_NONE;
+ goto out_rpm_put;
+ }
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
+ return result;
}
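This guard (repeated in the other handlers below) keeps an interrupt handler from touching a runtime-suspended device: take a usage count without resuming, return IRQ_NONE if the device is not active, and drop the count without scheduling idle work on exit. A minimal kernel-style sketch of the pattern; struct my_priv and its dev member are hypothetical stand-ins for the driver's private data:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct my_priv {
    struct device *dev; /* hypothetical private data */
};

static irqreturn_t guarded_isr(int irq, void *dev_id)
{
    struct my_priv *priv = dev_id;
    struct device *dev = priv->dev;
    irqreturn_t ret = IRQ_NONE;

    /* bump the usage count without triggering a resume */
    pm_runtime_get_noresume(dev);

    /* a suspended device cannot have raised this (shared) IRQ */
    if (!pm_runtime_active(dev))
        goto out;

    /* ... safe to touch device registers here ... */
    ret = IRQ_HANDLED;
out:
    pm_runtime_put_noidle(dev);
    return ret;
}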
/* Error interrupt handler */
@@ -1172,9 +1181,15 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1218,6 +1233,9 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1226,9 +1244,15 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1250,6 +1274,9 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1257,8 +1284,14 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Network control/Best effort queue RX/TX */
@@ -1266,6 +1299,9 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1284,25 +1320,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- bool gptp = info->gptp || info->ccc_gac;
- struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- unsigned int entry;
- if (!gptp) {
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (gptp || desc->die_dt != DT_FEMPTY) {
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1732,89 +1759,159 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.set_wol = ravb_set_wol,
};
-static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
- struct net_device *ndev, struct device *dev,
- const char *ch)
+static int ravb_set_config_mode(struct net_device *ndev)
{
- char *name;
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
- name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!name)
- return -ENOMEM;
- error = request_irq(irq, handler, 0, name, ndev);
- if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", name);
+ if (info->gptp) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else if (info->ccc_gac) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
return error;
}
+static void ravb_set_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ if (!(info->gptp || info->ccc_gac))
+ return;
+
+ ravb_write(ndev, priv->gti_tiv, GTI);
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+}
+
+static int ravb_compute_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct device *dev = ndev->dev.parent;
+ unsigned long rate;
+ u64 inc;
+
+ if (!(info->gptp || info->ccc_gac))
+ return 0;
+
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
+ return -EINVAL;
+
+ inc = div64_ul(1000000000ULL << 20, rate);
+
+ if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
+ dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
+ inc, GTI_TIV_MIN, GTI_TIV_MAX);
+ return -EINVAL;
+ }
+ priv->gti_tiv = inc;
+
+ return 0;
+}
+
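ravb_compute_gti() runs once at probe time so ravb_set_gti() only has to write the cached value: the increment is the gPTP clock period in nanoseconds in 20-bit fixed point, i.e. 10^9 * 2^20 / rate. A userspace check of the arithmetic, assuming a 133 MHz example clock:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned long rate = 133000000UL; /* example gPTP clock, 133 MHz */

    /* nanoseconds per clock tick in <<20 fixed point, as computed by
     * ravb_compute_gti(); the driver additionally range-checks this
     * against GTI_TIV_MIN/GTI_TIV_MAX */
    uint64_t inc = (1000000000ULL << 20) / rate;

    /* 1e9 / 133e6 ~= 7.519 ns per tick, so inc ~= 7.519 * 2^20 */
    printf("gti.tiv = 0x%llx (%llu)\n",
           (unsigned long long)inc, (unsigned long long)inc);
    return 0;
}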
+/* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+
+ /* Fall back to legacy rgmii-*id behavior */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (priv->rxcidm)
+ set |= APSR_RDM;
+ if (priv->txcidm)
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &priv->pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
- if (!info->multi_irqs) {
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
- } else {
- error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
- dev, "ch22:multi");
- if (error)
- goto out_napi_off;
- error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
- dev, "ch24:emac");
- if (error)
- goto out_free_irq;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch0:rx_be");
- if (error)
- goto out_free_irq_emac;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch18:tx_be");
- if (error)
- goto out_free_irq_be_rx;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch1:rx_nc");
- if (error)
- goto out_free_irq_be_tx;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch19:tx_nc");
- if (error)
- goto out_free_irq_nc_rx;
-
- if (info->err_mgmt_irqs) {
- error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
- ndev, dev, "err_a");
- if (error)
- goto out_free_irq_nc_tx;
- error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
- ndev, dev, "mgmt_a");
- if (error)
- goto out_free_irq_erra;
- }
- }
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ goto out_napi_off;
+
+ /* Set AVB config mode */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_rpm_put;
+
+ ravb_set_delay_mode(ndev);
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_mgmta;
+ goto out_set_reset;
+
ravb_emac_init(ndev);
+ ravb_set_gti(ndev);
+
/* Initialise PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_init(ndev, priv->pdev);
/* PHY control start */
@@ -1828,29 +1925,14 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
-out_free_irq_mgmta:
- if (!info->multi_irqs)
- goto out_free_irq;
- if (info->err_mgmt_irqs)
- free_irq(priv->mgmta_irq, ndev);
-out_free_irq_erra:
- if (info->err_mgmt_irqs)
- free_irq(priv->erra_irq, ndev);
-out_free_irq_nc_tx:
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
-out_free_irq_nc_rx:
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
-out_free_irq_be_tx:
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
-out_free_irq_be_rx:
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
-out_free_irq_emac:
- free_irq(priv->emac_irq, ndev);
-out_free_irq:
- free_irq(ndev->irq, ndev);
+out_set_reset:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
@@ -1935,6 +2017,36 @@ out_unlock:
rtnl_unlock();
}
+static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
+{
+ struct iphdr *ip = ip_hdr(skb);
+
+ /* TODO: Need to add support for VLAN tag 802.1Q */
+ if (skb_vlan_tag_present(skb))
+ return false;
+
+ /* TODO: Need to add hardware checksum for IPv6 */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ switch (ip->protocol) {
+ case IPPROTO_TCP:
+ break;
+ case IPPROTO_UDP:
+ /* If the checksum value in the UDP header field is 0, TOE does
+ * not calculate a checksum for the UDP part of this frame, as the
+ * UDP checksum is optional in IPv4 per the standard.
+ */
+ if (udp_hdr(skb)->check == 0)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
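A userspace sketch of the eligibility rule above; protocol numbers are the IANA values, and the UDP case reflects RFC 768, where a transmitted checksum of 0 in IPv4 means no checksum was computed:

#include <stdint.h>
#include <stdio.h>

/* mirrors ravb_can_tx_csum_gbeth(), with the header lookups reduced
 * to plain parameters for illustration */
static int can_offload(int is_ipv4, int ip_proto, uint16_t udp_check)
{
    if (!is_ipv4)
        return 0;           /* IPv6 offload is still a TODO */
    if (ip_proto == 6)      /* TCP */
        return 1;
    if (ip_proto == 17)     /* UDP */
        return udp_check != 0;
    return 0;
}

int main(void)
{
    printf("%d\n", can_offload(1, 17, 0));      /* 0: software fallback */
    printf("%d\n", can_offload(1, 17, 0x1234)); /* 1: offload to TOE */
    return 0;
}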
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -1950,6 +2062,9 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 entry;
u32 len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
+ skb_checksum_help(skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
num_tx_desc) {
@@ -2084,8 +2199,15 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
+ struct device *dev = &priv->pdev->dev;
nstats = &ndev->stats;
+
+ pm_runtime_get_noresume(dev);
+
+ if (!pm_runtime_active(dev))
+ goto out_rpm_put;
+
stats0 = &priv->stats[RAVB_BE];
if (info->tx_counters) {
@@ -2127,6 +2249,8 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats->rx_over_errors += stats1->rx_over_errors;
}
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return nstats;
}
@@ -2149,6 +2273,8 @@ static int ravb_close(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct device *dev = &priv->pdev->dev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -2157,8 +2283,16 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, RIC2);
ravb_write(ndev, 0, TIC);
+ /* PHY disconnect */
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ }
+
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -2175,29 +2309,8 @@ static int ravb_close(struct net_device *ndev)
}
}
- /* PHY disconnect */
- if (ndev->phydev) {
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- }
-
cancel_work_sync(&priv->work);
- if (info->multi_irqs) {
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
- free_irq(priv->emac_irq, ndev);
- if (info->err_mgmt_irqs) {
- free_irq(priv->erra_irq, ndev);
- free_irq(priv->mgmta_irq, ndev);
- }
- }
- free_irq(ndev->irq, ndev);
-
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -2207,6 +2320,17 @@ static int ravb_close(struct net_device *ndev)
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
+ /* Update statistics. */
+ ravb_get_stats(ndev);
+
+ /* Set reset mode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -2330,11 +2454,58 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
+ u32 val, u32 mask)
+{
+ u32 csr0 = CSR0_TPE | CSR0_RPE;
+ int ret;
+
+ ravb_write(ndev, csr0 & ~mask, CSR0);
+ ret = ravb_wait(ndev, CSR0, mask, 0);
+ if (!ret)
+ ravb_write(ndev, val, reg);
+
+ ravb_write(ndev, csr0, CSR0);
+
+ return ret;
+}
+
static int ravb_set_features_gbeth(struct net_device *ndev,
netdev_features_t features)
{
- /* Place holder */
- return 0;
+ netdev_features_t changed = ndev->features ^ features;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
+ if (ret)
+ goto done;
+ }
+
+ if (changed & NETIF_F_HW_CSUM) {
+ if (features & NETIF_F_HW_CSUM)
+ val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
+ if (ret)
+ goto done;
+ }
+
+done:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
}
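ravb_csum_init_gbeth() earlier in this patch and ravb_endisable_csum_gbeth() here share one sequence for reprogramming the TOE: clear the enable bit for the affected direction, wait for it to quiesce, write the new CSR1/CSR2 value, then restore the enables. A generic userspace sketch of that sequence; the register map and in-memory MMIO stub are invented for illustration:

#include <stdint.h>
#include <stdio.h>

enum { REG_EN, REG_CFG, NUM_REGS }; /* invented register map */
#define EN_RX 0x1u
#define EN_TX 0x2u

static uint32_t regs[NUM_REGS]; /* in-memory stand-in for MMIO */

static uint32_t reg_read(int r) { return regs[r]; }
static void reg_write(int r, uint32_t v) { regs[r] = v; }

/* poll until the masked bits read back as zero, like ravb_wait() */
static int wait_cleared(int r, uint32_t mask)
{
    for (int i = 0; i < 1000; i++)
        if (!(reg_read(r) & mask))
            return 0;
    return -1; /* timeout */
}

static int reprogram_csum(uint32_t cfg, uint32_t quiesce_mask)
{
    uint32_t en = EN_RX | EN_TX;

    reg_write(REG_EN, en & ~quiesce_mask); /* stop the affected path */
    if (wait_cleared(REG_EN, quiesce_mask))
        return -1;
    reg_write(REG_CFG, cfg);    /* safe to change config while idle */
    reg_write(REG_EN, en);      /* restart both directions */
    return 0;
}

int main(void)
{
    printf("%d\n", reprogram_csum(0xf, EN_RX)); /* 0 on success */
    return 0;
}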
static int ravb_set_features_rcar(struct net_device *ndev,
@@ -2345,8 +2516,6 @@ static int ravb_set_features_rcar(struct net_device *ndev,
if (changed & NETIF_F_RXCSUM)
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
- ndev->features = features;
-
return 0;
}
@@ -2355,8 +2524,24 @@ static int ravb_set_features(struct net_device *ndev,
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_active(dev))
+ ret = info->set_feature(ndev, features);
+ else
+ ret = 0;
+
+ pm_runtime_put_noidle(dev);
+
+ if (ret)
+ return ret;
+
+ ndev->features = features;
- return info->set_feature(ndev, features);
+ return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2430,9 +2615,6 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2443,9 +2625,10 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2456,9 +2639,6 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2469,9 +2649,10 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.aligned_tx = 1,
.gptp = 1,
.nc_queues = 1,
@@ -2479,9 +2660,6 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2492,9 +2670,10 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2504,9 +2683,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
};
static const struct ravb_hw_info gbeth_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_gbeth,
- .rx_ring_format = ravb_rx_ring_format_gbeth,
- .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
.receive = ravb_rx_gbeth,
.set_rate = ravb_set_rate_gbeth,
.set_feature = ravb_set_features_gbeth,
@@ -2514,10 +2690,13 @@ static const struct ravb_hw_info gbeth_hw_info = {
.emac_init = ravb_emac_init_gbeth,
.gstrings_stats = ravb_gstrings_stats_gbeth,
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
- .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
.tccr_mask = TCCR_TSRQ0,
- .rx_max_buf_size = SZ_8K,
+ .rx_max_frame_size = SZ_8K,
+ .rx_max_desc_use = 4080,
+ .rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
.tx_counters = 1,
.carrier_counters = 1,
@@ -2537,100 +2716,91 @@ static const struct of_device_id ravb_match_table[] = {
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
-static int ravb_set_gti(struct net_device *ndev)
+static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
+ const char *ch, int *irq, irq_handler_t handler)
{
- struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- struct device *dev = ndev->dev.parent;
- unsigned long rate;
- uint64_t inc;
-
- if (info->gptp_ref_clk)
- rate = clk_get_rate(priv->gptp_clk);
- else
- rate = clk_get_rate(priv->clk);
- if (!rate)
- return -EINVAL;
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ struct device *dev = &pdev->dev;
+ const char *dev_name;
+ unsigned long flags;
+ int error, irq_num;
- inc = div64_ul(1000000000ULL << 20, rate);
+ if (irq_name) {
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!dev_name)
+ return -ENOMEM;
- if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
- dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
- inc, GTI_TIV_MIN, GTI_TIV_MAX);
- return -EINVAL;
+ irq_num = platform_get_irq_byname(pdev, irq_name);
+ flags = 0;
+ } else {
+ dev_name = ndev->name;
+ irq_num = platform_get_irq(pdev, 0);
+ flags = IRQF_SHARED;
}
+ if (irq_num < 0)
+ return irq_num;
- ravb_write(ndev, inc, GTI);
+ if (irq)
+ *irq = irq_num;
- return 0;
+ error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+
+ return error;
}
-static int ravb_set_config_mode(struct net_device *ndev)
+static int ravb_setup_irqs(struct ravb_private *priv)
{
- struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct net_device *ndev = priv->ndev;
+ const char *irq_name, *emac_irq_name;
int error;
- if (info->gptp) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
- if (error)
- return error;
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else if (info->ccc_gac) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ if (!info->multi_irqs)
+ return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
+
+ if (info->err_mgmt_irqs) {
+ irq_name = "dia";
+ emac_irq_name = "line3";
} else {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ irq_name = "ch22";
+ emac_irq_name = "ch24";
}
- return error;
-}
-
-/* Set tx and rx clock internal delay modes */
-static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- bool explicit_delay = false;
- u32 delay;
+ error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
+ if (error)
+ return error;
- if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 1800, according to DT bindings */
- priv->rxcidm = !!delay;
- explicit_delay = true;
- }
- if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 2000, according to DT bindings */
- priv->txcidm = !!delay;
- explicit_delay = true;
- }
+ error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
+ ravb_emac_interrupt);
+ if (error)
+ return error;
- if (explicit_delay)
- return;
+ if (info->err_mgmt_irqs) {
+ error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
- /* Fall back to legacy rgmii-*id behavior */
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- priv->rxcidm = 1;
- priv->rgmii_override = 1;
+ error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- priv->txcidm = 1;
- priv->rgmii_override = 1;
- }
-}
+ error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
-static void ravb_set_delay_mode(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- u32 set = 0;
+ error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
+ if (error)
+ return error;
- if (priv->rxcidm)
- set |= APSR_RDM;
- if (priv->txcidm)
- set |= APSR_TDM;
- ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+ error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
+
+ return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
static int ravb_probe(struct platform_device *pdev)
@@ -2640,9 +2810,8 @@ static int ravb_probe(struct platform_device *pdev)
struct reset_control *rstc;
struct ravb_private *priv;
struct net_device *ndev;
- int error, irq, q;
struct resource *res;
- int i;
+ int error, q;
if (!np) {
dev_err(&pdev->dev,
@@ -2650,7 +2819,7 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
@@ -2669,25 +2838,6 @@ static int ravb_probe(struct platform_device *pdev)
if (error)
goto out_free_netdev;
- pm_runtime_enable(&pdev->dev);
- error = pm_runtime_resume_and_get(&pdev->dev);
- if (error < 0)
- goto out_rpm_disable;
-
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "dia");
- else
- irq = platform_get_irq_byname(pdev, "ch22");
- } else {
- irq = platform_get_irq(pdev, 0);
- }
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- ndev->irq = irq;
-
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
@@ -2702,10 +2852,43 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
}
+ error = ravb_setup_irqs(priv);
+ if (error)
+ goto out_reset_assert;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ error = PTR_ERR(priv->clk);
+ goto out_reset_assert;
+ }
+
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_reset_assert;
+ }
+ }
+
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_reset_assert;
+ }
+ clk_prepare(priv->refclk);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
- goto out_release;
+ goto out_rpm_put;
}
/* The Ether-specific entries in the device structure. */
@@ -2716,79 +2899,14 @@ static int ravb_probe(struct platform_device *pdev)
error = of_get_phy_mode(np, &priv->phy_interface);
if (error && error != -ENODEV)
- goto out_release;
+ goto out_rpm_put;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "line3");
- else
- irq = platform_get_irq_byname(pdev, "ch24");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->emac_irq = irq;
- for (i = 0; i < NUM_RX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->rx_irqs[i] = irq;
- }
- for (i = 0; i < NUM_TX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->tx_irqs[i] = irq;
- }
-
- if (info->err_mgmt_irqs) {
- irq = platform_get_irq_byname(pdev, "err_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->erra_irq = irq;
-
- irq = platform_get_irq_byname(pdev, "mgmt_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->mgmta_irq = irq;
- }
- }
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- error = PTR_ERR(priv->clk);
- goto out_release;
- }
-
- priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
- if (IS_ERR(priv->refclk)) {
- error = PTR_ERR(priv->refclk);
- goto out_release;
- }
- clk_prepare_enable(priv->refclk);
-
- if (info->gptp_ref_clk) {
- priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
- if (IS_ERR(priv->gptp_clk)) {
- error = PTR_ERR(priv->gptp_clk);
- goto out_disable_refclk;
- }
- clk_prepare_enable(priv->gptp_clk);
- }
-
- ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_frame_size -
+ (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2802,25 +2920,11 @@ static int ravb_probe(struct platform_device *pdev)
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
- /* Set AVB config mode */
- error = ravb_set_config_mode(ndev);
+ error = ravb_compute_gti(ndev);
if (error)
- goto out_disable_gptp_clk;
-
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- }
-
- if (info->internal_delay) {
- ravb_parse_delay_mode(np, ndev);
- ravb_set_delay_mode(ndev);
- }
+ ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2831,22 +2935,22 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Initialise HW timestamp list */
INIT_LIST_HEAD(&priv->ts_skb_list);
- /* Initialise PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_init(ndev, pdev);
-
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+ /* Set config mode as this is needed for PHY initialization. */
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ goto out_rpm_put;
+
/* Read and set MAC address */
ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -2859,9 +2963,14 @@ static int ravb_probe(struct platform_device *pdev)
error = ravb_mdio_init(priv);
if (error) {
dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
+ goto out_reset_mode;
}
+ /* Undo previous switch to config opmode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ goto out_mdio_release;
+
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
@@ -2877,7 +2986,8 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- platform_set_drvdata(pdev, ndev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -2886,22 +2996,19 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+out_mdio_release:
ravb_mdio_release(priv);
-out_dma_free:
+out_reset_mode:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
-
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-out_disable_gptp_clk:
- clk_disable_unprepare(priv->gptp_clk);
-out_disable_refclk:
- clk_disable_unprepare(priv->refclk);
-out_release:
+out_rpm_put:
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_unprepare(priv->refclk);
+out_reset_assert:
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
@@ -2913,6 +3020,12 @@ static void ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int error;
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ return;
unregister_netdev(ndev);
if (info->nc_queues)
@@ -2921,20 +3034,13 @@ static void ravb_remove(struct platform_device *pdev)
ravb_mdio_release(priv);
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- ravb_set_opmode(ndev, CCC_OPC_RESET);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ clk_unprepare(priv->refclk);
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2960,6 +3066,9 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return enable_irq_wake(priv->emac_irq);
}
@@ -2967,6 +3076,20 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ /* Set reset mode to rearm the WoL logic. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ /* Set AVB config mode. */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ return error;
+
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2980,102 +3103,96 @@ static int ravb_wol_restore(struct net_device *ndev)
return disable_irq_wake(priv->emac_irq);
}
-static int __maybe_unused ravb_suspend(struct device *dev)
+static int ravb_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
- return 0;
+ goto reset_assert;
netif_device_detach(ndev);
if (priv->wol_enabled)
- ret = ravb_wol_setup(ndev);
- else
- ret = ravb_close(ndev);
+ return ravb_wol_setup(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_stop(ndev);
+ ret = ravb_close(ndev);
+ if (ret)
+ return ret;
- return ret;
+ ret = pm_runtime_force_suspend(&priv->pdev->dev);
+ if (ret)
+ return ret;
+
+reset_assert:
+ return reset_control_assert(priv->rstc);
}
-static int __maybe_unused ravb_resume(struct device *dev)
+static int ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- int ret = 0;
-
- /* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled) {
- ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
- if (ret)
- return ret;
- }
-
- /* All register have been reset to default values.
- * Restore all registers which where setup at probe time and
- * reopen device if it was running before system suspended.
- */
+ int ret;
- /* Set AVB config mode */
- ret = ravb_set_config_mode(ndev);
+ ret = reset_control_deassert(priv->rstc);
if (ret)
return ret;
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
+ if (!netif_running(ndev))
+ return 0;
+
+ /* If WoL is enabled, restore the interface. */
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
-
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}
- if (info->internal_delay)
- ravb_set_delay_mode(ndev);
+ /* Reopening the interface will restore the device to the working state. */
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ goto out_rpm_put;
- /* Restore descriptor base address table */
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ ravb_set_rx_mode(ndev);
+ netif_device_attach(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_init(ndev, priv->pdev);
+ return 0;
- if (netif_running(ndev)) {
- if (priv->wol_enabled) {
- ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- }
- ret = ravb_open(ndev);
- if (ret < 0)
- return ret;
- ravb_set_rx_mode(ndev);
- netif_device_attach(ndev);
+out_rpm_put:
+ if (!priv->wol_enabled) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
return ret;
}
-static int __maybe_unused ravb_runtime_nop(struct device *dev)
+static int ravb_runtime_suspend(struct device *dev)
{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ clk_disable(priv->refclk);
+
return 0;
}
+static int ravb_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return clk_enable(priv->refclk);
+}
+
static const struct dev_pm_ops ravb_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
- SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
+ SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
+ RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};
static struct platform_driver ravb_driver = {
@@ -3083,7 +3200,7 @@ static struct platform_driver ravb_driver = {
.remove_new = ravb_remove,
.driver = {
.name = "ravb",
- .pm = &ravb_dev_pm_ops,
+ .pm = pm_ptr(&ravb_dev_pm_ops),
.of_match_table = ravb_match_table,
},
};
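
The ravb rework above is a good template for modern runtime PM: the old no-op runtime callbacks become real ones that gate the reference clock, system sleep reuses them via pm_runtime_force_suspend()/pm_runtime_force_resume(), and the __maybe_unused annotations go away because SYSTEM_SLEEP_PM_OPS/RUNTIME_PM_OPS plus pm_ptr() let the compiler discard the ops when CONFIG_PM is off. A minimal sketch of that pattern, using hypothetical "foo" names rather than ravb's:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct foo_priv {
	struct clk *refclk;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	clk_disable(priv->refclk);	/* clock stays prepared */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return clk_enable(priv->refclk);
}

static int foo_suspend(struct device *dev)
{
	/* System sleep simply forces the runtime-PM path. */
	return pm_runtime_force_suspend(dev);
}

static int foo_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		/* NULL when CONFIG_PM=n; unused callbacks get discarded. */
		.pm	= pm_ptr(&foo_pm_ops),
	},
};
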
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 9e59669a93dd..755db89db909 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -32,7 +32,6 @@
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <generated/utsrelease.h>
#include "rocker_hw.h"
#include "rocker.h"
@@ -2227,7 +2226,6 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index d14e0cfc3a6b..1458939c3bf5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -503,7 +503,6 @@ struct sxgbe_priv_data {
bool tx_path_in_lpi_mode;
int lpi_irq;
int eee_enabled;
- int eee_active;
int tx_lpi_timer;
};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 8ba017ec9849..4a439b34114d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -133,22 +133,20 @@ static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
static int sxgbe_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (!priv->hw_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 71439825ea4e..ecbe3994f2b1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -130,7 +130,6 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
if (phy_init_eee(ndev->phydev, true))
return false;
- priv->eee_active = 1;
timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
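
This hunk and several below (stmmac, cpsw, am65-cpsw, icssg) are part of the tree-wide move from struct ethtool_eee to struct ethtool_keee, the kernel-internal EEE representation that carries link-mode bitmaps instead of u32 masks. eee_enabled/eee_active are now reported by phylib and the ethtool core, which is why the per-driver assignments are deleted rather than converted. A hedged sketch of a ->get_eee op under the new type, with hypothetical "foo" names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {
	bool	hw_has_eee;
	u32	tx_lpi_timer;
};

/* Fill in only what the core cannot know (the LPI timer here) and let
 * phylib report eee_enabled/eee_active and the link-mode bitmaps.
 */
static int foo_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!priv->hw_has_eee)
		return -EOPNOTSUPP;

	edata->tx_lpi_timer = priv->tx_lpi_timer;

	return phy_ethtool_get_eee(dev->phydev, edata);
}
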
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 175bd9cdfdac..551f890db90a 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -595,7 +595,7 @@ void efx_stop_all(struct efx_nic *efx)
efx_stop_datapath(efx);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index e001f27085c6..1cb32aedd89c 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2085,7 +2085,7 @@ int ef4_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
static void ef4_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fac227d372db..dcd901eccfc8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index e4b294b8e9ac..88e5bc347a44 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -605,7 +605,7 @@ static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats
return efx->type->update_stats(efx, full_stats, core_stats);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 4579f43484c3..219fb358a646 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
index a7a9ab304e13..71f9b5ec5ae4 100644
--- a/drivers/net/ethernet/sfc/siena/tx_common.c
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -317,11 +317,10 @@ static int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 9f2393d34371..2adb132b2f7e 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -336,11 +336,10 @@ int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
index 64a6768f75ea..ddf149db8180 100644
--- a/drivers/net/ethernet/sfc/tx_tso.c
+++ b/drivers/net/ethernet/sfc/tx_tso.c
@@ -174,8 +174,8 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
unsigned int header_len, in_len;
dma_addr_t dma_addr;
- st->ip_off = skb_network_header(skb) - skb->data;
- st->tcp_off = skb_transport_header(skb) - skb->data;
+ st->ip_off = skb_network_offset(skb);
+ st->tcp_off = skb_transport_offset(skb);
header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
in_len = skb_headlen(skb) - header_len;
st->header_len = header_len;
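
These sfc hunks (and the sunvnet one further down) swap open-coded pointer arithmetic for the dedicated skb accessors: skb_transport_offset(skb) is defined as skb_transport_header(skb) - skb->data, with matching helpers for the network and inner-transport headers, so the change is purely a readability fix. For illustration, the header-length computation with the helpers, assuming a non-encapsulated TCP skb whose headers sit in the linear area:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* L2..L4 header length of a plain TCP skb; tcp_hdr(skb)->doff counts
 * the TCP header in 32-bit words, hence the << 2.
 */
static unsigned int foo_tcp_header_len(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + (tcp_hdr(skb)->doff << 2u);
}
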
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 758347616535..78ff3af7911a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -98,6 +98,7 @@ static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+MODULE_DESCRIPTION("SMC 91C9x/91C1xxx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc91x");
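
This is the first of several hunks (smsc911x, smsc9420, dwmac-socfpga, cpsw-common below) adding MODULE_DESCRIPTION() tags: modpost warns about modules that lack one in W=1 builds, and the string is what `modinfo` prints. The whole pattern is:

#include <linux/module.h>

MODULE_DESCRIPTION("Example Ethernet driver");	/* shown by modinfo */
MODULE_LICENSE("GPL");
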
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 31cb7d0166f0..74f1ccc96459 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -56,6 +56,7 @@
#define SMSC_MDIONAME "smsc911x-mdio"
#define SMSC_DRV_VERSION "2008-10-21"
+MODULE_DESCRIPTION("SMSC LAN911x/LAN921x Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
MODULE_ALIAS("platform:smsc911x");
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index e1c4a11c1f18..15cb96c2506d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -26,6 +26,7 @@
#define DRV_DESCRIPTION "SMSC LAN9420 driver"
#define DRV_VERSION "1.01"
+MODULE_DESCRIPTION("SMSC LAN9420 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 85dcda51df05..4ec61f1ee71a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -165,9 +165,9 @@ config DWMAC_STARFIVE
help
Support for ethernet controllers on StarFive RISC-V SoCs
- This selects the StarFive platform specific glue layer support for
- the stmmac device driver. This driver is used for StarFive JH7110
- ethernet controller.
+ This selects the StarFive platform specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ StarFive JH7100 and JH7110 ethernet controllers.
config DWMAC_STI
tristate "STi GMAC support"
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 5ba606a596e7..a6fefe675ef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -225,6 +225,8 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
@@ -369,6 +371,7 @@ enum request_irq_err {
REQ_IRQ_ERR_ALL,
REQ_IRQ_ERR_TX,
REQ_IRQ_ERR_RX,
+ REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
REQ_IRQ_ERR_LPI,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 31631e3f89d0..e254b21fdb59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,6 +106,7 @@ struct qcom_ethqos {
struct clk *link_clk;
struct phy *serdes_phy;
unsigned int speed;
+ int serdes_speed;
phy_interface_t phy_mode;
const struct ethqos_emac_por *por;
@@ -169,6 +170,9 @@ static void rgmii_dump(void *priv)
static void
ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
{
+ if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
+ return;
+
switch (speed) {
case SPEED_1000:
ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
@@ -606,19 +610,39 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
*/
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
+ struct net_device *dev = platform_get_drvdata(ethqos->pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
int val;
val = readl(ethqos->mac_base + MAC_CTRL_REG);
switch (ethqos->speed) {
+ case SPEED_2500:
+ val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_2500)
+ phy_set_speed(ethqos->serdes_phy, SPEED_2500);
+ ethqos->serdes_speed = SPEED_2500;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ break;
case SPEED_1000:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
@@ -627,6 +651,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
}
@@ -728,7 +756,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct qcom_ethqos *ethqos;
- int ret;
+ int ret, i;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -799,6 +827,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
"Failed to get serdes phy\n");
ethqos->speed = SPEED_1000;
+ ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
@@ -822,6 +851,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->serdes_powerdown = qcom_ethqos_serdes_powerdown;
}
+ /* Enable TSO on queue0 and enable TBS on rest of the queues */
+ for (i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
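
Besides wiring up 2500BASE-X, the SGMII path above now caches the rate last programmed into the serdes lane in ->serdes_speed, so phy_set_speed() only runs when the lane rate actually changes; at 100M and 10M the lane keeps running at 1000M and only the MAC side is slowed down. A sketch of the program-only-on-change guard, assuming a hypothetical cached field:

#include <linux/phy/phy.h>

struct foo_serdes {
	struct phy *serdes_phy;
	int serdes_speed;	/* last rate programmed into the lane */
};

static void foo_set_serdes_speed(struct foo_serdes *s, int speed)
{
	/* Avoid redundant serdes reconfiguration on MAC-only changes. */
	if (s->serdes_speed != speed) {
		phy_set_speed(s->serdes_phy, speed);
		s->serdes_speed = speed;
	}
}
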
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ba2ce776bd4d..68f85e4605cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -585,4 +585,5 @@ static struct platform_driver socfpga_dwmac_driver = {
};
module_platform_driver(socfpga_dwmac_driver);
+MODULE_DESCRIPTION("Altera SOC DWMAC Specific Glue layer");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 5d630affb4d1..4e1076faee0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,13 +15,20 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
-#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+
+#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
+
+struct starfive_dwmac_data {
+ unsigned int gtxclk_dlychain;
+};
struct starfive_dwmac {
struct device *dev;
struct clk *clk_tx;
+ const struct starfive_dwmac_data *data;
};
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
@@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
break;
@@ -89,6 +98,14 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+ if (dwmac->data) {
+ err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN,
+ dwmac->data->gtxclk_dlychain);
+ if (err)
+ return dev_err_probe(dwmac->dev, err,
+ "error selecting gtxclk delay chain\n");
+ }
+
return 0;
}
@@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
+ dwmac->data = device_get_match_data(&pdev->dev);
+
dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
if (IS_ERR(dwmac->clk_tx))
return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
@@ -144,8 +163,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+static const struct starfive_dwmac_data jh7100_data = {
+ .gtxclk_dlychain = 4,
+};
+
static const struct of_device_id starfive_dwmac_match[] = {
- { .compatible = "starfive,jh7110-dwmac" },
+ { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data },
+ { .compatible = "starfive,jh7110-dwmac" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
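
The JH7100 support above hangs a per-SoC struct off the of_device_id and reads it back with device_get_match_data(); entries without .data (JH7110 here) yield NULL, which the probe path uses to skip the delay-chain write. The general shape, with hypothetical "vendor,foo" compatibles:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_data {
	unsigned int gtxclk_dlychain;
};

static const struct foo_data foo_a_data = {
	.gtxclk_dlychain = 4,
};

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-a", .data = &foo_a_data },
	{ .compatible = "vendor,foo-b" },	/* no per-SoC quirk */
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* NULL for foo-b, so the quirk stays optional. */
	const struct foo_data *data = device_get_match_data(&pdev->dev);

	if (data)
		dev_info(&pdev->dev, "delay chain: %u\n",
			 data->gtxclk_dlychain);
	return 0;
}
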
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 358e7dcb6a9a..17d9120db5fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -92,7 +92,7 @@
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
-/* Following DMA defines are chanels oriented */
+/* Following DMA defines are channel-oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 14c9d2637dfe..dff02d75d519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -86,10 +86,6 @@ struct stmmac_counters {
unsigned int mmc_rx_discard_octets_gb;
unsigned int mmc_rx_align_err_frames;
- /* IPC */
- unsigned int mmc_rx_ipc_intr_mask;
- unsigned int mmc_rx_ipc_intr;
-
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 8597c6abae8d..7eb477faa75a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -316,9 +316,6 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
- /* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f155e4841c62..dddcaa9220cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,6 +31,7 @@ struct stmmac_resources {
int wol_irq;
int lpi_irq;
int irq;
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -298,6 +299,7 @@ struct stmmac_priv {
void __iomem *ptpaddr;
void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -306,6 +308,7 @@ struct stmmac_priv {
char int_name_mac[IFNAMSIZ + 9];
char int_name_wol[IFNAMSIZ + 9];
char int_name_lpi[IFNAMSIZ + 9];
+ char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index 4da6ccc17c20..c9693f77e1f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
void __iomem *est_addr = priv->estaddr;
u32 txqcnt_mask = BIT(txqcnt) - 1;
+ int i;
status = readl(est_addr + EST_STATUS);
@@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
+ for (i = 0; i < txqcnt; i++) {
+ if (feqn & BIT(i))
+ x->mtl_est_txq_hlbf[i]++;
+ }
+
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ec44becf0e2d..e1537a57815f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -243,8 +243,6 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
STMMAC_MMC_STAT(mmc_rx_align_err_frames),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
@@ -897,15 +895,13 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
edata->tx_lpi_enabled = priv->tx_lpi_enabled;
@@ -913,7 +909,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7c6aef033a45..24cd80490d19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2506,6 +2506,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ continue;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3591,6 +3598,10 @@ static void stmmac_free_irq(struct net_device *dev,
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
+ case REQ_IRQ_ERR_SFTY:
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
+ free_irq(priv->sfty_irq, dev);
+ fallthrough;
case REQ_IRQ_ERR_WOL:
free_irq(dev->irq, dev);
fallthrough;
@@ -3661,6 +3672,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctable/Uncorrectable
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ int_name = priv->int_name_sfty;
+ sprintf(int_name, "%s:%s", dev->name, "safety");
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty MSI %d (error: %d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
/* Request the Safety Feature Correctible Error line in
* case of another line is used
*/
@@ -3798,6 +3826,21 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctable/Uncorrectable
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
return 0;
irq_error:
@@ -4500,6 +4543,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ skb->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
+
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4717,6 +4767,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
dev_kfree_skb(skb);
priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
@@ -4873,6 +4924,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdpf->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ return STMMAC_XDP_CONSUMED;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -6006,10 +6064,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
priv->tx_path_in_lpi_mode = false;
}
- for (queue = 0; queue < queues_count; queue++) {
- status = stmmac_host_mtl_irq_status(priv, priv->hw,
- queue);
- }
+ for (queue = 0; queue < queues_count; queue++)
+ stmmac_host_mtl_irq_status(priv, priv->hw, queue);
/* PCS link status */
if (priv->hw->pcs &&
@@ -6044,8 +6100,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
- /* Check if a fatal error happened */
- if (stmmac_safety_feat_interrupt(priv))
+ /* Check ASP error if it isn't delivered via an individual IRQ */
+ if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
/* To handle Common interrupts */
@@ -7474,6 +7530,7 @@ int stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
+ priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index aefc121464b5..13a30e6df4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -110,6 +110,8 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
/* Enable and restart the Auto-Negotiation */
if (ane)
value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+ else
+ value &= ~GMAC_AN_CTRL_ANE;
/* In case of MAC-2-MAC connection, block is configured to operate
* according to MAC conf register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 70eadc83ca68..54797edc9b38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -743,6 +743,14 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
+ stmmac_res->sfty_irq =
+ platform_get_irq_byname_optional(pdev, "sfty");
+ if (stmmac_res->sfty_irq < 0) {
+ if (stmmac_res->sfty_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "IRQ sfty not found\n");
+ }
+
stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
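
The new safety line is looked up with platform_get_irq_byname_optional(), which, unlike platform_get_irq_byname(), does not log an error when the interrupt simply is not described. Only -EPROBE_DEFER must be propagated; any other negative value leaves sfty_irq unset, and the driver falls back to the shared line (see the priv->sfty_irq <= 0 check in stmmac_interrupt() above). A condensed sketch of that lookup contract:

#include <linux/platform_device.h>

/* Optional interrupt lookup: absence is not an error. */
static int foo_get_optional_irq(struct platform_device *pdev, int *out)
{
	int irq = platform_get_irq_byname_optional(pdev, "sfty");

	if (irq == -EPROBE_DEFER)
		return irq;	/* provider not ready yet */

	*out = irq > 0 ? irq : 0;	/* 0 = feature disabled */
	return 0;
}
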
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 26fa33e5ec34..cce00719937d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
return time;
}
-static int tc_setup_taprio(struct stmmac_priv *priv,
- struct tc_taprio_qopt_offload *qopt)
+static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct plat_stmmacenet_data *plat = priv->plat;
+ u32 num_tc = qopt->mqprio.qopt.num_tc;
+ u32 offset, count, i, j;
+
+ /* QueueMaxSDU received by the driver corresponds to the Linux traffic
+ * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
+ */
+ for (i = 0; i < num_tc; i++) {
+ if (!qopt->max_sdu[i])
+ continue;
+
+ offset = qopt->mqprio.qopt.offset[i];
+ count = qopt->mqprio.qopt.count[i];
+
+ for (j = offset; j < offset + count; j++)
+ plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ }
+}
+
+static int tc_taprio_configure(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
@@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- else if (qopt->cmd != TAPRIO_CMD_REPLACE)
- return -EOPNOTSUPP;
if (qopt->num_entries >= dep)
return -EINVAL;
@@ -1045,6 +1065,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ter = qopt->cycle_time_extension;
+ tc_taprio_map_maxsdu_txq(priv, qopt);
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1078,6 +1100,11 @@ disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
+ /* Reset taprio status */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ priv->xstats.max_sdu_txq_drop[i] = 0;
+ priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ }
mutex_unlock(&priv->plat->est->lock);
}
@@ -1095,6 +1122,57 @@ disable:
return ret;
}
+static void tc_taprio_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u64 window_drops = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ window_drops += priv->xstats.max_sdu_txq_drop[i] +
+ priv->xstats.mtl_est_txq_hlbf[i];
+ qopt->stats.window_drops = window_drops;
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ qopt->stats.tx_overruns = 0;
+}
+
+static void tc_taprio_queue_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
+ int queue = qopt->queue_stats.queue;
+
+ q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
+ priv->xstats.mtl_est_txq_hlbf[queue];
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ q_stats->stats.tx_overruns = 0;
+}
+
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int err = 0;
+
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ case TAPRIO_CMD_DESTROY:
+ err = tc_taprio_configure(priv, qopt);
+ break;
+ case TAPRIO_CMD_STATS:
+ tc_taprio_stats(priv, qopt);
+ break;
+ case TAPRIO_CMD_QUEUE_STATS:
+ tc_taprio_queue_stats(priv, qopt);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1126,6 +1204,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
return -EOPNOTSUPP;
caps->gate_mask_per_txq = true;
+ caps->supports_queue_max_sdu = true;
return 0;
}
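
The taprio rework above does two things: tc_setup_taprio() becomes a dispatcher over the offload commands (REPLACE/DESTROY/STATS/QUEUE_STATS), and queueMaxSDU offload is added. The qdisc hands the driver one limit per traffic class; tc_taprio_map_maxsdu_txq() fans that out to the DWMAC Tx queues backing each class through the mqprio offset/count tables, and the transmit paths then drop oversized frames into max_sdu_txq_drop. A simplified sketch of the fan-out, with illustrative names and at most eight classes; the ETH_HLEN - ETH_TLEN adjustment mirrors the driver, which compares the stored limit directly against skb->len:

#include <linux/if_ether.h>
#include <linux/types.h>

struct foo_mqprio {
	u32 num_tc;
	u32 offset[8];		/* first Tx queue of each class */
	u32 count[8];		/* number of Tx queues per class */
	u32 max_sdu[8];		/* 0 = no limit for this class */
};

static void foo_map_maxsdu(const struct foo_mqprio *q, u32 *queue_max_sdu)
{
	u32 i, j;

	for (i = 0; i < q->num_tc; i++) {
		if (!q->max_sdu[i])
			continue;

		for (j = q->offset[i]; j < q->offset[i] + q->count[i]; j++)
			/* Store a limit comparable against skb->len. */
			queue_max_sdu[j] = q->max_sdu[i] +
					   ETH_HLEN - ETH_TLEN;
	}
}
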
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 3525d5c0d694..351609f4f011 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1144,9 +1144,9 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
nskb->protocol = skb->protocol;
offset = skb_mac_header(skb) - skb->data;
skb_set_mac_header(nskb, offset);
- offset = skb_network_header(skb) - skb->data;
+ offset = skb_network_offset(skb);
skb_set_network_header(nskb, offset);
- offset = skb_transport_header(skb) - skb->data;
+ offset = skb_transport_offset(skb);
skb_set_transport_header(nskb, offset);
offset = 0;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 35fceba01ea4..d6ce2c9f0a8d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -514,14 +514,14 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
-static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
return phylink_ethtool_get_eee(salve->phylink, edata);
}
-static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 26dc906eae90..57fe936bb177 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,4 +90,5 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
+MODULE_DESCRIPTION("TI CPSW Switch common module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a557a477d039..f7b283353ba2 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -422,7 +422,7 @@ int cpsw_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -434,7 +434,7 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 0e27c433098d..7efa72502c86 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -496,8 +496,8 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd);
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index a27ec1dcc8d5..9a7dd7efcf69 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -45,7 +45,7 @@ static int emac_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_set_link_ksettings(ndev, ecmd);
}
-static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
@@ -53,7 +53,7 @@ static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return phy_ethtool_get_eee(ndev->phydev, edata);
}
-static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 411898a4f38c..cf7b73f8f450 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1489,9 +1489,6 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- /* stop PRUs */
- prueth_emac_stop(emac);
-
if (prueth->emacs_initialized == 1)
icss_iep_exit(emac->iep);
@@ -1502,7 +1499,6 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->rx_chns.irq[rx_flow], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_tx_chns(emac);
prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
prueth_cleanup_tx_chns(emac);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index c1b0d35c8d05..5ee8e8980393 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -698,7 +698,7 @@ gelic_card_get_next_tx_descr(struct gelic_card *card)
}
/**
- * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field
+ * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field
* @descr: descriptor structure to fill out
* @skb: packet to consider
*
@@ -1461,7 +1461,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
}
/**
- * gelic_ether_setup_netdev - initialization of net_device
+ * gelic_net_setup_netdev - initialization of net_device
* @netdev: net_device structure
* @card: card structure
*
@@ -1518,14 +1518,16 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
return 0;
}
+#define GELIC_ALIGN (32)
+
/**
* gelic_alloc_card_net - allocates net_device and card structure
+ * @netdev: interface device structure
*
* returns the card structure or NULL in case of errors
*
* the card and net_device structures are linked to each other
*/
-#define GELIC_ALIGN (32)
static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1db754615cca..945c13d1a982 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1958,8 +1958,6 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
- wx->msix_in_use = false;
-
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 8706223a6e5a..6dff2c85682d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1257,7 +1257,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* compute header lengths */
l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
+ *hdr_len = enc ? skb_inner_transport_offset(skb) :
skb_transport_offset(skb);
*hdr_len += l4len;
@@ -1614,14 +1614,12 @@ static int wx_acquire_msix_vectors(struct wx *wx)
/* One for non-queue interrupts */
nvecs += 1;
- if (!wx->msix_in_use) {
- wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entry) {
- kfree(wx->msix_q_entries);
- wx->msix_q_entries = NULL;
- return -ENOMEM;
- }
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
@@ -1931,10 +1929,8 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (pdev->msix_enabled) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
- if (!wx->msix_in_use) {
- kfree(wx->msix_entry);
- wx->msix_entry = NULL;
- }
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b4dc4f341117..1fdeb464d5f4 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1047,7 +1047,6 @@ struct wx {
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
- bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 7507f762edfe..42718875277c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
+ txgbe_irq.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
new file mode 100644
index 000000000000..b3e3605d1edb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/irqdomain.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_phy.h"
+#include "txgbe_irq.h"
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+void txgbe_irq_enable(struct wx *wx, bool queues)
+{
+ wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+}
+
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link(maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+int txgbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
+static int txgbe_request_link_irq(struct txgbe *txgbe)
+{
+ txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ return request_threaded_irq(txgbe->link_irq, NULL,
+ txgbe_link_irq_handler,
+ IRQF_ONESHOT, "txgbe-link-irq", txgbe);
+}
+
+static const struct irq_chip txgbe_irq_chip = {
+ .name = "txgbe-misc-irq",
+};
+
+static int txgbe_misc_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct txgbe *txgbe = d->host_data;
+
+ irq_set_chip_data(irq, txgbe);
+ irq_set_chip(irq, &txgbe->misc.chip);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ .map = txgbe_misc_irq_domain_map,
+};
+
+static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void txgbe_del_irq_domain(struct txgbe *txgbe)
+{
+ int hwirq, virq;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
+ virq = irq_find_mapping(txgbe->misc.domain, hwirq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(txgbe->misc.domain);
+}
+
+void txgbe_free_misc_irq(struct txgbe *txgbe)
+{
+ free_irq(txgbe->gpio_irq, txgbe);
+ free_irq(txgbe->link_irq, txgbe);
+ free_irq(txgbe->misc.irq, txgbe);
+ txgbe_del_irq_domain(txgbe);
+}
+
+int txgbe_setup_misc_irq(struct txgbe *txgbe)
+{
+ struct wx *wx = txgbe->wx;
+ int hwirq, err;
+
+ txgbe->misc.nirqs = 2;
+ txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
+ if (!txgbe->misc.domain)
+ return -ENOMEM;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
+ irq_create_mapping(txgbe->misc.domain, hwirq);
+
+ txgbe->misc.chip = txgbe_irq_chip;
+ if (wx->pdev->msix_enabled)
+ txgbe->misc.irq = wx->msix_entry->vector;
+ else
+ txgbe->misc.irq = wx->pdev->irq;
+
+ err = request_threaded_irq(txgbe->misc.irq, NULL,
+ txgbe_misc_irq_handle,
+ IRQF_ONESHOT,
+ wx->netdev->name, txgbe);
+ if (err)
+ goto del_misc_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_msic_irq;
+
+ err = txgbe_request_link_irq(txgbe);
+ if (err)
+ goto free_gpio_irq;
+
+ return 0;
+
+free_gpio_irq:
+ free_irq(txgbe->gpio_irq, txgbe);
+free_msic_irq:
+ free_irq(txgbe->misc.irq, txgbe);
+del_misc_irq:
+ txgbe_del_irq_domain(txgbe);
+
+ return err;
+}
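
The new txgbe_irq.c replaces the chained-handler demux that is deleted from txgbe_main.c and txgbe_phy.c below: a two-entry linear irq_domain is created for the GPIO and link sub-interrupts, the parent vector is requested as a threaded IRQ, and the handler dispatches with handle_nested_irq() after reading the misc status. Stripped of the hardware specifics, this is the standard nested-IRQ demultiplexer shape, sketched here with hypothetical "foo" names and a hard-coded pending mask standing in for the status-register read:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define FOO_NIRQS	2

struct foo_demux {
	struct irq_domain *domain;
	int parent_irq;
};

static irqreturn_t foo_demux_handler(int irq, void *data)
{
	struct foo_demux *d = data;
	unsigned long pending = 0x3;	/* read from hardware in reality */
	unsigned int hwirq;

	for_each_set_bit(hwirq, &pending, FOO_NIRQS)
		handle_nested_irq(irq_find_mapping(d->domain, hwirq));

	return IRQ_HANDLED;
}

static int foo_demux_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_nested_thread(irq, true);
	irq_set_noprobe(irq);
	return 0;
}

static const struct irq_domain_ops foo_demux_ops = {
	.map = foo_demux_map,
};

static int foo_demux_init(struct foo_demux *d)
{
	int hwirq;

	d->domain = irq_domain_add_simple(NULL, FOO_NIRQS, 0,
					  &foo_demux_ops, d);
	if (!d->domain)
		return -ENOMEM;

	for (hwirq = 0; hwirq < FOO_NIRQS; hwirq++)
		irq_create_mapping(d->domain, hwirq);

	/* Children are handled nested, so the parent must be threaded. */
	return request_threaded_irq(d->parent_irq, NULL, foo_demux_handler,
				    IRQF_ONESHOT, "foo-demux", d);
}
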
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
new file mode 100644
index 000000000000..b77945e7a0f2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+void txgbe_irq_enable(struct wx *wx, bool queues);
+int txgbe_request_irq(struct wx *wx);
+void txgbe_free_misc_irq(struct txgbe *txgbe);
+int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3b151c410a5c..bd4624d14ca0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -17,6 +17,7 @@
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_irq.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -76,137 +77,11 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-/**
- * txgbe_irq_enable - Enable default interrupt generation settings
- * @wx: pointer to private structure
- * @queues: enable irqs for queues
- **/
-static void txgbe_irq_enable(struct wx *wx, bool queues)
-{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
-
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
- if (queues)
- wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
-}
-
-/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
- * @wx: board private structure
- *
- * Allocate MSI-X vectors and request interrupts from the kernel.
- **/
-static int txgbe_request_msix_irqs(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- int vector, err;
-
- for (vector = 0; vector < wx->num_q_vectors; vector++) {
- struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_q_entries[vector];
-
- if (q_vector->tx.ring && q_vector->rx.ring)
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-TxRx-%d", netdev->name, entry->entry);
- else
- /* skip this unused q_vector */
- continue;
-
- err = request_irq(entry->vector, wx_msix_clean_rings, 0,
- q_vector->name, q_vector);
- if (err) {
- wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
- q_vector->name, err);
- goto free_queue_irqs;
- }
- }
-
- return 0;
-
-free_queue_irqs:
- while (vector) {
- vector--;
- free_irq(wx->msix_q_entries[vector].vector,
- wx->q_vector[vector]);
- }
- wx_reset_interrupt_capability(wx);
- return err;
-}
-
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -518,6 +393,7 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -528,6 +404,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
+ txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -536,6 +413,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
+ txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -751,10 +629,14 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
- err = txgbe_init_phy(txgbe);
+ err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
+ err = txgbe_init_phy(txgbe);
+ if (err)
+ goto err_free_misc_irq;
+
err = register_netdev(netdev);
if (err)
goto err_remove_phy;
@@ -781,6 +663,8 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
+err_free_misc_irq:
+ txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
@@ -813,6 +697,7 @@ static void txgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
+ txgbe_free_misc_irq(txgbe);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1b84d495d14e..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -292,6 +292,21 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
return 0;
}
+irqreturn_t txgbe_link_irq_handler(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+ bool up;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
+
+ phylink_mac_change(wx->phylink, up);
+
+ return IRQ_HANDLED;
+}
+
static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct wx *wx = gpiochip_get_data(chip);
@@ -437,7 +452,7 @@ static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
}
static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe_gpio_irq",
+ .name = "txgbe-gpio-irq",
.irq_ack = txgbe_gpio_irq_ack,
.irq_mask = txgbe_gpio_irq_mask,
.irq_unmask = txgbe_gpio_irq_unmask,
@@ -446,29 +461,25 @@ static const struct irq_chip txgbe_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static void txgbe_irq_handler(struct irq_desc *desc)
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct wx *wx = irq_desc_get_handler_data(desc);
- struct txgbe *txgbe = wx->priv;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
irq_hw_number_t hwirq;
unsigned long gpioirq;
struct gpio_chip *gc;
unsigned long flags;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
-
- chained_irq_enter(chip, desc);
gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
gc = txgbe->gpio;
for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
u32 irq_type = irq_get_trigger_type(gpio);
- generic_handle_domain_irq(gc->irq.domain, hwirq);
+ txgbe_gpio_irq_ack(d);
+ handle_nested_irq(gpio);
if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
raw_spin_lock_irqsave(&wx->gpio_lock, flags);
@@ -477,17 +488,34 @@ static void txgbe_irq_handler(struct irq_desc *desc)
}
}
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
+}
+
+void txgbe_reinit_gpio_intr(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+ irq_hw_number_t hwirq;
+ unsigned long gpioirq;
+ struct gpio_chip *gc;
+ unsigned long flags;
+
+ /* ack any GPIO interrupt left pending from before the IRQ was enabled */
+ gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
- TXGBE_PX_MISC_ETH_AN)) {
- u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
+ gc = txgbe->gpio;
+ for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+ int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
+ u32 irq_type = irq_get_trigger_type(gpio);
- phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
- }
+ txgbe_gpio_irq_ack(d);
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+ txgbe_toggle_trigger(gc, hwirq);
+ raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+ }
+ }
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -524,19 +552,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->parent_handler = txgbe_irq_handler;
- girq->parent_handler_data = wx;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents), GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
-
- /* now only suuported on MSI-X interrupt */
- if (!wx->msix_entry)
- return -EPERM;
-
- girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -754,8 +769,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
goto err_unregister_i2c;
}
- wx->msix_in_use = true;
-
return 0;
err_unregister_i2c:
@@ -788,5 +801,4 @@ void txgbe_remove_phy(struct txgbe *txgbe)
phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
- txgbe->wx->msix_in_use = false;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 1ab592124986..8a026d804fe2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,6 +4,9 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
+void txgbe_reinit_gpio_intr(struct wx *wx);
+irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 270a6fd9ad0b..1b4ff50d5857 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -5,6 +5,7 @@
#define _TXGBE_TYPE_H_
#include <linux/property.h>
+#include <linux/irq.h>
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -169,15 +170,31 @@ struct txgbe_nodes {
const struct software_node *group[SWNODE_MAX + 1];
};
+enum txgbe_misc_irqs {
+ TXGBE_IRQ_GPIO = 0,
+ TXGBE_IRQ_LINK,
+ TXGBE_IRQ_MAX
+};
+
+struct txgbe_irq {
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq;
+};
+
struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
+ struct txgbe_irq misc;
struct dw_xpcs *xpcs;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
+ unsigned int gpio_irq;
+ unsigned int link_irq;
};
#endif /* _TXGBE_TYPE_H_ */
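For orientation, a minimal sketch of how the single misc vector can fan out to the two handlers declared in txgbe_phy.h; the txgbe_misc_irq_demux() name and the TXGBE_PX_MISC_GPIO status bit are hypothetical, while wx_misc_isb() and the link/AN bits appear in the code removed above.

static irqreturn_t txgbe_misc_irq_demux(int irq, void *data)
{
	struct txgbe *txgbe = data;
	u32 eicr;

	/* one hardware vector, two logical sources */
	eicr = wx_misc_isb(txgbe->wx, WX_ISB_MISC);

	if (eicr & TXGBE_PX_MISC_GPIO)		/* hypothetical bit */
		txgbe_gpio_irq_handler(irq, txgbe);
	if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
		    TXGBE_PX_MISC_ETH_AN))
		txgbe_link_irq_handler(irq, txgbe);

	return IRQ_HANDLED;
}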
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3318b50a5911..f165616f36fe 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -539,8 +539,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 765aa516aada..940452d0a4d2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1114,8 +1114,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
ndev->irq = rc;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
+ lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(lp->base_addr)) {
rc = PTR_ERR(lp->base_addr);
goto error;
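Both resource conversions above reduce to the same one-call pattern; sketched in isolation, with example_probe() as a placeholder name:

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	/* replaces platform_get_resource() + devm_ioremap_resource()
	 * while still returning the resource to the caller
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}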
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 9f505cf02d96..e9bc38fd2025 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1240,9 +1240,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
SelectPage(0);
PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
- freespace = GetWord(XIRCREG0_TSO);
- okay = freespace & 0x8000;
- freespace &= 0x7fff;
+ freespace = GetWord(XIRCREG0_TSO) & 0x7fff;
/* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
okay = pktlen +2 < freespace;
pr_debug("%s: avail. tx space=%u%s\n",
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c4ed36c71897..2f6739fe78af 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -333,22 +333,16 @@ static int geneve_init(struct net_device *dev)
struct geneve_dev *geneve = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&geneve->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL);
if (err) {
- free_percpu(dev->tstats);
gro_cells_destroy(&geneve->gro_cells);
return err;
}
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -358,7 +352,6 @@ static void geneve_uninit(struct net_device *dev)
dst_cache_destroy(&geneve->cfg.info.dst_cache);
gro_cells_destroy(&geneve->gro_cells);
- free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
@@ -521,7 +514,7 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
gh_len = geneve_hlen(gh);
hlen = off_gnv + gh_len;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
gh = skb_gro_header_slow(skb, hlen, off_gnv);
if (unlikely(!gh))
goto out;
@@ -1135,7 +1128,6 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_open = geneve_open,
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -1155,7 +1147,7 @@ static const struct ethtool_ops geneve_ethtool_ops = {
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type geneve_type = {
+static const struct device_type geneve_type = {
.name = "geneve",
};
@@ -1202,6 +1194,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
@@ -1914,29 +1907,26 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
}
}
-static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ geneve_destroy_tunnels(net, dev_to_kill);
+}
- list_for_each_entry(net, net_list, exit_list) {
- const struct geneve_net *gn = net_generic(net, geneve_net_id);
+static void __net_exit geneve_exit_net(struct net *net)
+{
+ const struct geneve_net *gn = net_generic(net, geneve_net_id);
- WARN_ON_ONCE(!list_empty(&gn->sock_list));
- }
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch = geneve_exit_batch_net,
+ .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
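A minimal sketch of the new pernet shape, assuming the core takes rtnl_lock around ->exit_batch_rtnl and batch-unregisters everything queued on dev_to_kill; the foo_* names are placeholders:

static void __net_exit foo_exit_batch_rtnl(struct list_head *net_list,
					   struct list_head *dev_to_kill)
{
	struct net *net;

	/* runs under rtnl_lock; queued devices are unregistered in one
	 * batch by the core instead of once per namespace
	 */
	list_for_each_entry(net, net_list, exit_list)
		foo_destroy_tunnels(net, dev_to_kill);
}

static struct pernet_operations foo_net_ops = {
	.exit_batch_rtnl = foo_exit_batch_rtnl,
};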
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 2b5357d94ff5..ba4704c2c640 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -711,25 +711,11 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
return ret;
}
-static int gtp_dev_init(struct net_device *dev)
-{
- struct gtp_dev *gtp = netdev_priv(dev);
-
- gtp->dev = dev;
-
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
-}
-
static void gtp_dev_uninit(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
gtp_encap_disable(gtp);
- free_percpu(dev->tstats);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -942,10 +928,8 @@ tx_err:
}
static const struct net_device_ops gtp_netdev_ops = {
- .ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static const struct device_type gtp_type = {
@@ -957,6 +941,7 @@ static void gtp_link_setup(struct net_device *dev)
unsigned int max_gtp_header_len = sizeof(struct iphdr) +
sizeof(struct udphdr) +
sizeof(struct gtp0_header);
+ struct gtp_dev *gtp = netdev_priv(dev);
dev->netdev_ops = &gtp_netdev_ops;
dev->needs_free_netdev = true;
@@ -970,11 +955,13 @@ static void gtp_link_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags |= IFF_NO_QUEUE;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
+ gtp->dev = dev;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
@@ -1876,23 +1863,23 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit(struct net *net)
+static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
- LIST_HEAD(list);
+ struct net *net;
- rtnl_lock();
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
+ }
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit = gtp_net_exit,
+ .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
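The geneve and gtp hunks also share the stats conversion: a sketch, assuming the core allocates and reports dev->tstats once pcpu_stat_type is set (example_link_setup() is a placeholder):

static void example_link_setup(struct net_device *dev)
{
	/* core-managed per-CPU stats: no netdev_alloc_pcpu_stats(),
	 * free_percpu() or .ndo_get_stats64 = dev_get_tstats64 needed
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}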
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 164c7f605af5..f632b0cfd5ae 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -11,17 +11,16 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
#include <net/mac802154.h>
@@ -316,7 +315,7 @@ static const struct regmap_config at86rf230_regmap_spi_config = {
.val_bits = 8,
.write_flag_mask = CMD_REG | CMD_WRITE,
.read_flag_mask = CMD_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = AT86RF2XX_NUMREGS,
.writeable_reg = at86rf230_reg_writeable,
.readable_reg = at86rf230_reg_readable,
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 4ec0dab38872..f102f26cb0e3 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2857,19 +2857,13 @@ static int ca8210_interrupt_init(struct spi_device *spi)
*/
static int ca8210_dev_com_init(struct ca8210_priv *priv)
{
- priv->mlme_workqueue = alloc_ordered_workqueue(
- "MLME work queue",
- WQ_UNBOUND
- );
+ priv->mlme_workqueue = alloc_ordered_workqueue("MLME work queue", 0);
if (!priv->mlme_workqueue) {
dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n");
return -ENOMEM;
}
- priv->irq_workqueue = alloc_ordered_workqueue(
- "ca8210 irq worker",
- WQ_UNBOUND
- );
+ priv->irq_workqueue = alloc_ordered_workqueue("ca8210 irq worker", 0);
if (!priv->irq_workqueue) {
dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
destroy_workqueue(priv->mlme_workqueue);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 87abe3b46316..433fb5839203 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -12,7 +12,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/debugfs.h>
@@ -251,7 +250,7 @@ static const struct regmap_config mcr20a_dar_regmap = {
.val_bits = 8,
.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
.read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = mcr20a_dar_writeable,
.readable_reg = mcr20a_dar_readable,
.volatile_reg = mcr20a_dar_volatile,
@@ -387,7 +386,7 @@ static const struct regmap_config mcr20a_iar_regmap = {
.val_bits = 8,
.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
.read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = mcr20a_iar_writeable,
.readable_reg = mcr20a_iar_readable,
.volatile_reg = mcr20a_iar_volatile,
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ee4cfbf2c5cc..d3f42efc5d1a 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -388,7 +388,7 @@ static const struct regmap_config mrf24j40_short_regmap = {
.pad_bits = 1,
.write_flag_mask = MRF24J40_SHORT_WRITE,
.read_flag_mask = MRF24J40_SHORT_READ,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = MRF24J40_SHORT_NUMREGS,
.writeable_reg = mrf24j40_short_reg_writeable,
.readable_reg = mrf24j40_short_reg_readable,
@@ -495,7 +495,7 @@ static const struct regmap_config mrf24j40_long_regmap = {
.pad_bits = 5,
.write_flag_mask = MRF24J40_LONG_ACCESS,
.read_flag_mask = MRF24J40_LONG_ACCESS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = MRF24J40_LONG_NUMREGS,
.writeable_reg = mrf24j40_long_reg_writeable,
.readable_reg = mrf24j40_long_reg_readable,
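All four ieee802154 cache conversions are the same one-field change; in isolation, with example_regmap as a placeholder:

static const struct regmap_config example_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	/* maple-tree register cache replaces the rbtree backend */
	.cache_type = REGCACHE_MAPLE,
};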
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index f3355e040a9e..334cd62cf286 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -21,7 +21,6 @@
struct clk;
struct icc_path;
struct net_device;
-struct platform_device;
struct ipa_power;
struct ipa_smp2p;
@@ -31,7 +30,7 @@ struct ipa_interrupt;
* struct ipa - IPA information
* @gsi: Embedded GSI structure
* @version: IPA hardware version
- * @pdev: Platform device
+ * @dev: IPA device pointer
* @completion: Used to signal pipeline clear transfer complete
* @nb: Notifier block used for remoteproc SSR
* @notifier: Remoteproc SSR notifier
@@ -79,7 +78,7 @@ struct ipa_interrupt;
struct ipa {
struct gsi gsi;
enum ipa_version version;
- struct platform_device *pdev;
+ struct device *dev;
struct completion completion;
struct notifier_block nb;
void *notifier;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index f1419fbd776c..39219963dbb3 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -174,7 +174,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 size;
size = route ? ipa->route_count : ipa->filter_count + 1;
@@ -204,7 +204,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
@@ -256,7 +256,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
const char *name, u32 offset)
{
struct ipa_cmd_register_write *payload;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 offset_max;
u32 bit_count;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index afa1d56d9095..dd490941615e 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -233,8 +233,8 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *other_data;
- struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name other_name;
+ struct device *dev = ipa->dev;
if (ipa_gsi_endpoint_data_empty(data))
return true;
@@ -388,7 +388,7 @@ static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_endpoint_name name;
u32 max;
@@ -606,7 +606,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"no transaction to reset modem exception endpoints\n");
return -EBUSY;
}
@@ -1498,8 +1498,7 @@ ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
} else {
- dev_err(&ipa->pdev->dev,
- "unexpected tagged packet from endpoint %u\n",
+ dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
endpoint_id);
}
@@ -1536,6 +1535,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
u32 resid = total_len;
while (resid) {
@@ -1544,7 +1544,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
u32 len;
if (resid < IPA_STATUS_SIZE) {
- dev_err(&endpoint->ipa->pdev->dev,
+ dev_err(dev,
"short message (%u bytes < %zu byte status)\n",
resid, IPA_STATUS_SIZE);
break;
@@ -1666,8 +1666,8 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
*/
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
@@ -1769,7 +1769,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d resetting channel %u for endpoint %u\n",
ret, endpoint->channel_id, endpoint->endpoint_id);
}
@@ -1817,7 +1817,7 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
ret = gsi_channel_start(gsi, endpoint->channel_id);
if (ret) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
endpoint->channel_id, endpoint_id);
@@ -1854,14 +1854,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
/* Note that if stop fails, the channel's state is not well-defined */
ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
- dev_err(&ipa->pdev->dev,
- "error %d attempting to stop endpoint %u\n", ret,
- endpoint_id);
+ dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
+ ret, endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1881,7 +1880,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1983,7 +1982,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct reg *reg;
u32 endpoint_id;
u32 hw_limit;
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index a78c692f2d3c..c3e8784d51d9 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -19,6 +19,7 @@
* time only these three are supported.
*/
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
@@ -43,6 +44,30 @@ struct ipa_interrupt {
u32 enabled;
};
+/* Clear the suspend interrupt for all endpoints that signaled it */
+static void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 unit_count;
+ u32 unit;
+
+ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
+
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (!val || ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
+ }
+}
+
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
@@ -70,7 +95,7 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
* caused the interrupt, so defer clearing until after
* the handler has been called.
*/
- ipa_power_suspend_handler(ipa, irq_id);
+ ipa_interrupt_suspend_clear_all(interrupt);
fallthrough;
default: /* Silently ignore (and clear) any other condition */
@@ -85,14 +110,13 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- struct device *dev;
u32 pending;
u32 offset;
u32 mask;
int ret;
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
@@ -205,30 +229,6 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}
-/* Clear the suspend interrupt for all endpoints that signaled it */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
-{
- struct ipa *ipa = interrupt->ipa;
- u32 unit_count;
- u32 unit;
-
- unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
- for (unit = 0; unit < unit_count; unit++) {
- const struct reg *reg;
- u32 val;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
-
- /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
- if (ipa->version == IPA_VERSION_3_0)
- continue;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
- }
-}
-
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
@@ -236,29 +236,17 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
}
/* Configure the IPA interrupt framework */
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
+int ipa_interrupt_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
- struct ipa_interrupt *interrupt;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ unsigned int irq = interrupt->irq;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- unsigned int irq;
int ret;
- ret = platform_get_irq_byname(ipa->pdev, "ipa");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
- ret);
- return ERR_PTR(ret ? : -EINVAL);
- }
- irq = ret;
-
- interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
- if (!interrupt)
- return ERR_PTR(-ENOMEM);
interrupt->ipa = ipa;
- interrupt->irq = irq;
- /* Start with all IPA interrupts disabled */
+ /* Disable all IPA interrupt types */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
@@ -271,26 +259,59 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n",
+ ret);
goto err_free_irq;
}
- return interrupt;
+ ipa->interrupt = interrupt;
+
+ return 0;
err_free_irq:
free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
- return ERR_PTR(ret);
+ return ret;
}
/* Inverse of ipa_interrupt_config() */
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
+void ipa_interrupt_deconfig(struct ipa *ipa)
{
- struct device *dev = &interrupt->ipa->pdev->dev;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ struct device *dev = ipa->dev;
+
+ ipa->interrupt = NULL;
dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
+}
+
+/* Initialize the IPA interrupt structure */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "ipa");
+ if (irq <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n", irq);
+
+ return ERR_PTR(irq ? : -EINVAL);
+ }
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->irq = irq;
+
+ return interrupt;
+}
+
+/* Inverse of ipa_interrupt_init() */
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt)
+{
kfree(interrupt);
}
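The resulting two-phase bring-up, condensed into one hypothetical helper (error unwinding abbreviated; the ipa_interrupt_* calls are the ones defined above):

static int example_ipa_irq_bringup(struct ipa *ipa,
				   struct platform_device *pdev)
{
	struct ipa_interrupt *interrupt;

	/* phase 1: probe-safe, only looks up the IRQ number */
	interrupt = ipa_interrupt_init(pdev);	/* may -EPROBE_DEFER */
	if (IS_ERR(interrupt))
		return PTR_ERR(interrupt);

	ipa->interrupt = interrupt;

	/* phase 2: requests the IRQ and registers it as a wake IRQ */
	return ipa_interrupt_config(ipa);
}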
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 12e3e798ccb3..f3f4f4330a59 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -35,14 +35,6 @@ void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
u32 endpoint_id);
/**
- * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
- * @interrupt: IPA interrupt structure
- *
- * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
- */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
-
-/**
* ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
* @interrupt: IPA interrupt structure
*
@@ -84,17 +76,31 @@ void ipa_interrupt_irq_enable(struct ipa *ipa);
void ipa_interrupt_irq_disable(struct ipa *ipa);
/**
- * ipa_interrupt_config() - Configure the IPA interrupt framework
+ * ipa_interrupt_config() - Configure IPA interrupts
* @ipa: IPA pointer
*
- * Return: Pointer to IPA SMP2P info, or a pointer-coded error
+ * Return: 0 if successful, or a negative error code
*/
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
+int ipa_interrupt_config(struct ipa *ipa);
/**
* ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
+ * @ipa: IPA pointer
+ */
+void ipa_interrupt_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_init() - Initialize the IPA interrupt structure
+ * @pdev: IPA platform device pointer
+ *
+ * Return: Pointer to an IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev);
+
+/**
+ * ipa_interrupt_exit() - Inverse of ipa_interrupt_init()
* @interrupt: IPA interrupt structure
*/
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt);
#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 00475fd7a205..57b241417e8c 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
-#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
@@ -114,7 +113,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = gsi_setup(&ipa->gsi);
@@ -542,12 +541,9 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
if (ret)
goto err_hardware_deconfig;
- ipa->interrupt = ipa_interrupt_config(ipa);
- if (IS_ERR(ipa->interrupt)) {
- ret = PTR_ERR(ipa->interrupt);
- ipa->interrupt = NULL;
+ ret = ipa_interrupt_config(ipa);
+ if (ret)
goto err_mem_deconfig;
- }
ipa_uc_config(ipa);
@@ -572,8 +568,7 @@ err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
err_uc_deconfig:
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_hardware_deconfig:
@@ -591,8 +586,7 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_modem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
}
@@ -808,6 +802,7 @@ out_self:
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
@@ -839,12 +834,21 @@ static int ipa_probe(struct platform_device *pdev)
if (loader == IPA_LOADER_DEFER)
return -EPROBE_DEFER;
- /* The clock and interconnects might not be ready when we're
- * probed, so might return -EPROBE_DEFER.
+ /* The IPA interrupt might not be ready when we're probed, so this
+ * might return -EPROBE_DEFER.
+ */
+ interrupt = ipa_interrupt_init(pdev);
+ if (IS_ERR(interrupt))
+ return PTR_ERR(interrupt);
+
+ /* The clock and interconnects might not be ready when we're probed,
+ * so this might return -EPROBE_DEFER.
*/
power = ipa_power_init(dev, data->power_data);
- if (IS_ERR(power))
- return PTR_ERR(power);
+ if (IS_ERR(power)) {
+ ret = PTR_ERR(power);
+ goto err_interrupt_exit;
+ }
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -853,18 +857,19 @@ static int ipa_probe(struct platform_device *pdev)
goto err_power_exit;
}
- ipa->pdev = pdev;
+ ipa->dev = dev;
dev_set_drvdata(dev, ipa);
+ ipa->interrupt = interrupt;
ipa->power = power;
ipa->version = data->version;
ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
- ret = ipa_reg_init(ipa);
+ ret = ipa_reg_init(ipa, pdev);
if (ret)
goto err_kfree_ipa;
- ret = ipa_mem_init(ipa, data->mem_data);
+ ret = ipa_mem_init(ipa, pdev, data->mem_data);
if (ret)
goto err_reg_exit;
@@ -882,7 +887,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
+ ret = ipa_smp2p_init(ipa, pdev, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
@@ -939,17 +944,27 @@ err_kfree_ipa:
kfree(ipa);
err_power_exit:
ipa_power_exit(power);
+err_interrupt_exit:
+ ipa_interrupt_exit(interrupt);
return ret;
}
static void ipa_remove(struct platform_device *pdev)
{
- struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct ipa_power *power = ipa->power;
- struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ struct ipa_power *power;
+ struct device *dev;
+ struct ipa *ipa;
int ret;
+ ipa = dev_get_drvdata(&pdev->dev);
+ dev = ipa->dev;
+ WARN_ON(dev != &pdev->dev);
+
+ power = ipa->power;
+ interrupt = ipa->interrupt;
+
/* Prevent the modem from triggering a call to ipa_setup(). This
* also ensures a modem-initiated setup that's underway completes.
*/
@@ -991,6 +1006,7 @@ out_power_put:
ipa_reg_exit(ipa);
kfree(ipa);
ipa_power_exit(power);
+ ipa_interrupt_exit(interrupt);
dev_info(dev, "IPA driver removed");
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 694960537ecd..709f061ede61 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -9,6 +9,7 @@
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>
@@ -75,9 +76,9 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
+ const struct reg *reg;
u32 offset;
u16 size;
u32 val;
@@ -87,7 +88,7 @@ int ipa_mem_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 4);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ dev_err(ipa->dev, "no transaction for memory setup\n");
return -EBUSY;
}
@@ -217,8 +218,8 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id = mem->id;
+ struct device *dev = ipa->dev;
u16 size_multiple;
/* Make sure the memory region is valid for this version of IPA */
@@ -254,7 +255,7 @@ static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_mem_id mem_id;
u32 i;
@@ -290,7 +291,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 limit = ipa->mem_size;
u32 i;
@@ -317,7 +318,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
*/
int ipa_mem_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
const struct reg *reg;
dma_addr_t addr;
@@ -393,7 +394,7 @@ err_dma_free:
/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
ipa->zero_size = 0;
@@ -420,8 +421,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 3);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction to zero modem memory\n");
+ dev_err(ipa->dev, "no transaction to zero modem memory\n");
return -EBUSY;
}
@@ -452,7 +452,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -485,13 +485,12 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
static void ipa_imem_exit(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
- struct device *dev;
if (!ipa->imem_size)
return;
- dev = &ipa->pdev->dev;
domain = iommu_get_domain_for_dev(dev);
if (domain) {
size_t size;
@@ -527,7 +526,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*/
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -594,7 +593,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
static void ipa_smem_exit(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
domain = iommu_get_domain_for_dev(dev);
@@ -615,9 +614,10 @@ static void ipa_smem_exit(struct ipa *ipa)
}
/* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct resource *res;
int ret;
@@ -634,14 +634,13 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
if (!ipa_table_mem_valid(ipa, true))
return -EINVAL;
- ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
return ret;
}
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-shared");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
if (!res) {
dev_err(dev,
"DT error getting \"ipa-shared\" memory property\n");
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 868e9c20e8c4..28aad00a151d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -6,6 +6,8 @@
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
+struct platform_device;
+
struct ipa;
struct ipa_mem_data;
@@ -100,7 +102,8 @@ int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
int ipa_mem_zero_modem(struct ipa *ipa);
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data);
void ipa_mem_exit(struct ipa *ipa);
#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 423422a2a445..c27ca3f27f7d 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -39,10 +39,14 @@ enum ipa_modem_state {
/**
* struct ipa_priv - IPA network device private data
* @ipa: IPA pointer
+ * @tx: Transmit endpoint pointer
+ * @rx: Receive endpoint pointer
* @work: Work structure used to wake the modem netdev TX queue
*/
struct ipa_priv {
struct ipa *ipa;
+ struct ipa_endpoint *tx;
+ struct ipa_endpoint *rx;
struct work_struct work;
};
@@ -54,16 +58,16 @@ static int ipa_open(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ret = ipa_endpoint_enable_one(priv->tx);
if (ret)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ret = ipa_endpoint_enable_one(priv->rx);
if (ret)
goto err_disable_tx;
@@ -75,7 +79,7 @@ static int ipa_open(struct net_device *netdev)
return 0;
err_disable_tx:
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->tx);
err_power_put:
pm_runtime_put_noidle(dev);
@@ -90,15 +94,15 @@ static int ipa_stop(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto out_power_put;
netif_stop_queue(netdev);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->rx);
+ ipa_endpoint_disable_one(priv->tx);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
@@ -106,13 +110,16 @@ out_power_put:
return 0;
}
-/** ipa_start_xmit() - Transmits an skb.
- * @skb: skb to be transmitted
- * @dev: network device
+/** ipa_start_xmit() - Transmit an skb
+ * @skb: Socket buffer to be transmitted
+ * @netdev: Network device
*
- * Return codes:
- * NETDEV_TX_OK: Success
- * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
+ *
+ * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
+ * If the buffer has an unexpected protocol or its size is out of range it
+ * is quietly dropped, returning NETDEV_TX_OK. NETDEV_TX_BUSY indicates
+ * the buffer cannot be sent at this time and should be retried later.
*/
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -132,29 +139,41 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
- /* The hardware must be powered for us to transmit */
- dev = &ipa->pdev->dev;
+ /* The hardware must be powered for us to transmit, so if we're not
+ * ready we want the network stack to stop queueing until power is
+ * ACTIVE. Once runtime resume has completed, we inform the network
+ * stack it's OK to try transmitting again.
+ *
+ * We learn from pm_runtime_get() whether the hardware is powered.
+ * If it was not, powering up is either started or already underway.
+ * And in that case we want to disable queueing, expecting it to be
+ * re-enabled once power is ACTIVE. But runtime PM and network
+ * transmit run concurrently, and if we're not careful the requests
+ * to stop and start queueing could occur in the wrong order.
+ *
+ * For that reason we *always* stop queueing here, *before* the call
+ * to pm_runtime_get(). If we determine here that power is ACTIVE,
+ * we restart queueing before transmitting the SKB. Otherwise
+ * queueing will eventually be enabled after resume completes.
+ */
+ netif_stop_queue(netdev);
+
+ dev = ipa->dev;
ret = pm_runtime_get(dev);
if (ret < 1) {
/* If a resume won't happen, just drop the packet */
if (ret < 0 && ret != -EINPROGRESS) {
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
pm_runtime_put_noidle(dev);
goto err_drop_skb;
}
- /* No power (yet). Stop the network stack from transmitting
- * until we're resumed; ipa_modem_resume() arranges for the
- * TX queue to be started again.
- */
- ipa_power_modem_queue_stop(ipa);
-
pm_runtime_put_noidle(dev);
return NETDEV_TX_BUSY;
}
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
ret = ipa_endpoint_skb_tx(endpoint, skb);
@@ -233,14 +252,14 @@ static void ipa_modem_netdev_setup(struct net_device *netdev)
*/
void ipa_modem_suspend(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_suspend_one(priv->rx);
+ ipa_endpoint_suspend_one(priv->tx);
}
/**
@@ -258,7 +277,7 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
{
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
- ipa_power_modem_queue_wake(priv->ipa);
+ netif_wake_queue(priv->tx->netdev);
}
/** ipa_modem_resume() - resume callback for runtime_pm
@@ -268,14 +287,14 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
*/
void ipa_modem_resume(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_resume_one(priv->tx);
+ ipa_endpoint_resume_one(priv->rx);
/* Arrange for the TX queue to be restarted */
(void)queue_pm_work(&priv->work);
@@ -303,19 +322,24 @@ int ipa_modem_start(struct ipa *ipa)
goto out_set_state;
}
- SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
+ SET_NETDEV_DEV(netdev, ipa->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
+ priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv->tx->netdev = netdev;
+ priv->rx->netdev = netdev;
+
ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
if (ret) {
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -355,9 +379,11 @@ int ipa_modem_stop(struct ipa *ipa)
if (netdev->flags & IFF_UP)
(void)ipa_stop(netdev);
unregister_netdev(netdev);
+
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -370,7 +396,7 @@ int ipa_modem_stop(struct ipa *ipa)
/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
/* Prevent the modem from triggering a call to ipa_setup() */
@@ -417,7 +443,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
{
struct ipa *ipa = container_of(nb, struct ipa, nb);
struct qcom_ssr_notify_data *notify_data = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
@@ -466,7 +492,7 @@ int ipa_modem_config(struct ipa *ipa)
void ipa_modem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
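The stop-before-power-check ordering explained in the ipa_start_xmit() comment above, reduced to a skeleton; example_xmit() is a placeholder and the device pointer is passed in for brevity:

static netdev_tx_t example_xmit(struct sk_buff *skb,
				struct net_device *netdev,
				struct device *dev)
{
	int ret;

	netif_stop_queue(netdev);	/* always stop before checking power */

	ret = pm_runtime_get(dev);
	if (ret < 1) {
		/* not powered yet; the resume path rewakes the queue */
		pm_runtime_put_noidle(dev);
		return NETDEV_TX_BUSY;
	}

	netif_wake_queue(netdev);	/* power ACTIVE: undo the stop */
	/* ... hand skb to the hardware, then pm_runtime_put() ... */
	return NETDEV_TX_OK;
}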
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index e223886123ce..41ca7ef5e20f 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,28 +35,10 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * enum ipa_power_flag - IPA power flags
- * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
- * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
- * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
- * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
- * @IPA_POWER_FLAG_COUNT: Number of defined power flags
- */
-enum ipa_power_flag {
- IPA_POWER_FLAG_RESUMED,
- IPA_POWER_FLAG_SYSTEM,
- IPA_POWER_FLAG_STOPPED,
- IPA_POWER_FLAG_STARTED,
- IPA_POWER_FLAG_COUNT, /* Last; not a flag */
-};
-
-/**
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
* @qmp: QMP handle for AOSS communication
- * @spinlock: Protects modem TX queue enable/disable
- * @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
@@ -64,8 +46,6 @@ struct ipa_power {
struct device *dev;
struct clk *core;
struct qmp *qmp;
- spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
- DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};
@@ -147,7 +127,6 @@ static int ipa_runtime_suspend(struct device *dev)
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
- __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
@@ -179,8 +158,6 @@ static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
- __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Increment the disable depth to ensure that the IRQ won't
* be re-enabled until the matching _enable call in
* ipa_resume(). We do this to ensure that the interrupt
@@ -202,8 +179,6 @@ static int ipa_resume(struct device *dev)
ret = pm_runtime_force_resume(dev);
- __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Now that PM runtime is enabled again it's safe
* to turn the IRQ back on and process any data
* that was received during suspend.
@@ -219,84 +194,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa)
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
-{
- /* To handle an IPA interrupt we will have resumed the hardware
- * just to handle the interrupt, so we're done. If we are in a
- * system suspend, trigger a system resume.
- */
- if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
- if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
- pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
-
- /* Acknowledge/clear the suspend interrupt on all endpoints */
- ipa_interrupt_suspend_clear_all(ipa->interrupt);
-}
-
-/* The next few functions coordinate stopping and starting the modem
- * network device transmit queue.
- *
- * Transmit can be running concurrent with power resume, and there's a
- * chance the resume completes before the transmit path stops the queue,
- * leaving the queue in a stopped state. The next two functions are used
- * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
- * to conditionally stop the TX queue; and ipa_power_modem_queue_start()
- * is used by ipa_runtime_resume() to conditionally restart it.
- *
- * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
- * power flag is set. And if the queue is started, the STARTED flag is set.
- * The queue is only started on resume if the STOPPED flag is set. And the
- * queue is only started in ipa_start_xmit() if the STARTED flag is *not*
- * set. As a result, the queue remains operational if the two activites
- * happen concurrently regardless of the order they complete. The spinlock
- * ensures the flag and TX queue operations are done atomically.
- *
- * The first function stops the modem netdev transmit queue, but only if
- * the STARTED flag is *not* set. That flag is cleared if it was set.
- * If the queue is stopped, the STOPPED flag is set. This is called only
- * from the power ->runtime_resume operation.
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
- netif_stop_queue(ipa->modem_netdev);
- __set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function starts the modem netdev transmit queue, but only if the
- * STOPPED flag is set. That flag is cleared if it was set. If the queue
- * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
- * to skip stopping the queue in the event of a race.
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
- __set_bit(IPA_POWER_FLAG_STARTED, power->flags);
- netif_wake_queue(ipa->modem_netdev);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function clears the STARTED flag once the TX queue is operating */
-void ipa_power_modem_queue_active(struct ipa *ipa)
-{
- clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
-}
-
static int ipa_power_retention_init(struct ipa_power *power)
{
struct qmp *qmp = qmp_get(power->dev);
@@ -341,7 +238,7 @@ int ipa_power_setup(struct ipa *ipa)
ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
- ret = device_init_wakeup(&ipa->pdev->dev, true);
+ ret = device_init_wakeup(ipa->dev, true);
if (ret)
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
@@ -350,7 +247,7 @@ int ipa_power_setup(struct ipa *ipa)
void ipa_power_teardown(struct ipa *ipa)
{
- (void)device_init_wakeup(&ipa->pdev->dev, false);
+ (void)device_init_wakeup(ipa->dev, false);
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
@@ -385,7 +282,6 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
}
power->dev = dev;
power->core = clk;
- spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
ret = ipa_interconnect_init(power, data->interconnect_data);
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 3a4c59ea1222..227cc04bea80 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -24,24 +24,6 @@ extern const struct dev_pm_ops ipa_pm_ops;
u32 ipa_core_clock_rate(struct ipa *ipa);
/**
- * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_active() - Report modem netdev TX queue active
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_active(struct ipa *ipa);
-
-/**
* ipa_power_retention() - Control register retention on power collapse
* @ipa: IPA pointer
* @enable: Whether retention should be enabled or disabled
@@ -49,17 +31,6 @@ void ipa_power_modem_queue_active(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
- * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt ID (unused)
- *
- * If an RX endpoint is suspended, and the IPA has a packet destined for
- * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
- * that it should resume the endpoint.
- */
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
-
-/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index f70f0a1d1cda..65c40e207802 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -96,7 +96,7 @@ static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
IPA_QMI_INIT_COMPLETE_IND_SZ,
ipa_init_complete_ind_ei, &ind);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete indication\n", ret);
else
ipa_qmi->indication_sent = true;
@@ -148,7 +148,7 @@ static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
ipa = container_of(ipa_qmi, struct ipa, qmi);
ret = ipa_modem_start(ipa);
if (ret)
- dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+ dev_err(ipa->dev, "error %d starting modem\n", ret);
}
/* All QMI clients from the modem node are gone (modem shut down or crashed). */
@@ -199,7 +199,7 @@ static void ipa_server_indication_register(struct qmi_handle *qmi,
ipa_qmi->indication_requested = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending register indication response\n", ret);
}
}
@@ -228,7 +228,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
ipa_qmi->uc_ready = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete response\n", ret);
}
}
@@ -417,7 +417,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = qmi_txn_init(qmi, &txn, NULL, NULL);
if (ret < 0) {
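
The recurring substitution of "ipa->dev" for "&ipa->pdev->dev" in these hunks follows one pattern: cache the generic device pointer once at probe time so the rest of the driver never needs to reach back through the platform device. A minimal sketch of the probe side, where example_probe is a hypothetical function (the ipa->dev field itself is introduced elsewhere in this series):

    static int example_probe(struct platform_device *pdev)
    {
    	struct ipa *ipa;

    	ipa = devm_kzalloc(&pdev->dev, sizeof(*ipa), GFP_KERNEL);
    	if (!ipa)
    		return -ENOMEM;

    	/* Cache the struct device pointer; later code uses ipa->dev
    	 * and never needs the platform_device again.
    	 */
    	ipa->dev = &pdev->dev;
    	dev_dbg(ipa->dev, "IPA probed\n");

    	return 0;
    }
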
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 6a3203ae6f1e..98625956e0bb 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -4,6 +4,7 @@
* Copyright (C) 2019-2023 Linaro Ltd.
*/
+#include <linux/platform_device.h>
#include <linux/io.h>
#include "ipa.h"
@@ -132,9 +133,9 @@ static const struct regs *ipa_regs(enum ipa_version version)
}
}
-int ipa_reg_init(struct ipa *ipa)
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
const struct regs *regs;
struct resource *res;
@@ -146,8 +147,7 @@ int ipa_reg_init(struct ipa *ipa)
return -EINVAL;
/* Setup IPA register memory */
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-reg");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-reg");
if (!res) {
dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
return -ENODEV;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 2998f115f12c..62c62495b796 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -12,6 +12,8 @@
#include "ipa_version.h"
#include "reg.h"
+struct platform_device;
+
struct ipa;
/**
@@ -643,7 +645,7 @@ extern const struct regs ipa_regs_v5_5;
const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-int ipa_reg_init(struct ipa *ipa);
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev);
void ipa_reg_exit(struct ipa *ipa);
#endif /* _IPA_REG_H_ */
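
The added "struct platform_device;" line in ipa_reg.h is the usual header trick: a forward declaration is sufficient when a header only deals in pointers to a type, and it spares every includer from pulling in linux/platform_device.h. In sketch form (the prototype name is hypothetical):

    struct platform_device;		/* declared, never defined here */

    int example_reg_init(struct ipa *ipa, struct platform_device *pdev);
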
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 5620dc271fac..aeccce9fab72 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -5,7 +5,7 @@
*/
#include <linux/types.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
@@ -84,15 +84,13 @@ struct ipa_smp2p {
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
- struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
- dev = &smp2p->ipa->pdev->dev;
- smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
+ smp2p->power_on = pm_runtime_get_if_active(smp2p->ipa->dev, true) > 0;
/* Signal whether the IPA power is enabled */
mask = BIT(smp2p->enabled_bit);
@@ -152,15 +150,16 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
+ struct ipa *ipa = smp2p->ipa;
struct device *dev;
int ret;
/* Ignore any (spurious) interrupts received after the first */
- if (smp2p->ipa->setup_complete)
+ if (ipa->setup_complete)
return IRQ_HANDLED;
/* Power needs to be active for setup */
- dev = &smp2p->ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power for setup\n", ret);
@@ -168,7 +167,7 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
}
/* An error here won't cause driver shutdown, so warn if one occurs */
- ret = ipa_setup(smp2p->ipa);
+ ret = ipa_setup(ipa);
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_power_put:
@@ -179,14 +178,15 @@ out_power_put:
}
/* Initialize SMP2P interrupts */
-static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
- irq_handler_t handler)
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p,
+ struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
{
- struct device *dev = &smp2p->ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
unsigned int irq;
int ret;
- ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ ret = platform_get_irq_byname(pdev, name);
if (ret <= 0)
return ret ? : -EINVAL;
irq = ret;
@@ -208,7 +208,7 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
/* Drop the power reference if it was taken in ipa_smp2p_notify() */
static void ipa_smp2p_power_release(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (!ipa->smp2p->power_on)
return;
@@ -219,10 +219,11 @@ static void ipa_smp2p_power_release(struct ipa *ipa)
}
/* Initialize the IPA SMP2P subsystem */
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+int
+ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev, bool modem_init)
{
struct qcom_smem_state *enabled_state;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct qcom_smem_state *valid_state;
struct ipa_smp2p *smp2p;
u32 enabled_bit;
@@ -261,7 +262,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
/* We have enough information saved to handle notifications */
ipa->smp2p = smp2p;
- ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-clock-query",
ipa_smp2p_modem_clk_query_isr);
if (ret < 0)
goto err_null_smp2p;
@@ -273,7 +274,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
if (modem_init) {
/* Result will be non-zero (negative for error) */
- ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-setup-ready",
ipa_smp2p_modem_setup_ready_isr);
if (ret < 0)
goto err_notifier_unregister;
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 9b969b03d1a4..2a3d8eefb13b 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -8,17 +8,20 @@
#include <linux/types.h>
+struct platform_device;
+
struct ipa;
/**
* ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
* @ipa: IPA pointer
+ * @pdev: Platform device pointer
* @modem_init: Whether the modem is responsible for GSI initialization
*
* Return: 0 if successful, or a negative error code
- *
*/
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+int ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev,
+ bool modem_init);
/**
* ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 7b637bb8b41c..a24ac11b8893 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -163,7 +163,7 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 count;
if (!filtered) {
@@ -236,8 +236,7 @@ ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s filter reset\n",
+ dev_err(ipa->dev, "no transaction for %s filter reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -298,8 +297,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s route reset\n",
+ dev_err(ipa->dev, "no transaction for %s route reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -327,7 +325,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
void ipa_table_reset(struct ipa *ipa, bool modem)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const char *ee_name;
int ret;
@@ -356,7 +354,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
trans = ipa_cmd_trans_alloc(ipa, 1);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ dev_err(ipa->dev, "no transaction for hash flush\n");
return -EBUSY;
}
@@ -469,7 +467,7 @@ int ipa_table_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ dev_err(ipa->dev, "no transaction for table setup\n");
return -EBUSY;
}
@@ -713,7 +711,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
*/
int ipa_table_init(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
@@ -763,7 +761,7 @@ int ipa_table_init(struct ipa *ipa)
void ipa_table_exit(struct ipa *ipa)
{
u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
size_t size;
size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 7eaa0b4ebed9..bfd5dc6dab43 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -127,7 +127,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
@@ -141,7 +141,7 @@ static void ipa_uc_event_handler(struct ipa *ipa)
static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
@@ -191,7 +191,7 @@ void ipa_uc_config(struct ipa *ipa)
/* Inverse of ipa_uc_config() */
void ipa_uc_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
@@ -208,8 +208,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
/* Take a proxy power reference for the microcontroller */
void ipa_uc_power(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
static bool already;
- struct device *dev;
int ret;
if (already)
@@ -217,7 +217,6 @@ void ipa_uc_power(struct ipa *ipa)
already = true; /* Only do this on first boot */
/* This power reference dropped in ipa_uc_response_hdlr() above */
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index df7c43a109e1..5920f7e63352 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -349,7 +349,7 @@ static int ipvlan_get_iflink(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return ipvlan->phy_dev->ifindex;
+ return READ_ONCE(ipvlan->phy_dev->ifindex);
}
static const struct net_device_ops ipvlan_netdev_ops = {
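
This hunk, and the similar macsec, macvlan, netkit and netdevsim hunks below, annotate lockless readers: with address and link dumps moving out from under rtnl_lock elsewhere in this merge, ndo_get_iflink() can run concurrently with an ifindex update, so the read is marked READ_ONCE() to pair with WRITE_ONCE() on the writer side and stay tear-free. A generic sketch with a hypothetical private struct:

    struct example_priv {
    	struct net_device *lower_dev;
    };

    static int example_get_iflink(const struct net_device *dev)
    {
    	struct example_priv *priv = netdev_priv(dev);

    	/* Tear-free read; the writer updates ifindex with WRITE_ONCE() */
    	return READ_ONCE(priv->lower_dev->ifindex);
    }
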
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6d53e63ef4e..f6eab66c2660 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -144,6 +144,7 @@ static int loopback_dev_init(struct net_device *dev)
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7f5426285c61..0206b84284ab 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3519,18 +3519,13 @@ static int macsec_dev_init(struct net_device *dev)
struct net_device *real_dev = macsec->real_dev;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&macsec->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
macsec_set_head_tail_room(dev);
@@ -3550,7 +3545,6 @@ static void macsec_dev_uninit(struct net_device *dev)
struct macsec_dev *macsec = macsec_priv(dev);
gro_cells_destroy(&macsec->gro_cells);
- free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
@@ -3753,7 +3747,7 @@ static void macsec_get_stats64(struct net_device *dev,
static int macsec_get_iflink(const struct net_device *dev)
{
- return macsec_priv(dev)->real_dev->ifindex;
+ return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
}
static const struct net_device_ops macsec_netdev_ops = {
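
The macsec change above (and the nlmon one further down) moves to core-managed per-CPU stats: rather than allocating dev->tstats in ndo_init() and freeing it in ndo_uninit(), the driver declares the stats flavor and the netdev core allocates and frees the counters around the device lifetime. A sketch of the setup side, with example_setup hypothetical:

    static void example_setup(struct net_device *dev)
    {
    	/* Core allocates and frees the per-CPU tstats counters */
    	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
    }
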
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3cc665757e8..0cec2783a3e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1158,7 +1158,7 @@ static int macvlan_dev_get_iflink(const struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- return vlan->lowerdev->ifindex;
+ return READ_ONCE(vlan->lowerdev->ifindex);
}
static const struct ethtool_ops macvlan_ethtool_ops = {
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 68f8ee0ec8ba..f40eb50bb978 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -94,6 +94,10 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
int ret;
u32 cmd;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
/* Prepare the read operation */
cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
unimac_mdio_writel(priv, cmd, MDIO_CMD);
@@ -103,7 +107,7 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
ret = priv->wait_func(priv->wait_func_data);
if (ret)
- return ret;
+ goto out;
cmd = unimac_mdio_readl(priv, MDIO_CMD);
@@ -112,10 +116,15 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* that condition here and ignore the MDIO controller read failure
* indication.
*/
- if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
- return -EIO;
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL)) {
+ ret = -EIO;
+ goto out;
+ }
- return cmd & 0xffff;
+ ret = cmd & 0xffff;
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
@@ -123,6 +132,11 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
{
struct unimac_mdio_priv *priv = bus->priv;
u32 cmd;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Prepare the write operation */
cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
@@ -131,7 +145,10 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
unimac_mdio_start(priv);
- return priv->wait_func(priv->wait_func_data);
+ ret = priv->wait_func(priv->wait_func_data);
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
}
/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
@@ -178,14 +195,19 @@ static int unimac_mdio_reset(struct mii_bus *bus)
return 0;
}
-static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
{
unsigned long rate;
u32 reg, div;
+ int ret;
/* Keep the hardware default values */
if (!priv->clk_freq)
- return;
+ return 0;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
if (!priv->clk)
rate = 250000000;
@@ -195,7 +217,8 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
pr_warn("Incorrect MDIO clock frequency, ignoring\n");
- return;
+ ret = 0;
+ goto out;
}
/* The MDIO clock is the reference clock (typically 250MHz) divided by
@@ -205,6 +228,9 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
reg |= div << MDIO_CLK_DIV_SHIFT;
unimac_mdio_writel(priv, reg, MDIO_CFG);
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_probe(struct platform_device *pdev)
@@ -235,24 +261,12 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
priv->clk_freq = 0;
- unimac_mdio_clk_set(priv);
-
priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- ret = -ENOMEM;
- goto out_clk_disable;
- }
+ if (!priv->mii_bus)
+ return -ENOMEM;
bus = priv->mii_bus;
bus->priv = priv;
@@ -261,17 +275,29 @@ static int unimac_mdio_probe(struct platform_device *pdev)
priv->wait_func = pdata->wait_func;
priv->wait_func_data = pdata->wait_func_data;
bus->phy_mask = ~pdata->phy_mask;
+ priv->clk = pdata->clk;
} else {
bus->name = "unimac MII bus";
priv->wait_func_data = priv;
priv->wait_func = unimac_mdio_poll;
+ priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ }
+
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_mdio_free;
}
+
bus->parent = &pdev->dev;
bus->read = unimac_mdio_read;
bus->write = unimac_mdio_write;
bus->reset = unimac_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ ret = unimac_mdio_clk_set(priv);
+ if (ret)
+ goto out_mdio_free;
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
@@ -286,8 +312,6 @@ static int unimac_mdio_probe(struct platform_device *pdev)
out_mdio_free:
mdiobus_free(bus);
-out_clk_disable:
- clk_disable_unprepare(priv->clk);
return ret;
}
@@ -297,36 +321,20 @@ static void unimac_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
- clk_disable_unprepare(priv->clk);
-}
-
-static int __maybe_unused unimac_mdio_suspend(struct device *d)
-{
- struct unimac_mdio_priv *priv = dev_get_drvdata(d);
-
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused unimac_mdio_resume(struct device *d)
{
struct unimac_mdio_priv *priv = dev_get_drvdata(d);
- int ret;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- unimac_mdio_clk_set(priv);
-
- return 0;
+ return unimac_mdio_clk_set(priv);
}
static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
- unimac_mdio_suspend, unimac_mdio_resume);
+ NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index abd8b508ec16..9d8f43b28aac 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -14,6 +14,20 @@
#include <linux/clk.h>
#define MDIO_MODE_REG 0x40
+#define MDIO_MODE_MDC_MODE BIT(12)
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+#define MDIO_MODE_DIV_MASK GENMASK(7, 0)
+#define MDIO_MODE_DIV(x) FIELD_PREP(MDIO_MODE_DIV_MASK, (x) - 1)
+#define MDIO_MODE_DIV_1 0x0
+#define MDIO_MODE_DIV_2 0x1
+#define MDIO_MODE_DIV_4 0x3
+#define MDIO_MODE_DIV_8 0x7
+#define MDIO_MODE_DIV_16 0xf
+#define MDIO_MODE_DIV_32 0x1f
+#define MDIO_MODE_DIV_64 0x3f
+#define MDIO_MODE_DIV_128 0x7f
+#define MDIO_MODE_DIV_256 0xff
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -26,9 +40,6 @@
#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-/* 0 = Clause 22, 1 = Clause 45 */
-#define MDIO_MODE_C45 BIT(8)
-
#define IPQ4019_MDIO_TIMEOUT 10000
#define IPQ4019_MDIO_SLEEP 10
@@ -41,6 +52,7 @@ struct ipq4019_mdio_data {
void __iomem *membase;
void __iomem *eth_ldo_rdy;
struct clk *mdio_clk;
+ unsigned int mdc_rate;
};
static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
@@ -203,6 +215,38 @@ static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int ipq4019_mdio_set_div(struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+ /* If we don't have a clock for AHB, use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+ /* The MDC rate is ahb_rate/(MDIO_MODE_DIV + 1).
+ * While dividers of 1, 2 and 4 are supported, internal
+ * documentation doesn't guarantee correct operation of
+ * the MDIO bus with them.
+ */
+ for (div = 8; div <= 256; div *= 2) {
+ /* The requested rate is supported by the div */
+ if (priv->mdc_rate == DIV_ROUND_UP(ahb_rate, div)) {
+ val = readl(priv->membase + MDIO_MODE_REG);
+ val &= ~MDIO_MODE_DIV_MASK;
+ val |= MDIO_MODE_DIV(div);
+ writel(val, priv->membase + MDIO_MODE_REG);
+
+ return 0;
+ }
+ }
+
+ /* The requested rate is not supported */
+ return -EINVAL;
+}
+
static int ipq_mdio_reset(struct mii_bus *bus)
{
struct ipq4019_mdio_data *priv = bus->priv;
@@ -225,10 +269,58 @@ static int ipq_mdio_reset(struct mii_bus *bus)
return ret;
ret = clk_prepare_enable(priv->mdio_clk);
- if (ret == 0)
- mdelay(10);
+ if (ret)
+ return ret;
- return ret;
+ mdelay(10);
+
+ /* Restore MDC rate */
+ return ipq4019_mdio_set_div(priv);
+}
+
+static void ipq4019_mdio_select_mdc_rate(struct platform_device *pdev,
+ struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+ /* MDC rate is defined in DT; we don't have to pick a default value */
+ if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &priv->mdc_rate))
+ return;
+
+ /* If we don't have a clock for AHB, use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+ /* Check what is the current div set */
+ val = readl(priv->membase + MDIO_MODE_REG);
+ div = FIELD_GET(MDIO_MODE_DIV_MASK, val);
+
+ /* The divider is not at the default value of /256.
+ * Someone (the bootloader or another driver) probably
+ * changed it; keep that setting and don't overwrite it.
+ */
+ if (div != MDIO_MODE_DIV_256) {
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div + 1);
+ return;
+ }
+
+ /* If div is /256, assume nobody has set this value and
+ * try to find the MDC rate closest to the 802.3 spec of
+ * 2.5MHz
+ */
+ for (div = 256; div >= 8; div /= 2) {
+ /* Stop as soon as a divider pushes the rate above
+ * 2.5MHz; the previous iteration stored the closest
+ * rate not exceeding it
+ */
+ if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
+ break;
+
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div);
+ }
}
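
To make the divider scan above concrete, here is the same selection logic lifted into a tiny standalone helper (hypothetical name), with a worked value: a 100 MHz AHB clock gives 3.125 MHz at div 32, which overshoots 2.5 MHz, so the scan keeps div 64 and an MDC rate of 1.5625 MHz:

    static unsigned int example_pick_mdc_rate(unsigned long ahb_rate)
    {
    	unsigned int rate = 0;
    	int div;

    	for (div = 256; div >= 8; div /= 2) {
    		/* The first divider above 2.5MHz ends the scan */
    		if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
    			break;
    		rate = DIV_ROUND_UP(ahb_rate, div);
    	}

    	return rate;	/* e.g. 100000000 -> 1562500 (div 64) */
    }
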
static int ipq4019_mdio_probe(struct platform_device *pdev)
@@ -252,6 +344,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
if (IS_ERR(priv->mdio_clk))
return PTR_ERR(priv->mdio_clk);
+ ipq4019_mdio_select_mdc_rate(pdev, priv);
+ ret = ipq4019_mdio_set_div(priv);
+ if (ret)
+ return ret;
+
/* The platform resource is provided on the chipset IPQ5018 */
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 64ebcb6d235c..08e607f62e10 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,6 +139,53 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
}
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np,
+ bool *scanphys)
+{
+ struct device_node *child;
+ int addr, rc = 0;
+
+ /* Loop over the child nodes and register a phy_device for each phy */
+ for_each_available_child_of_node(np, child) {
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Ignore invalid ethernet-phy-package node */
+ if (!of_property_present(child, "reg"))
+ continue;
+
+ rc = __of_mdiobus_parse_phys(mdio, child, NULL);
+ if (rc && rc != -ENODEV)
+ goto exit;
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0) {
+ /* Skip scanning for invalid ethernet-phy-package node */
+ if (scanphys)
+ *scanphys = true;
+ continue;
+ }
+
+ if (of_mdiobus_child_is_phy(child))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ else
+ rc = of_mdiobus_register_device(mdio, child, addr);
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
+ goto exit;
+ }
+
+ return 0;
+exit:
+ of_node_put(child);
+ return rc;
+}
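
One detail worth calling out in the helper above is the of_node_put(child) behind the exit: label: for_each_available_child_of_node() holds a reference on the current child and only drops it when advancing, so any early exit from the loop must put the reference itself. The generic shape, with example_handle hypothetical:

    static int example_walk(struct device_node *np)
    {
    	struct device_node *child;
    	int rc;

    	for_each_available_child_of_node(np, child) {
    		rc = example_handle(child);	/* hypothetical */
    		if (rc) {
    			of_node_put(child);	/* balance the iterator's ref */
    			return rc;
    		}
    	}

    	return 0;
    }
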
+
/**
* __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -180,33 +227,18 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
return rc;
/* Loop over the child nodes and register a phy_device for each phy */
- for_each_available_child_of_node(np, child) {
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0) {
- scanphys = true;
- continue;
- }
-
- if (of_mdiobus_child_is_phy(child))
- rc = of_mdiobus_register_phy(mdio, child, addr);
- else
- rc = of_mdiobus_register_device(mdio, child, addr);
-
- if (rc == -ENODEV)
- dev_err(&mdio->dev,
- "MDIO device at address %d is missing.\n",
- addr);
- else if (rc)
- goto unregister;
- }
+ rc = __of_mdiobus_parse_phys(mdio, np, &scanphys);
+ if (rc)
+ goto unregister;
if (!scanphys)
return 0;
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
- /* Skip PHYs with reg property set */
- if (of_property_present(child, "reg"))
+ /* Skip PHYs with reg property set or ethernet-phy-package node */
+ if (of_property_present(child, "reg") ||
+ of_node_name_eq(child, "ethernet-phy-package"))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -227,15 +259,16 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
if (!rc)
break;
if (rc != -ENODEV)
- goto unregister;
+ goto put_unregister;
}
}
}
return 0;
-unregister:
+put_unregister:
of_node_put(child);
+unregister:
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 6e14ba5e06c8..d7070dd4fe73 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -42,14 +42,20 @@ MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
-#define MAX_PARAM_LENGTH 256
-#define MAX_PRINT_CHUNK 1000
+#define MAX_PARAM_LENGTH 256
+#define MAX_USERDATA_ENTRY_LENGTH 256
+#define MAX_USERDATA_VALUE_LENGTH 200
+/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
+#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \
+ MAX_USERDATA_VALUE_LENGTH - 3)
+#define MAX_USERDATA_ITEMS 16
+#define MAX_PRINT_CHUNK 1000
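
For orientation, these limits compose as follows: each entry is emitted as " <name>=<value>\n" (the three format characters counted above), so a name may be at most 256 - 200 - 3 = 53 bytes, and the fully formatted userdata string is bounded by 16 * 256 = 4096 bytes per target.
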
static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
-static bool oops_only = false;
+static bool oops_only;
module_param(oops_only, bool, 0600);
MODULE_PARM_DESC(oops_only, "Only log oops messages");
@@ -79,7 +85,10 @@ static struct console netconsole_ext;
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
- * @item: Links us into the configfs subsystem hierarchy.
+ * @group: Links us into the configfs subsystem hierarchy.
+ * @userdata_group: Links to the userdata configfs hierarchy
+ * @userdata_complete: Cached, formatted string of append
+ * @userdata_length: String length of userdata_complete
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -102,7 +111,10 @@ static struct console netconsole_ext;
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- struct config_item item;
+ struct config_group group;
+ struct config_group userdata_group;
+ char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ size_t userdata_length;
#endif
bool enabled;
bool extended;
@@ -134,14 +146,14 @@ static void __exit dynamic_netconsole_exit(void)
*/
static void netconsole_target_get(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_get(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_get(&nt->group);
}
static void netconsole_target_put(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_put(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_put(&nt->group);
}
#else /* !CONFIG_NETCONSOLE_DYNAMIC */
@@ -215,15 +227,33 @@ static struct netconsole_target *alloc_and_init(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | userdata/
+ * | <key>/
+ * | value
+ * | ...
* |
* <target>/...
*/
static struct netconsole_target *to_target(struct config_item *item)
{
- return item ?
- container_of(item, struct netconsole_target, item) :
- NULL;
+ struct config_group *cfg_group;
+
+ cfg_group = to_config_group(item);
+ if (!cfg_group)
+ return NULL;
+ return container_of(to_config_group(item),
+ struct netconsole_target, group);
+}
+
+/* Get rid of a possible trailing newline */
+static void trim_newline(char *s, size_t maxlen)
+{
+ size_t len;
+
+ len = strnlen(s, maxlen);
+ if (len && s[len - 1] == '\n')
+ s[len - 1] = '\0';
}
/*
@@ -370,7 +400,7 @@ static ssize_t release_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -398,7 +428,7 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -420,22 +450,17 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- size_t len;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
mutex_unlock(&dynamic_netconsole_mutex);
return -EINVAL;
}
strscpy(nt->np.dev_name, buf, IFNAMSIZ);
-
- /* Get rid of possible trailing newline from echo(1) */
- len = strnlen(nt->np.dev_name, IFNAMSIZ);
- if (nt->np.dev_name[len - 1] == '\n')
- nt->np.dev_name[len - 1] = '\0';
+ trim_newline(nt->np.dev_name, IFNAMSIZ);
mutex_unlock(&dynamic_netconsole_mutex);
return strnlen(buf, count);
@@ -450,7 +475,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -473,7 +498,7 @@ static ssize_t remote_port_store(struct config_item *item,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -495,12 +520,13 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -510,9 +536,9 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.local_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -531,12 +557,13 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -546,9 +573,9 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.remote_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -568,7 +595,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -585,6 +612,180 @@ out_unlock:
return -EINVAL;
}
+struct userdatum {
+ struct config_item item;
+ char value[MAX_USERDATA_VALUE_LENGTH];
+};
+
+static struct userdatum *to_userdatum(struct config_item *item)
+{
+ return container_of(item, struct userdatum, item);
+}
+
+struct userdata {
+ struct config_group group;
+};
+
+static struct userdata *to_userdata(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct userdata, group);
+}
+
+static struct netconsole_target *userdata_to_target(struct userdata *ud)
+{
+ struct config_group *netconsole_group;
+
+ netconsole_group = to_config_group(ud->group.cg_item.ci_parent);
+ return to_target(&netconsole_group->cg_item);
+}
+
+static ssize_t userdatum_value_show(struct config_item *item, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0]));
+}
+
+static void update_userdata(struct netconsole_target *nt)
+{
+ int complete_idx = 0, child_count = 0;
+ struct list_head *entry;
+
+ /* Clear the current string in case the last userdatum was deleted */
+ nt->userdata_length = 0;
+ nt->userdata_complete[0] = 0;
+
+ list_for_each(entry, &nt->userdata_group.cg_children) {
+ struct userdatum *udm_item;
+ struct config_item *item;
+
+ if (child_count >= MAX_USERDATA_ITEMS)
+ break;
+ child_count++;
+
+ item = container_of(entry, struct config_item, ci_entry);
+ udm_item = to_userdatum(item);
+
+ /* Skip userdata with no value set */
+ if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
+ continue;
+
+ /* This cannot overflow userdata_complete: each iteration
+ * writes at most one entry (1/MAX_USERDATA_ITEMS of the
+ * buffer), and child_count above caps the number of
+ * entries at MAX_USERDATA_ITEMS
+ */
+ complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
+ MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n",
+ item->ci_name, udm_item->value);
+ }
+ nt->userdata_length = strnlen(nt->userdata_complete,
+ sizeof(nt->userdata_complete));
+}
+
+static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
+ size_t count)
+{
+ struct userdatum *udm = to_userdatum(item);
+ struct netconsole_target *nt;
+ struct userdata *ud;
+ int ret;
+
+ if (count > MAX_USERDATA_VALUE_LENGTH)
+ return -EMSGSIZE;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+
+ ret = strscpy(udm->value, buf, sizeof(udm->value));
+ if (ret < 0)
+ goto out_unlock;
+ trim_newline(udm->value, sizeof(udm->value));
+
+ ud = to_userdata(item->ci_parent);
+ nt = userdata_to_target(ud);
+ update_userdata(nt);
+
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return count;
+out_unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+CONFIGFS_ATTR(userdatum_, value);
+
+static struct configfs_attribute *userdatum_attrs[] = {
+ &userdatum_attr_value,
+ NULL,
+};
+
+static void userdatum_release(struct config_item *item)
+{
+ kfree(to_userdatum(item));
+}
+
+static struct configfs_item_operations userdatum_ops = {
+ .release = userdatum_release,
+};
+
+static const struct config_item_type userdatum_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_attrs = userdatum_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *userdatum_make_item(struct config_group *group,
+ const char *name)
+{
+ struct netconsole_target *nt;
+ struct userdatum *udm;
+ struct userdata *ud;
+ size_t child_count;
+
+ if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+ child_count = list_count_nodes(&nt->userdata_group.cg_children);
+ if (child_count >= MAX_USERDATA_ITEMS)
+ return ERR_PTR(-ENOSPC);
+
+ udm = kzalloc(sizeof(*udm), GFP_KERNEL);
+ if (!udm)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&udm->item, name, &userdatum_type);
+ return &udm->item;
+}
+
+static void userdatum_drop(struct config_group *group, struct config_item *item)
+{
+ struct netconsole_target *nt;
+ struct userdata *ud;
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ update_userdata(nt);
+ config_item_put(item);
+ mutex_unlock(&dynamic_netconsole_mutex);
+}
+
+static struct configfs_attribute *userdata_attrs[] = {
+ NULL,
+};
+
+static struct configfs_group_operations userdata_ops = {
+ .make_item = userdatum_make_item,
+ .drop_item = userdatum_drop,
+};
+
+static struct config_item_type userdata_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_group_ops = &userdata_ops,
+ .ct_attrs = userdata_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
CONFIGFS_ATTR(, enabled);
CONFIGFS_ATTR(, extended);
CONFIGFS_ATTR(, dev_name);
@@ -629,6 +830,15 @@ static const struct config_item_type netconsole_target_type = {
.ct_owner = THIS_MODULE,
};
+static void init_target_config_group(struct netconsole_target *nt,
+ const char *name)
+{
+ config_group_init_type_name(&nt->group, name, &netconsole_target_type);
+ config_group_init_type_name(&nt->userdata_group, "userdata",
+ &userdata_type);
+ configfs_add_default_group(&nt->userdata_group, &nt->group);
+}
+
static struct netconsole_target *find_cmdline_target(const char *name)
{
struct netconsole_target *nt, *ret = NULL;
@@ -636,7 +846,7 @@ static struct netconsole_target *find_cmdline_target(const char *name)
spin_lock_irqsave(&target_list_lock, flags);
list_for_each_entry(nt, &target_list, list) {
- if (!strcmp(nt->item.ci_name, name)) {
+ if (!strcmp(nt->group.cg_item.ci_name, name)) {
ret = nt;
break;
}
@@ -650,8 +860,8 @@ static struct netconsole_target *find_cmdline_target(const char *name)
* Group operations and type for netconsole_subsys.
*/
-static struct config_item *make_netconsole_target(struct config_group *group,
- const char *name)
+static struct config_group *make_netconsole_target(struct config_group *group,
+ const char *name)
{
struct netconsole_target *nt;
unsigned long flags;
@@ -663,23 +873,25 @@ static struct config_item *make_netconsole_target(struct config_group *group,
if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX,
strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) {
nt = find_cmdline_target(name);
- if (nt)
- return &nt->item;
+ if (nt) {
+ init_target_config_group(nt, name);
+ return &nt->group;
+ }
}
nt = alloc_and_init();
if (!nt)
return ERR_PTR(-ENOMEM);
- /* Initialize the config_item member */
- config_item_init_type_name(&nt->item, name, &netconsole_target_type);
+ /* Initialize the config_group member */
+ init_target_config_group(nt, name);
/* Adding, but it is disabled */
spin_lock_irqsave(&target_list_lock, flags);
list_add(&nt->list, &target_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- return &nt->item;
+ return &nt->group;
}
static void drop_netconsole_target(struct config_group *group,
@@ -699,11 +911,11 @@ static void drop_netconsole_target(struct config_group *group,
if (nt->enabled)
netpoll_cleanup(&nt->np);
- config_item_put(&nt->item);
+ config_item_put(&nt->group.cg_item);
}
static struct configfs_group_operations netconsole_subsys_group_ops = {
- .make_item = make_netconsole_target,
+ .make_group = make_netconsole_target,
.drop_item = drop_netconsole_target,
};
@@ -729,8 +941,7 @@ static void populate_configfs_item(struct netconsole_target *nt,
snprintf(target_name, sizeof(target_name), "%s%d",
NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
- config_item_init_type_name(&nt->item, target_name,
- &netconsole_target_type);
+ init_target_config_group(nt, target_name);
}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
@@ -781,6 +992,7 @@ restart:
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
const char *msg = "had an event";
+
switch (event) {
case NETDEV_UNREGISTER:
msg = "unregistered";
@@ -824,19 +1036,34 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
const char *msg_ready = msg;
const char *release;
int release_len = 0;
+ int userdata_len = 0;
+ char *userdata = NULL;
+
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ userdata = nt->userdata_complete;
+ userdata_len = nt->userdata_length;
+#endif
if (nt->release) {
release = init_utsname()->release;
release_len = strlen(release) + 1;
}
- if (msg_len + release_len <= MAX_PRINT_CHUNK) {
+ if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) {
/* No fragmentation needed */
if (nt->release) {
scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
- msg_ready = buf;
+ } else {
+ memcpy(buf, msg, msg_len);
}
+
+ if (userdata)
+ msg_len += scnprintf(&buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len,
+ "%s", userdata);
+
+ msg_ready = buf;
netpoll_send_udp(&nt->np, msg_ready, msg_len);
return;
}
@@ -860,24 +1087,48 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
memcpy(buf + release_len, header, header_len);
header_len += release_len;
- while (offset < body_len) {
+ while (offset < body_len + userdata_len) {
int this_header = header_len;
- int this_chunk;
+ int this_offset = 0;
+ int this_chunk = 0;
this_header += scnprintf(buf + this_header,
sizeof(buf) - this_header,
- ",ncfrag=%d/%d;", offset, body_len);
-
- this_chunk = min(body_len - offset,
- MAX_PRINT_CHUNK - this_header);
- if (WARN_ON_ONCE(this_chunk <= 0))
- return;
-
- memcpy(buf + this_header, body + offset, this_chunk);
-
- netpoll_send_udp(&nt->np, buf, this_header + this_chunk);
+ ",ncfrag=%d/%d;", offset,
+ body_len + userdata_len);
+
+ /* Not all body data has been written yet */
+ if (offset < body_len) {
+ this_chunk = min(body_len - offset,
+ MAX_PRINT_CHUNK - this_header);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header, body + offset, this_chunk);
+ this_offset += this_chunk;
+ }
+ /* The body is fully written and userdata is still pending;
+ * append userdata in this chunk
+ */
+ if (offset + this_offset >= body_len &&
+ offset + this_offset < userdata_len + body_len) {
+ int sent_userdata = (offset + this_offset) - body_len;
+ int preceding_bytes = this_chunk + this_header;
+
+ if (WARN_ON_ONCE(sent_userdata < 0))
+ return;
+
+ this_chunk = min(userdata_len - sent_userdata,
+ MAX_PRINT_CHUNK - preceding_bytes);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header + this_offset,
+ userdata + sent_userdata,
+ this_chunk);
+ this_offset += this_chunk;
+ }
- offset += this_chunk;
+ netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ offset += this_offset;
}
}
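
The fragmentation loop above is easier to follow with the bookkeeping stripped away: every datagram re-sends the header plus an ncfrag=<offset>/<total> marker, and the payload for each fragment is whatever fits after the header, drawn first from the message body and then from the userdata string. A condensed sketch; emit_header() and fill_payload() are hypothetical stand-ins:

    static void example_send_fragments(struct netpoll *np, char *buf,
    				   int body_len, int userdata_len)
    {
    	int total = body_len + userdata_len;
    	int offset = 0;

    	while (offset < total) {
    		/* header + ",ncfrag=offset/total;" prefix each fragment */
    		int header = emit_header(buf, offset, total);
    		int chunk = min(total - offset, MAX_PRINT_CHUNK - header);

    		fill_payload(buf + header, offset, chunk);
    		netpoll_send_udp(np, buf, header + chunk);
    		offset += chunk;
    	}
    }
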
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index bcbc1e19edde..64c0cdd31bf8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -129,7 +129,7 @@ static void nsim_bus_dev_release(struct device *dev)
complete(&nsim_bus_devs_released);
}
-static struct device_type nsim_bus_dev_type = {
+static const struct device_type nsim_bus_dev_type = {
.groups = nsim_bus_dev_attr_groups,
.release = nsim_bus_dev_release,
};
@@ -232,9 +232,154 @@ del_device_store(const struct bus_type *bus, const char *buf, size_t count)
}
static BUS_ATTR_WO(del_device);
+static ssize_t link_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim_a, *nsim_b, *peer;
+ struct net_device *dev_a, *dev_b;
+ unsigned int ifidx_a, ifidx_b;
+ int netnsfd_a, netnsfd_b, err;
+ struct net *ns_a, *ns_b;
+
+ err = sscanf(buf, "%d:%u %d:%u", &netnsfd_a, &ifidx_a, &netnsfd_b,
+ &ifidx_b);
+ if (err != 4) {
+ pr_err("Format for linking two devices is \"netnsfd_a:ifidx_a netnsfd_b:ifidx_b\" (int uint int uint).\n");
+ return -EINVAL;
+ }
+
+ ns_a = get_net_ns_by_fd(netnsfd_a);
+ if (IS_ERR(ns_a)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_a);
+ return -EINVAL;
+ }
+
+ ns_b = get_net_ns_by_fd(netnsfd_b);
+ if (IS_ERR(ns_b)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_b);
+ put_net(ns_a);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev_a = __dev_get_by_index(ns_a, ifidx_a);
+ if (!dev_a) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_a)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ dev_b = __dev_get_by_index(ns_b, ifidx_b);
+ if (!dev_b) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_b)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (dev_a == dev_b) {
+ pr_err("Cannot link a netdevsim to itself\n");
+ goto out_err;
+ }
+
+ err = -EBUSY;
+ nsim_a = netdev_priv(dev_a);
+ peer = rtnl_dereference(nsim_a->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_a,
+ ifidx_a);
+ goto out_err;
+ }
+
+ nsim_b = netdev_priv(dev_b);
+ peer = rtnl_dereference(nsim_b->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_b,
+ ifidx_b);
+ goto out_err;
+ }
+
+ err = 0;
+ rcu_assign_pointer(nsim_a->peer, nsim_b);
+ rcu_assign_pointer(nsim_b->peer, nsim_a);
+
+out_err:
+ put_net(ns_b);
+ put_net(ns_a);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(link_device);
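
With this attribute in place, userspace links two netdevsim ports by writing both endpoints in one shot, e.g. something like echo "$NSFD_A:$IFIDX_A $NSFD_B:$IFIDX_B" > /sys/bus/netdevsim/link_device (path assumed from the standard bus-attribute layout; the fds are open netns file descriptors). unlink_device below takes a single netnsfd:ifindex pair.
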
+
+static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim, *peer;
+ struct net_device *dev;
+ unsigned int ifidx;
+ int netnsfd, err;
+ struct net *ns;
+
+ err = sscanf(buf, "%d:%u", &netnsfd, &ifidx);
+ if (err != 2) {
+ pr_err("Format for unlinking a device is \"netnsfd:ifidx\" (int uint).\n");
+ return -EINVAL;
+ }
+
+ ns = get_net_ns_by_fd(netnsfd);
+ if (IS_ERR(ns)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev = __dev_get_by_index(ns, ifidx);
+ if (!dev) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ if (!netdev_is_nsim(dev)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ nsim = netdev_priv(dev);
+ peer = rtnl_dereference(nsim->peer);
+ if (!peer)
+ goto out_put_netns;
+
+ err = 0;
+ RCU_INIT_POINTER(nsim->peer, NULL);
+ RCU_INIT_POINTER(peer->peer, NULL);
+
+out_put_netns:
+ put_net(ns);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(unlink_device);
+
static struct attribute *nsim_bus_attrs[] = {
&bus_attr_new_device.attr,
&bus_attr_del_device.attr,
+ &bus_attr_link_device.attr,
+ &bus_attr_unlink_device.attr,
NULL
};
ATTRIBUTE_GROUPS(nsim_bus);
@@ -260,7 +405,7 @@ static int nsim_num_vf(struct device *dev)
return nsim_bus_dev->num_vfs;
}
-static struct bus_type nsim_bus = {
+static const struct bus_type nsim_bus = {
.name = DRV_NAME,
.dev_name = DRV_NAME,
.bus_groups = nsim_bus_groups,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 77e8250282a5..8330bc0bcb7e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -29,18 +29,35 @@
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ unsigned int len = skb->len;
+ struct netdevsim *peer_ns;
+ rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out;
+ goto out_drop_free;
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out_drop_free;
+
+ skb_tx_timestamp(skb);
+ if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ goto out_drop_cnt;
+
+ rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
- ns->tx_bytes += skb->len;
+ ns->tx_bytes += len;
u64_stats_update_end(&ns->syncp);
+ return NETDEV_TX_OK;
-out:
+out_drop_free:
dev_kfree_skb(skb);
-
+out_drop_cnt:
+ rcu_read_unlock();
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_dropped++;
+ u64_stats_update_end(&ns->syncp);
return NETDEV_TX_OK;
}
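
What makes the lockless transmit above safe is the discipline around the peer pointer rather than anything local to this function: the pointer is published and cleared under rtnl_lock on the control path, and only dereferenced inside an RCU read-side section on the datapath. Condensed (variable names follow the diff):

    /* Control path, under rtnl_lock (link/unlink in bus.c, nsim_destroy) */
    rcu_assign_pointer(nsim_a->peer, nsim_b);	/* publish */
    RCU_INIT_POINTER(nsim_a->peer, NULL);	/* retract */

    /* Datapath, as in nsim_start_xmit() */
    rcu_read_lock();
    peer_ns = rcu_dereference(ns->peer);
    if (peer_ns)
    	dev_forward_skb(peer_ns->netdev, skb);
    rcu_read_unlock();
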
@@ -70,6 +87,7 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
+ stats->tx_dropped = ns->tx_dropped;
} while (u64_stats_fetch_retry(&ns->syncp, start));
}
@@ -265,6 +283,21 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static int nsim_get_iflink(const struct net_device *dev)
+{
+ struct netdevsim *nsim, *peer;
+ int iflink;
+
+ nsim = netdev_priv(dev);
+
+ rcu_read_lock();
+ peer = rcu_dereference(nsim->peer);
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ rcu_read_unlock();
+
+ return iflink;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -282,6 +315,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
+ .ndo_get_iflink = nsim_get_iflink,
.ndo_bpf = nsim_bpf,
};
@@ -302,7 +336,6 @@ static void nsim_setup(struct net_device *dev)
eth_hw_addr_random(dev);
dev->tx_queue_len = 0;
- dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
IFF_NO_QUEUE;
@@ -413,8 +446,13 @@ err_free_netdev:
void nsim_destroy(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
+ struct netdevsim *peer;
rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
unregister_netdevice(dev);
if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
nsim_macsec_teardown(ns);
@@ -427,6 +465,11 @@ void nsim_destroy(struct netdevsim *ns)
free_netdev(dev);
}
+bool netdev_is_nsim(struct net_device *dev)
+{
+ return dev->netdev_ops == &nsim_netdev_ops;
+}
+
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 028c825b86db..553c4b9b4f63 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -98,6 +98,7 @@ struct netdevsim {
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
struct u64_stats_sync syncp;
struct nsim_bus_dev *nsim_bus_dev;
@@ -125,11 +126,13 @@ struct netdevsim {
} udp_ports;
struct nsim_ethtool ethtool;
+ struct netdevsim __rcu *peer;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+bool netdev_is_nsim(struct net_device *dev);
void nsim_ethtool_init(struct netdevsim *ns);
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 39171380ccf2..a4d2e76a8d58 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -145,7 +145,7 @@ static int netkit_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nk->peer);
if (peer)
- iflink = peer->ifindex;
+ iflink = READ_ONCE(peer->ifindex);
rcu_read_unlock();
return iflink;
}
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 5e19a6839dea..e5a0987a263e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -17,17 +17,6 @@ static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int nlmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- return dev->lstats == NULL ? -ENOMEM : 0;
-}
-
-static void nlmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct nlmon {
struct netlink_tap nt;
};
@@ -51,15 +40,7 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- u64 packets, bytes;
-
- dev_lstats_read(dev, &packets, &bytes);
-
- stats->rx_packets = packets;
- stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
- stats->tx_bytes = 0;
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}
static u32 always_on(struct net_device *dev)
@@ -72,8 +53,6 @@ static const struct ethtool_ops nlmon_ethtool_ops = {
};
static const struct net_device_ops nlmon_ops = {
- .ndo_init = nlmon_dev_init,
- .ndo_uninit = nlmon_dev_uninit,
.ndo_open = nlmon_open,
.ndo_stop = nlmon_close,
.ndo_start_xmit = nlmon_xmit,
@@ -92,6 +71,7 @@ static void nlmon_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->flags = IFF_NOARP;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
/* That's rather a softlimit here, which, of course,
* can be altered. Not a real MTU, but what is to be
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index dc3962b2aa6b..853b8c138718 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -398,4 +398,5 @@ void lynx_pcs_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(lynx_pcs_destroy);
+MODULE_DESCRIPTION("NXP Lynx PCS phylink library");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 8501dd365279..4f63abe638c4 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -303,4 +303,5 @@ void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(mtk_pcs_lynxi_destroy);
+MODULE_DESCRIPTION("MediaTek SGMII library for LynxI");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index d93f84fbb1fd..4bd66fdde367 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -183,7 +183,7 @@ static void miic_converter_enable(struct miic *miic, int port, int enable)
miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
}
-static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+static int miic_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising, bool permit)
{
@@ -234,7 +234,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
return 0;
}
-static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -333,6 +333,7 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
+ miic_port->pcs.neg_mode = true;
return &miic_port->pcs;
}
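
The miic conversion above is part of the phylink "neg_mode" migration: a PCS sets pcs->neg_mode = true, and its .pcs_config()/.pcs_link_up() callbacks then receive a PHYLINK_PCS_NEG_* value in place of the legacy MLO_AN_* mode. A minimal hedged sketch of the opt-in, identifiers hypothetical, assuming <linux/phylink.h>:

	static int foo_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
				  phy_interface_t interface,
				  const unsigned long *advertising,
				  bool permit_pause_to_mac)
	{
		/* with pcs->neg_mode = true this argument carries a
		 * PHYLINK_PCS_NEG_* value rather than an MLO_AN_* mode
		 */
		if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
			;	/* program the PCS for in-band autoneg here */
		return 0;
	}

	static const struct phylink_pcs_ops foo_pcs_ops = {
		.pcs_config = foo_pcs_config,
	};

	static void foo_pcs_init(struct phylink_pcs *pcs)
	{
		pcs->ops = &foo_pcs_ops;
		pcs->neg_mode = true;	/* opt in to the neg_mode convention */
	}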
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31f0beba638a..31525fe9c32e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -10,7 +10,7 @@
#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
-#include <linux/workqueue.h>
+
#include "pcs-xpcs.h"
#define phylink_pcs_to_xpcs(pl_pcs) \
@@ -130,7 +130,6 @@ static const phy_interface_t xpcs_1000basex_interfaces[] = {
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
- PHY_INTERFACE_MODE_MAX,
};
enum {
@@ -293,7 +292,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
dev = MDIO_MMD_VEND2;
break;
default:
- return -1;
+ return -EINVAL;
}
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
@@ -614,14 +613,15 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return -EINVAL;
/* Populate the supported link modes for this PHY interface type.
* FIXME: what about the port modes and autoneg bit? This masks
* all those away.
*/
- if (compat)
- for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(compat->supported[i], xpcs_supported);
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
@@ -636,8 +636,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
const struct xpcs_compat *compat = &xpcs->id->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
- if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
- __set_bit(compat->interface[j], interfaces);
+ __set_bit(compat->interface[j], interfaces);
}
}
EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
@@ -891,7 +890,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
@@ -1456,4 +1455,5 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
}
EXPORT_SYMBOL_GPL(xpcs_create_mdiodev);
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS library");
MODULE_LICENSE("GPL v2");
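
Two small cleanups recur through the xpcs hunks: interface arrays are iterated by element count instead of a PHY_INTERFACE_MODE_MAX sentinel, and bare -1 returns become proper errnos such as -EINVAL. A hedged sketch of the counted-array idiom, names hypothetical:

	static const phy_interface_t foo_interfaces[] = {
		PHY_INTERFACE_MODE_2500BASEX,
		/* no PHY_INTERFACE_MODE_MAX terminator needed */
	};

	static void foo_get_interfaces(unsigned long *interfaces)
	{
		int i;

		/* iterate by element count, so every entry is known valid */
		for (i = 0; i < ARRAY_SIZE(foo_interfaces); i++)
			__set_bit(foo_interfaces[i], interfaces);
	}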
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9e2672800f0b..1df0595c5ba9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -232,6 +232,7 @@ config MARVELL_10G_PHY
config MARVELL_88Q2XXX_PHY
tristate "Marvell 88Q2XXX PHY"
+ depends on HWMON || HWMON=n
help
Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet
PHYs.
@@ -335,12 +336,7 @@ config NCN26000_PHY
Currently supports the NCN26000 10BASE-T1S Industrial PHY
with MII interface.
-config AT803X_PHY
- tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
- depends on REGULATOR
- help
- Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
- QCA8337(Internal qca8k PHY) model
+source "drivers/net/phy/qcom/Kconfig"
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6097afd44392..197acfa0b412 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
-obj-$(CONFIG_AT803X_PHY) += at803x.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -91,6 +90,7 @@ endif
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
+obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 7619d6185801..85f910e2d4fb 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -18,6 +18,12 @@
#define PHY_ID_ADIN1110 0x0283bc91
#define PHY_ID_ADIN2111 0x0283bca1
+#define ADIN_PHY_SUBSYS_IRQ_MASK 0x0021
+#define ADIN_LINK_STAT_CHNG_IRQ_EN BIT(1)
+
+#define ADIN_PHY_SUBSYS_IRQ_STATUS 0x0011
+#define ADIN_LINK_STAT_CHNG BIT(1)
+
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
@@ -136,6 +142,53 @@ static int adin_config_aneg(struct phy_device *phydev)
return genphy_c45_config_aneg(phydev);
}
+static int adin_phy_ack_intr(struct phy_device *phydev)
+{
+ /* Clear pending interrupts */
+ int rc = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_config_intr(struct phy_device *phydev)
+{
+ u16 irq_mask;
+ int ret;
+
+ ret = adin_phy_ack_intr(phydev);
+ if (ret)
+ return ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ irq_mask = ADIN_LINK_STAT_CHNG_IRQ_EN;
+ else
+ irq_mask = 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_MASK,
+ ADIN_LINK_STAT_CHNG_IRQ_EN, irq_mask);
+}
+
+static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & ADIN_LINK_STAT_CHNG))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
@@ -275,6 +328,8 @@ static struct phy_driver adin_driver[] = {
.probe = adin_probe,
.config_aneg = adin_config_aneg,
.read_status = adin_read_status,
+ .config_intr = adin_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.set_loopback = adin_set_loopback,
.suspend = adin_suspend,
.resume = adin_resume,
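
The adin1100 hunk above follows the standard phylib interrupt contract: .config_intr acks stale events before unmasking, and .handle_interrupt reads the latched status, returns IRQ_NONE for events it did not enable, and otherwise kicks the PHY state machine. A condensed sketch of that contract, register names hypothetical, assuming <linux/phy.h>:

	static irqreturn_t foo_handle_interrupt(struct phy_device *phydev)
	{
		int status = phy_read(phydev, FOO_IRQ_STATUS);	/* read-to-clear */

		if (status < 0) {
			phy_error(phydev);
			return IRQ_NONE;
		}

		if (!(status & FOO_LINK_CHANGED))
			return IRQ_NONE;	/* not an event we enabled */

		phy_trigger_machine(phydev);	/* let phylib re-read link state */
		return IRQ_HANDLED;
	}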
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 97a2fafa15ca..71bfddb8f453 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -22,9 +22,13 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR111 0x03a1b610
+#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
+#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
@@ -727,6 +731,15 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS,
+ MDIO_PMD_TXDIS_GLOBAL);
+ if (ret)
+ return ret;
+
+ ret = aqr107_wait_processor_intensive_op(phydev);
+ if (ret)
+ return ret;
+
return aqr107_fill_interface_modes(phydev);
}
@@ -746,6 +759,16 @@ static int aqr107_probe(struct phy_device *phydev)
return aqr_hwmon_probe(phydev);
}
+static int aqr111_config_init(struct phy_device *phydev)
+{
+ /* The AQR111 reports support for speeds up to 10G,
+ * however only speeds up to 5G are actually supported.
+ */
+ phy_set_max_speed(phydev, SPEED_5000);
+
+ return aqr107_config_init(phydev);
+}
+
static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
@@ -820,6 +843,44 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
+ .name = "Aquantia AQR111",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
+ .name = "Aquantia AQR111B0",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
@@ -864,6 +925,25 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
+ .name = "Aquantia AQR113",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr113c_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
@@ -882,6 +962,25 @@ static struct phy_driver aqr_driver[] = {
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
+ .name = "Aquantia AQR813",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -894,9 +993,13 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
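
Each new Aquantia model above needs both a phy_driver entry and a matching line in the MDIO device table; it is the table, not the driver array, that lets udev autoload the module when a PHY with that ID appears. A minimal hedged sketch of the boilerplate, ID hypothetical:

	#define PHY_ID_FOO	0x03a1b999	/* hypothetical OUI/model */

	static struct phy_driver foo_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_FOO),
		.name = "Foo PHY",
	},
	};
	module_phy_driver(foo_driver);	/* registers the array with phylib */

	static struct mdio_device_id __maybe_unused foo_tbl[] = {
		{ PHY_ID_MATCH_MODEL(PHY_ID_FOO) },
		{ }
	};
	MODULE_DEVICE_TABLE(mdio, foo_tbl);	/* exported for autoloading */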
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
deleted file mode 100644
index a62442a55774..000000000000
--- a/drivers/net/phy/at803x.c
+++ /dev/null
@@ -1,2432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/net/phy/at803x.c
- *
- * Driver for Qualcomm Atheros AR803x PHY
- *
- * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
- */
-
-#include <linux/phy.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool_netlink.h>
-#include <linux/bitfield.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/phylink.h>
-#include <linux/sfp.h>
-#include <dt-bindings/net/qca-ar803x.h>
-
-#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
-#define AT803X_SFC_ASSERT_CRS BIT(11)
-#define AT803X_SFC_FORCE_LINK BIT(10)
-#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
-#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
-#define AT803X_SFC_MANUAL_MDIX 0x1
-#define AT803X_SFC_MANUAL_MDI 0x0
-#define AT803X_SFC_SQE_TEST BIT(2)
-#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
-#define AT803X_SFC_DISABLE_JABBER BIT(0)
-
-#define AT803X_SPECIFIC_STATUS 0x11
-#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
-#define AT803X_SS_SPEED_1000 2
-#define AT803X_SS_SPEED_100 1
-#define AT803X_SS_SPEED_10 0
-#define AT803X_SS_DUPLEX BIT(13)
-#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
-#define AT803X_SS_MDIX BIT(6)
-
-#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
-#define QCA808X_SS_SPEED_2500 4
-
-#define AT803X_INTR_ENABLE 0x12
-#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
-#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
-#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
-#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
-#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
-#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
-#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
-#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
-#define AT803X_INTR_ENABLE_WOL BIT(0)
-
-#define AT803X_INTR_STATUS 0x13
-
-#define AT803X_SMART_SPEED 0x14
-#define AT803X_SMART_SPEED_ENABLE BIT(5)
-#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
-#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
-#define AT803X_CDT 0x16
-#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
-#define AT803X_CDT_ENABLE_TEST BIT(0)
-#define AT803X_CDT_STATUS 0x1c
-#define AT803X_CDT_STATUS_STAT_NORMAL 0
-#define AT803X_CDT_STATUS_STAT_SHORT 1
-#define AT803X_CDT_STATUS_STAT_OPEN 2
-#define AT803X_CDT_STATUS_STAT_FAIL 3
-#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
-#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
-#define AT803X_LED_CONTROL 0x18
-
-#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
-#define AT803X_WOL_EN BIT(5)
-#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
-#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
-#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
-#define AT803X_DEBUG_ADDR 0x1D
-#define AT803X_DEBUG_DATA 0x1E
-
-#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_BASET_RGMII 0x00
-#define AT803X_MODE_CFG_BASET_SGMII 0x01
-#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
-#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
-#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
-#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
-#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
-#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
-#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
-#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
-#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
-
-#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
-#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-
-#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
-#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
-#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
-#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-
-#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
-#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-
-#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
-#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
-#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
-#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
-
-#define AT803X_DEBUG_REG_3C 0x3C
-
-#define AT803X_DEBUG_REG_GREEN 0x3D
-#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
-
-#define AT803X_DEBUG_REG_1F 0x1F
-#define AT803X_DEBUG_PLL_ON BIT(2)
-#define AT803X_DEBUG_RGMII_1V8 BIT(3)
-
-#define MDIO_AZ_DEBUG 0x800D
-
-/* AT803x supports either the XTAL input pad, an internal PLL or the
- * DSP as clock reference for the clock output pad. The XTAL reference
- * is only used for 25 MHz output, all other frequencies need the PLL.
- * The DSP as a clock reference is used in synchronous ethernet
- * applications.
- *
- * By default the PLL is only enabled if there is a link. Otherwise
- * the PHY will go into a low power state and disable the PLL. You can
- * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
- * enabled.
- */
-#define AT803X_MMD7_CLK25M 0x8016
-#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
-#define AT803X_CLK_OUT_25MHZ_XTAL 0
-#define AT803X_CLK_OUT_25MHZ_DSP 1
-#define AT803X_CLK_OUT_50MHZ_PLL 2
-#define AT803X_CLK_OUT_50MHZ_DSP 3
-#define AT803X_CLK_OUT_62_5MHZ_PLL 4
-#define AT803X_CLK_OUT_62_5MHZ_DSP 5
-#define AT803X_CLK_OUT_125MHZ_PLL 6
-#define AT803X_CLK_OUT_125MHZ_DSP 7
-
-/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
- * but doesn't support choosing between XTAL/PLL and DSP.
- */
-#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
-
-#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
-#define AT803X_CLK_OUT_STRENGTH_FULL 0
-#define AT803X_CLK_OUT_STRENGTH_HALF 1
-#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
-
-#define AT803X_DEFAULT_DOWNSHIFT 5
-#define AT803X_MIN_DOWNSHIFT 2
-#define AT803X_MAX_DOWNSHIFT 9
-
-#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
-#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
-#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
-#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
-
-#define ATH9331_PHY_ID 0x004dd041
-#define ATH8030_PHY_ID 0x004dd076
-#define ATH8031_PHY_ID 0x004dd074
-#define ATH8032_PHY_ID 0x004dd023
-#define ATH8035_PHY_ID 0x004dd072
-#define AT8030_PHY_ID_MASK 0xffffffef
-
-#define QCA8081_PHY_ID 0x004dd101
-
-#define QCA8327_A_PHY_ID 0x004dd033
-#define QCA8327_B_PHY_ID 0x004dd034
-#define QCA8337_PHY_ID 0x004dd036
-#define QCA9561_PHY_ID 0x004dd042
-#define QCA8K_PHY_ID_MASK 0xffffffff
-
-#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
-
-#define AT803X_PAGE_FIBER 0
-#define AT803X_PAGE_COPPER 1
-
-/* don't turn off internal PLL */
-#define AT803X_KEEP_PLL_ENABLED BIT(0)
-#define AT803X_DISABLE_SMARTEEE BIT(1)
-
-/* disable hibernation mode */
-#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
-
-/* ADC threshold */
-#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
-#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
-#define QCA808X_ADC_THRESHOLD_80MV 0
-#define QCA808X_ADC_THRESHOLD_100MV 0xf0
-#define QCA808X_ADC_THRESHOLD_200MV 0x0f
-#define QCA808X_ADC_THRESHOLD_300MV 0xff
-
-/* CLD control */
-#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
-#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
-#define QCA808X_8023AZ_AFE_EN 0x90
-
-/* AZ control */
-#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
-#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
-#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
-#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
-#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
-#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
-#define QCA808X_TOP_OPTION1_DATA 0x0
-
-#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
-#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
-#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
-#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
-#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
-#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
-#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
-#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
-#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
-#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
-#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
-#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
-
-/* master/slave seed config */
-#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
-#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
-#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
-#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
-
-/* Hibernation yields lower power consumption than normal operation mode.
- * When the copper cable is unplugged, the PHY enters hibernation mode in about 10s.
- */
-#define QCA808X_DBG_AN_TEST 0xb
-#define QCA808X_HIBERNATION_EN BIT(15)
-
-#define QCA808X_CDT_ENABLE_TEST BIT(15)
-#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
-#define QCA808X_CDT_STATUS BIT(11)
-#define QCA808X_CDT_LENGTH_UNIT BIT(10)
-
-#define QCA808X_MMD3_CDT_STATUS 0x8064
-#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
-#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
-#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
-#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
-
-#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
-#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
-#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
-#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-
-#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
-#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
-
-#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
-#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
-
-/* NORMAL are MDI with type set to 0 */
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-
-/* Defined for reference only; the busy state is already handled by the wait for completion */
-#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
-
-/* QCA808X 1G chip type */
-#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
-#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
-
-#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
-#define QCA8081_PHY_FIFO_RSTN BIT(11)
-
-MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
-MODULE_AUTHOR("Matus Ujhelyi");
-MODULE_LICENSE("GPL");
-
-enum stat_access_type {
- PHY,
- MMD
-};
-
-struct at803x_hw_stat {
- const char *string;
- u8 reg;
- u32 mask;
- enum stat_access_type access_type;
-};
-
-static struct at803x_hw_stat qca83xx_hw_stats[] = {
- { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
- { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
- { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
-};
-
-struct at803x_ss_mask {
- u16 speed_mask;
- u8 speed_shift;
-};
-
-struct at803x_priv {
- int flags;
- u16 clk_25m_reg;
- u16 clk_25m_mask;
- u8 smarteee_lpi_tw_1g;
- u8 smarteee_lpi_tw_100m;
- bool is_fiber;
- bool is_1000basex;
- struct regulator_dev *vddio_rdev;
- struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
-};
-
-struct at803x_context {
- u16 bmcr;
- u16 advertise;
- u16 control1000;
- u16 int_enable;
- u16 smart_speed;
- u16 led_control;
-};
-
-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, data);
-}
-
-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_read(phydev, AT803X_DEBUG_DATA);
-}
-
-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
- u16 clear, u16 set)
-{
- u16 val;
- int ret;
-
- ret = at803x_debug_reg_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- val = ret & 0xffff;
- val &= ~clear;
- val |= set;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, val);
-}
-
-static int at803x_write_page(struct phy_device *phydev, int page)
-{
- int mask;
- int set;
-
- if (page == AT803X_PAGE_COPPER) {
- set = AT803X_BT_BX_REG_SEL;
- mask = 0;
- } else {
- set = 0;
- mask = AT803X_BT_BX_REG_SEL;
- }
-
- return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
-}
-
-static int at803x_read_page(struct phy_device *phydev)
-{
- int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
-
- if (ccr < 0)
- return ccr;
-
- if (ccr & AT803X_BT_BX_REG_SEL)
- return AT803X_PAGE_COPPER;
-
- return AT803X_PAGE_FIBER;
-}
-
-static int at803x_enable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
- AT803X_DEBUG_RX_CLK_DLY_EN);
-}
-
-static int at803x_enable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
- AT803X_DEBUG_TX_CLK_DLY_EN);
-}
-
-static int at803x_disable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- AT803X_DEBUG_RX_CLK_DLY_EN, 0);
-}
-
-static int at803x_disable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
- AT803X_DEBUG_TX_CLK_DLY_EN, 0);
-}
-
-/* save relevant PHY registers to private copy */
-static void at803x_context_save(struct phy_device *phydev,
- struct at803x_context *context)
-{
- context->bmcr = phy_read(phydev, MII_BMCR);
- context->advertise = phy_read(phydev, MII_ADVERTISE);
- context->control1000 = phy_read(phydev, MII_CTRL1000);
- context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
- context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
- context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
-}
-
-/* restore relevant PHY registers from private copy */
-static void at803x_context_restore(struct phy_device *phydev,
- const struct at803x_context *context)
-{
- phy_write(phydev, MII_BMCR, context->bmcr);
- phy_write(phydev, MII_ADVERTISE, context->advertise);
- phy_write(phydev, MII_CTRL1000, context->control1000);
- phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
- phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
- phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
-}
-
-static int at803x_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret, irq_enabled;
-
- if (wol->wolopts & WAKE_MAGIC) {
- struct net_device *ndev = phydev->attached_dev;
- const u8 *mac;
- unsigned int i;
- static const unsigned int offsets[] = {
- AT803X_LOC_MAC_ADDR_32_47_OFFSET,
- AT803X_LOC_MAC_ADDR_16_31_OFFSET,
- AT803X_LOC_MAC_ADDR_0_15_OFFSET,
- };
-
- if (!ndev)
- return -ENODEV;
-
- mac = (const u8 *)ndev->dev_addr;
-
- if (!is_valid_ether_addr(mac))
- return -EINVAL;
-
- for (i = 0; i < 3; i++)
- phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
- mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
-
- /* Enable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
- if (ret)
- return ret;
- } else {
- /* Disable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
- if (ret)
- return ret;
- }
-
- /* Clear WOL status */
- ret = phy_read(phydev, AT803X_INTR_STATUS);
- if (ret < 0)
- return ret;
-
- /* Check if interrupts other than WOL were triggered while the PHY is
- * in interrupt mode; only the interrupts enabled by AT803X_INTR_ENABLE can
- * be passed up to the interrupt pin.
- */
- irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (irq_enabled < 0)
- return irq_enabled;
-
- irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
- if (ret & irq_enabled && !phy_polling_mode(phydev))
- phy_trigger_machine(phydev);
-
- return 0;
-}
-
-static void at803x_get_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int value;
-
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- if (value < 0)
- return;
-
- if (value & AT803X_INTR_ENABLE_WOL)
- wol->wolopts |= WAKE_MAGIC;
-}
-
-static int qca83xx_get_sset_count(struct phy_device *phydev)
-{
- return ARRAY_SIZE(qca83xx_hw_stats);
-}
-
-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
- strscpy(data + i * ETH_GSTRING_LEN,
- qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
-{
- struct at803x_hw_stat stat = qca83xx_hw_stats[i];
- struct at803x_priv *priv = phydev->priv;
- int val;
- u64 ret;
-
- if (stat.access_type == MMD)
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
- else
- val = phy_read(phydev, stat.reg);
-
- if (val < 0) {
- ret = U64_MAX;
- } else {
- val = val & stat.mask;
- priv->stats[i] += val;
- ret = priv->stats[i];
- }
-
- return ret;
-}
-
-static void qca83xx_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
- data[i] = qca83xx_get_stat(phydev, i);
-}
-
-static int at803x_suspend(struct phy_device *phydev)
-{
- int value;
- int wol_enabled;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- wol_enabled = value & AT803X_INTR_ENABLE_WOL;
-
- if (wol_enabled)
- value = BMCR_ISOLATE;
- else
- value = BMCR_PDOWN;
-
- phy_modify(phydev, MII_BMCR, 0, value);
-
- return 0;
-}
-
-static int at803x_resume(struct phy_device *phydev)
-{
- return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
-}
-
-static int at803x_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- u32 freq, strength, tw;
- unsigned int sel;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF_MDIO))
- return 0;
-
- if (of_property_read_bool(node, "qca,disable-smarteee"))
- priv->flags |= AT803X_DISABLE_SMARTEEE;
-
- if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
- priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_1g = tw;
- }
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_100m = tw;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
- if (!ret) {
- switch (freq) {
- case 25000000:
- sel = AT803X_CLK_OUT_25MHZ_XTAL;
- break;
- case 50000000:
- sel = AT803X_CLK_OUT_50MHZ_PLL;
- break;
- case 62500000:
- sel = AT803X_CLK_OUT_62_5MHZ_PLL;
- break;
- case 125000000:
- sel = AT803X_CLK_OUT_125MHZ_PLL;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-frequency\n");
- return -EINVAL;
- }
-
- priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
- priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
- if (!ret) {
- priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
- switch (strength) {
- case AR803X_STRENGTH_FULL:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
- break;
- case AR803X_STRENGTH_HALF:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
- break;
- case AR803X_STRENGTH_QUARTER:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-strength\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int at803x_probe(struct phy_device *phydev)
-{
- struct device *dev = &phydev->mdio.dev;
- struct at803x_priv *priv;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- phydev->priv = priv;
-
- ret = at803x_parse_dt(phydev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int at803x_get_features(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err;
-
- err = genphy_read_abilities(phydev);
- if (err)
- return err;
-
- if (phydev->drv->phy_id != ATH8031_PHY_ID)
- return 0;
-
- /* AR8031/AR8033 have different status registers
- * for copper and fiber operation. However, the
- * extended status register is the same for both
- * operation modes.
- *
- * As a result of that, ESTATUS_1000_XFULL is set
- * to 1 even when operating in copper TP mode.
- *
- * Remove this mode from the supported link modes
- * when not operating in 1000BaseX mode.
- */
- if (!priv->is_1000basex)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
-
- return 0;
-}
-
-static int at803x_smarteee_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- u16 mask = 0, val = 0;
- int ret;
-
- if (priv->flags & AT803X_DISABLE_SMARTEEE)
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
-
- if (priv->smarteee_lpi_tw_1g) {
- mask |= 0xff00;
- val |= priv->smarteee_lpi_tw_1g << 8;
- }
- if (priv->smarteee_lpi_tw_100m) {
- mask |= 0x00ff;
- val |= priv->smarteee_lpi_tw_100m;
- }
- if (!mask)
- return 0;
-
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
- mask, val);
- if (ret)
- return ret;
-
- return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
-}
-
-static int at803x_clk_out_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (!priv->clk_25m_mask)
- return 0;
-
- return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
- priv->clk_25m_mask, priv->clk_25m_reg);
-}
-
-static int at8031_pll_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is PLL OFF. After a soft reset, the
- * values are retained.
- */
- if (priv->flags & AT803X_KEEP_PLL_ENABLED)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_PLL_ON);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_PLL_ON, 0);
-}
-
-static int at803x_hibernation_mode_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is hibernation mode enabled. After
- * software reset, the value is retained.
- */
- if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
- return 0;
-
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
-}
-
-static int at803x_config_init(struct phy_device *phydev)
-{
- int ret;
-
- /* The RX and TX delay default is:
- * after HW reset: RX delay enabled and TX delay disabled
- * after SW reset: RX delay enabled, while TX delay retains the
- * value before reset.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- ret = at803x_enable_rx_delay(phydev);
- else
- ret = at803x_disable_rx_delay(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- ret = at803x_enable_tx_delay(phydev);
- else
- ret = at803x_disable_tx_delay(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_smarteee_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_clk_out_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_hibernation_mode_config(phydev);
- if (ret < 0)
- return ret;
-
- /* Ar803x extended next page bit is enabled by default. Cisco
- * multigig switches read this bit and attempt to negotiate 10Gbps
- * rates even if the next page bit is disabled. This is incorrect
- * behaviour but we still need to accommodate it. XNP is only needed
- * for 10Gbps support, so disable XNP.
- */
- return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
-}
-
-static int at803x_ack_interrupt(struct phy_device *phydev)
-{
- int err;
-
- err = phy_read(phydev, AT803X_INTR_STATUS);
-
- return (err < 0) ? err : 0;
-}
-
-static int at803x_config_intr(struct phy_device *phydev)
-{
- int err;
- int value;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
- value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
- value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
- value |= AT803X_INTR_ENABLE_LINK_FAIL;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
-
- err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- } else {
- err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
- if (err)
- return err;
-
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- }
-
- return err;
-}
-
-static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status, int_enabled;
-
- irq_status = phy_read(phydev, AT803X_INTR_STATUS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* Read the current enabled interrupts */
- int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (int_enabled < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* See if this was one of our enabled interrupts */
- if (!(irq_status & int_enabled))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
-static void at803x_link_change_notify(struct phy_device *phydev)
-{
- /*
- * Conduct a hardware reset for AT8030 every time a link loss is
- * signalled. This is necessary to circumvent a hardware bug that
- * occurs when the cable is unplugged while TX packets are pending
- * in the FIFO. In such cases, the FIFO enters an error mode from
- * which software cannot recover.
- */
- if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
- struct at803x_context context;
-
- at803x_context_save(phydev, &context);
-
- phy_device_reset(phydev, 1);
- usleep_range(1000, 2000);
- phy_device_reset(phydev, 0);
- usleep_range(1000, 2000);
-
- at803x_context_restore(phydev, &context);
-
- phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
- }
-}
-
-static int at803x_read_specific_status(struct phy_device *phydev,
- struct at803x_ss_mask ss_mask)
-{
- int ss;
-
- /* Read the AT8035 PHY-Specific Status register, which indicates the
- * speed and duplex that the PHY is actually using, irrespective of
- * whether we are in autoneg mode or not.
- */
- ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
- if (ss < 0)
- return ss;
-
- if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
- int sfc, speed;
-
- sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
- if (sfc < 0)
- return sfc;
-
- speed = ss & ss_mask.speed_mask;
- speed >>= ss_mask.speed_shift;
-
- switch (speed) {
- case AT803X_SS_SPEED_10:
- phydev->speed = SPEED_10;
- break;
- case AT803X_SS_SPEED_100:
- phydev->speed = SPEED_100;
- break;
- case AT803X_SS_SPEED_1000:
- phydev->speed = SPEED_1000;
- break;
- case QCA808X_SS_SPEED_2500:
- phydev->speed = SPEED_2500;
- break;
- }
- if (ss & AT803X_SS_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (ss & AT803X_SS_MDIX)
- phydev->mdix = ETH_TP_MDI_X;
- else
- phydev->mdix = ETH_TP_MDI;
-
- switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
- case AT803X_SFC_MANUAL_MDI:
- phydev->mdix_ctrl = ETH_TP_MDI;
- break;
- case AT803X_SFC_MANUAL_MDIX:
- phydev->mdix_ctrl = ETH_TP_MDI_X;
- break;
- case AT803X_SFC_AUTOMATIC_CROSSOVER:
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- break;
- }
- }
-
- return 0;
-}
-
-static int at803x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int err, old_link = phydev->link;
-
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- err = genphy_read_lpa(phydev);
- if (err < 0)
- return err;
-
- ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
- err = at803x_read_specific_status(phydev, ss_mask);
- if (err < 0)
- return err;
-
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
- phy_resolve_aneg_pause(phydev);
-
- return 0;
-}
-
-static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
-{
- u16 val;
-
- switch (ctrl) {
- case ETH_TP_MDI:
- val = AT803X_SFC_MANUAL_MDI;
- break;
- case ETH_TP_MDI_X:
- val = AT803X_SFC_MANUAL_MDIX;
- break;
- case ETH_TP_MDI_AUTO:
- val = AT803X_SFC_AUTOMATIC_CROSSOVER;
- break;
- default:
- return 0;
- }
-
- return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
- AT803X_SFC_MDI_CROSSOVER_MODE_M,
- FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
-}
-
-static int at803x_prepare_config_aneg(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
- if (ret < 0)
- return ret;
-
- /* Changes of the mdix bits are disruptive to the normal operation;
- * therefore any changes to these registers must be followed by a
- * software reset to take effect.
- */
- if (ret == 1) {
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int at803x_config_aneg(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
- if (priv->is_1000basex)
- return genphy_c37_config_aneg(phydev);
-
- return genphy_config_aneg(phydev);
-}
-
-static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
-{
- int val;
-
- val = phy_read(phydev, AT803X_SMART_SPEED);
- if (val < 0)
- return val;
-
- if (val & AT803X_SMART_SPEED_ENABLE)
- *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
- else
- *d = DOWNSHIFT_DEV_DISABLE;
-
- return 0;
-}
-
-static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
-{
- u16 mask, set;
- int ret;
-
- switch (cnt) {
- case DOWNSHIFT_DEV_DEFAULT_COUNT:
- cnt = AT803X_DEFAULT_DOWNSHIFT;
- fallthrough;
- case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
- set = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER |
- FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
- mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
- break;
- case DOWNSHIFT_DEV_DISABLE:
- set = 0;
- mask = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER;
- break;
- default:
- return -EINVAL;
- }
-
- ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
-
- /* After changing the smart speed settings, we need to perform a
- * software reset; use phy_init_hw() to make sure we reapply any
- * values which might have been lost during the software reset.
- */
- if (ret == 1)
- ret = phy_init_hw(phydev);
-
- return ret;
-}
-
-static int at803x_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_get_downshift(phydev, data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_set_downshift(phydev, *(const u8 *)data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_cable_test_result_trans(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case AT803X_CDT_STATUS_STAT_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case AT803X_CDT_STATUS_STAT_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case AT803X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static bool at803x_cdt_test_failed(u16 status)
-{
- return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
- AT803X_CDT_STATUS_STAT_FAIL;
-}
-
-static bool at803x_cdt_fault_length_valid(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_OPEN:
- case AT803X_CDT_STATUS_STAT_SHORT:
- return true;
- }
- return false;
-}
-
-static int at803x_cdt_fault_length(int dt)
-{
- /* According to the datasheet the distance to the fault is
- * DELTA_TIME * 0.824 meters.
- *
- * The author suspects the correct formula is:
- *
- * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
- *
- * where c is the speed of light, VF is the velocity factor of
- * the twisted pair cable, 125MHz is the counter frequency, and
- * we need to divide by 2 because the hardware measures the
- * round trip time to the fault and back to the PHY.
- *
- * With a VF of 0.69 we get the factor 0.824 mentioned in the
- * datasheet.
- */
- return (dt * 824) / 10;
-}
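
To make the conversion factor concrete, here is a worked example under the comment's own assumptions (VF = 0.69, 125 MHz counter, round trip halved); the helper returns a length in centimetres:

	/* illustration only, not driver code:
	 *
	 *   at803x_cdt_fault_length(100) = (100 * 824) / 10 = 8240
	 *   i.e. 8240 cm = 82.4 m to the fault
	 *
	 * cross-check against the physical formula, per DELTA_TIME unit:
	 *   (c * VF) / 125MHz / 2 = (3e8 m/s * 0.69) / 125e6 / 2 ~= 0.83 m
	 * which matches the datasheet's 0.824 m factor.
	 */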
-
-static int at803x_cdt_start(struct phy_device *phydev,
- u32 cdt_start)
-{
- return phy_write(phydev, AT803X_CDT, cdt_start);
-}
-
-static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
- u32 cdt_en)
-{
- int val, ret;
-
- /* One test run takes about 25ms */
- ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
- !(val & cdt_en),
- 30000, 100000, true);
-
- return ret < 0 ? ret : 0;
-}
-
-static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
-{
- static const int ethtool_pair[] = {
- ETHTOOL_A_CABLE_PAIR_A,
- ETHTOOL_A_CABLE_PAIR_B,
- ETHTOOL_A_CABLE_PAIR_C,
- ETHTOOL_A_CABLE_PAIR_D,
- };
- int ret, val;
-
- val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read(phydev, AT803X_CDT_STATUS);
- if (val < 0)
- return val;
-
- if (at803x_cdt_test_failed(val))
- return 0;
-
- ethnl_cable_test_result(phydev, ethtool_pair[pair],
- at803x_cable_test_result_trans(val));
-
- if (at803x_cdt_fault_length_valid(val)) {
- val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
- ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- at803x_cdt_fault_length(val));
- }
-
- return 1;
-}
-
-static int at803x_cable_test_get_status(struct phy_device *phydev,
- bool *finished, unsigned long pair_mask)
-{
- int retries = 20;
- int pair, ret;
-
- *finished = false;
-
- /* According to the datasheet the CDT can be performed when
- * there is no link partner or when the link partner is
- * auto-negotiating. Starting the test will restart the AN
- * automatically. It seems that by doing this repeatedly we will
- * eventually get a slot where our link partner won't disturb our
- * measurement.
- */
- while (pair_mask && retries--) {
- for_each_set_bit(pair, &pair_mask, 4) {
- ret = at803x_cable_test_one_pair(phydev, pair);
- if (ret < 0)
- return ret;
- if (ret)
- clear_bit(pair, &pair_mask);
- }
- if (pair_mask)
- msleep(250);
- }
-
- *finished = true;
-
- return 0;
-}
-
-static void at803x_cable_test_autoneg(struct phy_device *phydev)
-{
- /* Enable auto-negotiation, but advertise no capabilities, no link
- * will be established. A restart of the auto-negotiation is not
- * required, because the cable test will automatically break the link.
- */
- phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
- phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
-}
-
-static int at803x_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
-
- if (selector)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_RGMII_1V8);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_RGMII_1V8, 0);
-}
-
-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
- int val;
-
- val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
- if (val < 0)
- return val;
-
- return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
-}
-
-static const struct regulator_ops vddio_regulator_ops = {
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
-};
-
-static const unsigned int vddio_voltage_table[] = {
- 1500000,
- 1800000,
-};
-
-static const struct regulator_desc vddio_desc = {
- .name = "vddio",
- .of_match = of_match_ptr("vddio-regulator"),
- .n_voltages = ARRAY_SIZE(vddio_voltage_table),
- .volt_table = vddio_voltage_table,
- .ops = &vddio_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static const struct regulator_ops vddh_regulator_ops = {
-};
-
-static const struct regulator_desc vddh_desc = {
- .name = "vddh",
- .of_match = of_match_ptr("vddh-regulator"),
- .n_voltages = 1,
- .fixed_uV = 2500000,
- .ops = &vddh_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static int at8031_register_regulators(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct regulator_config config = { };
-
- config.dev = dev;
- config.driver_data = phydev;
-
- priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
- if (IS_ERR(priv->vddio_rdev)) {
- phydev_err(phydev, "failed to register VDDIO regulator\n");
- return PTR_ERR(priv->vddio_rdev);
- }
-
- priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
- if (IS_ERR(priv->vddh_rdev)) {
- phydev_err(phydev, "failed to register VDDH regulator\n");
- return PTR_ERR(priv->vddh_rdev);
- }
-
- return 0;
-}
-
-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
-{
- struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
- /* Some modules support 10G modes as well as modes that we support.
- * Mask out non-supported modes so the correct interface is picked.
- */
- linkmode_and(sfp_support, phy_support, sfp_support);
-
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
-
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
-
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
- */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct sfp_upstream_ops at8031_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at8031_sfp_insert,
-};
-
-static int at8031_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- if (of_property_read_bool(node, "qca,keep-pll-enabled"))
- priv->flags |= AT803X_KEEP_PLL_ENABLED;
-
- ret = at8031_register_regulators(phydev);
- if (ret < 0)
- return ret;
-
- ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
- "vddio");
- if (ret) {
- phydev_err(phydev, "failed to get VDDIO regulator\n");
- return ret;
- }
-
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- return phy_sfp_probe(phydev, &at8031_sfp_ops);
-}
-
-static int at8031_probe(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int mode_cfg;
- int ccr;
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- /* Only supported on AR8031/AR8033; the AR8030/AR8035 use strapping
- * options.
- */
- ret = at8031_parse_dt(phydev);
- if (ret)
- return ret;
-
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- if (ccr < 0)
- return ccr;
- mode_cfg = ccr & AT803X_MODE_CFG_MASK;
-
- switch (mode_cfg) {
- case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
- case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
- priv->is_1000basex = true;
- fallthrough;
- case AT803X_MODE_CFG_FX100_RGMII_50OHM:
- case AT803X_MODE_CFG_FX100_RGMII_75OHM:
- priv->is_fiber = true;
- break;
- }
-
- /* Disable WoL in 1588 register which is enabled
- * by default
- */
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-}
-
-static int at8031_config_init(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- /* Some bootloaders leave the fiber page selected.
- * Switch to the appropriate page (fiber or copper), as otherwise we
- * read the PHY capabilities from the wrong page.
- */
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev,
- priv->is_fiber ? AT803X_PAGE_FIBER :
- AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
- return ret;
-
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
-
- return at803x_config_init(phydev);
-}
-
-static int at8031_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret;
-
- /* First setup MAC address and enable WOL interrupt */
- ret = at803x_set_wol(phydev, wol);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- /* Enable WOL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- else
- /* Disable WoL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-
- return ret;
-}
-
-static int at8031_config_intr(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err, value = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
- priv->is_fiber) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
-
- err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
- if (err)
- return err;
- }
-
- return at803x_config_intr(phydev);
-}
-
-/* AR8031 and AR8033 share the same read status logic */
-static int at8031_read_status(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (priv->is_1000basex)
- return genphy_c37_read_status(phydev);
-
- return at803x_read_status(phydev);
-}
-
-/* AR8031 and AR8035 share the same cable test get status reg */
-static int at8031_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0xf);
-}
-
-/* AR8031 and AR8035 share the same cable test start logic */
-static int at8031_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- phy_write(phydev, MII_CTRL1000, 0);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
-static int at8032_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0x3);
-}
-
-static int at8035_parse_dt(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The mask is set by the generic at803x_parse_dt()
- * if the property is present. Assume the property
- * is present when the mask is non-zero.
- */
- if (priv->clk_25m_mask) {
- /* Fixup for the AR8030/AR8035. This chip has a different mask and
- * doesn't support the DSP reference, i.e. the lowest bit of the
- * mask. The upper two bits select the same frequencies; mask
- * the lowest bit here.
- *
- * Warning:
- * There was no datasheet for the AR8030 available so this is
- * just a guess. But the AR8035 is listed as pin compatible
- * to the AR8030 so there might be a good chance it works on
- * the AR8030 too.
- */
- priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
- }
-
- return 0;
-}
-
-/* AR8030 and AR8035 share the same special mask for clk_25m */
-static int at8035_probe(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- return at8035_parse_dt(phydev);
-}
-
-static int qca83xx_config_init(struct phy_device *phydev)
-{
- u8 switch_revision;
-
- switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
-
- switch (switch_revision) {
- case 1:
- /* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
- /* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
- break;
-
- case 2:
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
- fallthrough;
- case 4:
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
- break;
- }
-
-	/* Following the original QCA source code, set the port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
-}
-
-static int qca8327_config_init(struct phy_device *phydev)
-{
-	/* The QCA8327 requires the DAC amplitude adjustment for 100M to be
-	 * set to +6%. Disable it on init and enable it only at 100M speed,
-	 * following the original QCA source code.
-	 */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
-
- return qca83xx_config_init(phydev);
-}
-
-static void qca83xx_link_change_notify(struct phy_device *phydev)
-{
-	/* Set the DAC amplitude adjustment to +6% for 100M while the link is running */
- if (phydev->state == PHY_RUNNING) {
- if (phydev->speed == SPEED_100)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN,
- QCA8327_DEBUG_MANU_CTRL_EN);
- } else {
- /* Reset DAC Amplitude adjustment */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
- }
-}
-
-static int qca83xx_resume(struct phy_device *phydev)
-{
- int ret, val;
-
- /* Skip reset if not suspended */
- if (!phydev->suspended)
- return 0;
-
-	/* Reinit the port to restore the values changed by suspend */
- qca83xx_config_init(phydev);
-
- /* Reset the port on port resume */
- phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
-
-	/* On resume from suspend the switch executes a reset and
-	 * restarts auto-negotiation. Wait for the reset to complete.
-	 */
- ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
- 50000, 600000, true);
- if (ret)
- return ret;
-
- usleep_range(1000, 2000);
-
- return 0;
-}
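For readers unfamiliar with phy_read_poll_timeout(), the resume path above amounts to a bounded poll of MII_BMCR until the reset bit clears, sleeping before each read. A minimal stand-alone sketch of that pattern, assuming read_poll_timeout() semantics; read_bmcr() and the printed messages are illustrative stand-ins, not part of the driver:

#include <stdio.h>

#define BMCR_RESET 0x8000

/* Simulated MII_BMCR read; stands in for phy_read(phydev, MII_BMCR). */
static int read_bmcr(void)
{
	static int polls;

	return ++polls < 3 ? BMCR_RESET : 0;	/* reset clears on 3rd read */
}

int main(void)
{
	const int step_us = 50000, timeout_us = 600000;
	int waited, val = BMCR_RESET;

	for (waited = 0; waited <= timeout_us; waited += step_us) {
		/* a real implementation would sleep ~50 ms here first */
		val = read_bmcr();
		if (!(val & BMCR_RESET))
			break;
	}
	puts(val & BMCR_RESET ? "timeout" : "reset complete");
	return 0;
}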
-
-static int qca83xx_suspend(struct phy_device *phydev)
-{
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
- AT803X_DEBUG_GATE_CLK_IN1000, 0);
-
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
- AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
-
- return 0;
-}
-
-static int qca8337_suspend(struct phy_device *phydev)
-{
-	/* Only the QCA8337 supports actual suspend. */
- genphy_suspend(phydev);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca8327_suspend(struct phy_device *phydev)
-{
- u16 mask = 0;
-
-	/* The QCA8327 causes port unreliability when PHY suspend
-	 * is set.
-	 */
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
-{
- int ret;
-
- /* Enable fast retrain */
- ret = genphy_c45_fast_retrain(phydev, true);
- if (ret)
- return ret;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
- QCA808X_TOP_OPTION1_DATA);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
- QCA808X_MSE_THRESHOLD_20DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
- QCA808X_MSE_THRESHOLD_17DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
- QCA808X_MSE_THRESHOLD_27DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
- QCA808X_MSE_THRESHOLD_28DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
- QCA808X_MMD3_DEBUG_1_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
- QCA808X_MMD3_DEBUG_4_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
- QCA808X_MMD3_DEBUG_5_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
- QCA808X_MMD3_DEBUG_3_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
- QCA808X_MMD3_DEBUG_6_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
- QCA808X_MMD3_DEBUG_2_VALUE);
-
- return 0;
-}
-
-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
-{
- u16 seed_value;
-
- if (!enable)
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
-
- seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
- FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
- QCA808X_MASTER_SLAVE_SEED_ENABLE);
-}
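qca808x_phy_ms_seed_enable() packs a random value into the seed field with FIELD_PREP(). As a rough stand-alone illustration of that bitfield packing (the mask, enable bit and seed range below are hypothetical; the real QCA808X_MASTER_SLAVE_SEED_* values are defined earlier in the driver and do not appear in this hunk):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SEED_CFG_MASK	0x1ff0u	/* hypothetical field mask */
#define SEED_ENABLE	0x0001u	/* hypothetical enable bit */

/* Poor man's FIELD_PREP(): shift the value to the mask's lowest set bit */
static uint16_t field_prep(uint16_t mask, uint16_t val)
{
	return (uint16_t)((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint16_t seed = rand() & 0xff;	/* cf. get_random_u32_below() */
	uint16_t regval = field_prep(SEED_CFG_MASK, seed) | SEED_ENABLE;

	printf("seed=0x%02x -> reg=0x%04x\n", seed, regval);
	return 0;
}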
-
-static bool qca808x_is_prefer_master(struct phy_device *phydev)
-{
- return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
- (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
-}
-
-static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
-{
- return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-}
-
-static int qca808x_config_init(struct phy_device *phydev)
-{
- int ret;
-
-	/* Activate the ADC and VGA on 802.3az for the 1000M and 100M links */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
- QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
- if (ret)
- return ret;
-
-	/* Adjust the 802.3az threshold for the 1000M link */
- ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
- QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
- QCA808X_MMD3_AZ_TRAINING_VAL);
- if (ret)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- /* Config the fast retrain for the link 2500M */
- ret = qca808x_phy_fast_retrain_config(phydev);
- if (ret)
- return ret;
-
- ret = genphy_read_master_slave(phydev);
- if (ret < 0)
- return ret;
-
- if (!qca808x_is_prefer_master(phydev)) {
-			/* Enable the seed and configure a lower random seed
-			 * to make the PHY link in slave mode.
-			 */
- ret = qca808x_phy_ms_seed_enable(phydev, true);
- if (ret)
- return ret;
- }
- }
-
-	/* Configure the ADC threshold as 100mV for the 10M link */
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
- QCA808X_ADC_THRESHOLD_MASK,
- QCA808X_ADC_THRESHOLD_100MV);
-}
-
-static int qca808x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
- if (ret < 0)
- return ret;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
- ret & MDIO_AN_10GBT_STAT_LP2_5G);
-
- ret = genphy_read_status(phydev);
- if (ret)
- return ret;
-
-	/* The qca8081 uses different bits for the speed value than the at803x */
- ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
- ret = at803x_read_specific_status(phydev, ss_mask);
- if (ret < 0)
- return ret;
-
- if (phydev->link) {
- if (phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- } else {
-		/* Generate the seed as a lower random value to make the PHY
-		 * link as SLAVE easily, except when a master/slave
-		 * configuration fault is detected or master mode is
-		 * preferred.
-		 *
-		 * The reason for not putting this code into the
-		 * link_change_notify function is the corner case where the
-		 * link partner is also a qca8081 PHY and the seed is
-		 * configured to the same value: the link can't come up and
-		 * no link change occurs.
-		 */
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
- qca808x_is_prefer_master(phydev)) {
- qca808x_phy_ms_seed_enable(phydev, false);
- } else {
- qca808x_phy_ms_seed_enable(phydev, true);
- }
- }
- }
-
- return 0;
-}
-
-static int qca808x_soft_reset(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev))
- ret = qca808x_phy_ms_seed_enable(phydev, true);
-
- return ret;
-}
-
-static bool qca808x_cdt_fault_length_valid(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return true;
- default:
- return false;
- }
-}
-
-static int qca808x_cable_test_result_trans(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
- case QCA808X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
- int result)
-{
- int val;
- u32 cdt_length_reg = 0;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
- break;
- default:
- return -EINVAL;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
- if (val < 0)
- return val;
-
- if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
- else
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
-
- return at803x_cdt_fault_length(val);
-}
-
-static int qca808x_cable_test_start(struct phy_device *phydev)
-{
- int ret;
-
-	/* Perform CDT with the following configuration:
-	 * 1. disable hibernation.
-	 * 2. force the PHY to work in MDI mode.
-	 * 3. force the PHY to 1000BaseT.
-	 * 4. configure the thresholds.
-	 */
-
- ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
- if (ret < 0)
- return ret;
-
- ret = at803x_config_mdix(phydev, ETH_TP_MDI);
- if (ret < 0)
- return ret;
-
-	/* Forcing 1000BASE-T requires configuring both the PMA/PMD and MII_BMCR */
- phydev->duplex = DUPLEX_FULL;
- phydev->speed = SPEED_1000;
- ret = genphy_c45_pma_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- ret = genphy_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- /* configure the thresholds for open, short, pair ok test */
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
-
- return 0;
-}
-
-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
- u16 status)
-{
- int length, result;
- u16 pair_code;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
- break;
- default:
- return -EINVAL;
- }
-
- result = qca808x_cable_test_result_trans(pair_code);
- ethnl_cable_test_result(phydev, pair, result);
-
- if (qca808x_cdt_fault_length_valid(pair_code)) {
- length = qca808x_cdt_fault_length(phydev, pair, result);
- ethnl_cable_test_fault_length(phydev, pair, length);
- }
-
- return 0;
-}
-
-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
-{
- int ret, val;
-
- *finished = false;
-
- val = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
- if (val < 0)
- return val;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
- if (ret)
- return ret;
-
- *finished = true;
-
- return 0;
-}
-
-static int qca808x_get_features(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_c45_pma_read_abilities(phydev);
- if (ret)
- return ret;
-
-	/* The autoneg ability is not present in bit 3 of MMD7.1,
-	 * but it is supported by the qca808x PHY, so we add it here
-	 * manually.
-	 */
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
-
-	/* On the 1G variant of the qca8081, the 2500BASE-T ability is
-	 * still reported in bit 0 of MMD1.21, so we need to remove it
-	 * manually when bit 0 of MMD7.0x901d identifies the chip as the
-	 * qca8081 1G variant.
-	 */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
- if (ret < 0)
- return ret;
-
- if (QCA808X_PHY_CHIP_TYPE_1G & ret)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-
- return 0;
-}
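The feature pruning above follows a common pattern: start from the generic C45 abilities, then add or clear individual link modes based on chip identification. A toy sketch of the same pattern with made-up bit values (the real code uses linkmode bitmaps and ETHTOOL_LINK_MODE_* bit numbers, not plain integers):

#include <stdio.h>

#define MODE_2500BASET	(1u << 0)	/* hypothetical linkmode bit */
#define CHIP_TYPE_1G	(1u << 0)	/* hypothetical MMD7.0x901d bit */

int main(void)
{
	unsigned int supported = MODE_2500BASET;
	unsigned int chip_type = CHIP_TYPE_1G;	/* pretend register read */

	/* 1G-only variant: drop the 2.5G ability it wrongly reports */
	if (chip_type & CHIP_TYPE_1G)
		supported &= ~MODE_2500BASET;

	printf("2.5G supported: %s\n",
	       supported & MODE_2500BASET ? "yes" : "no");
	return 0;
}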
-
-static int qca808x_config_aneg(struct phy_device *phydev)
-{
- int phy_ctrl = 0;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
-	/* In forced mode the MII_BMCR register also needs to be
-	 * configured, so the genphy config_aneg path is needed as well.
-	 */
- if (phydev->autoneg == AUTONEG_DISABLE)
- genphy_c45_pma_setup_forced(phydev);
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
- phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
-
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
- if (ret < 0)
- return ret;
-
- return __genphy_config_aneg(phydev, ret);
-}
-
-static void qca808x_link_change_notify(struct phy_device *phydev)
-{
-	/* Assert the SGMII FIFO on link down and deassert it on link up.
-	 * The interface device address is always the PHY address plus 1.
-	 */
- mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
- MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
- QCA8081_PHY_FIFO_RSTN,
- phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
-}
-
-static struct phy_driver at803x_driver[] = {
-{
- /* Qualcomm Atheros AR8035 */
- PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
- .name = "Qualcomm Atheros AR8035",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8035_probe,
- .config_aneg = at803x_config_aneg,
- .config_init = at803x_config_init,
- .soft_reset = genphy_soft_reset,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_GBIT_FEATURES */
- .read_status = at803x_read_status,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8030 */
- .phy_id = ATH8030_PHY_ID,
- .name = "Qualcomm Atheros AR8030",
- .phy_id_mask = AT8030_PHY_ID_MASK,
- .probe = at8035_probe,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
-}, {
- /* Qualcomm Atheros AR8031/AR8033 */
- PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
- .name = "Qualcomm Atheros AR8031/AR8033",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8031_probe,
- .config_init = at8031_config_init,
- .config_aneg = at803x_config_aneg,
- .soft_reset = genphy_soft_reset,
- .set_wol = at8031_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .read_page = at803x_read_page,
- .write_page = at803x_write_page,
- .get_features = at803x_get_features,
- .read_status = at8031_read_status,
- .config_intr = at8031_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8032 */
- PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
- .name = "Qualcomm Atheros AR8032",
- .probe = at803x_probe,
- .flags = PHY_POLL_CABLE_TEST,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
-}, {
- /* ATHEROS AR9331 */
- PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
- .name = "Qualcomm Atheros AR9331 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* Qualcomm Atheros QCA9561 */
- PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
- .name = "Qualcomm Atheros QCA9561 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8337 internal PHY",
- /* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8337_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-A internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-B internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* Qualcomm QCA8081 */
- PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
- .name = "Qualcomm QCA8081",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .get_features = qca808x_get_features,
- .config_aneg = qca808x_config_aneg,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .read_status = qca808x_read_status,
- .config_init = qca808x_config_init,
- .soft_reset = qca808x_soft_reset,
- .cable_test_start = qca808x_cable_test_start,
- .cable_test_get_status = qca808x_cable_test_get_status,
- .link_change_notify = qca808x_link_change_notify,
-}, };
-
-module_phy_driver(at803x_driver);
-
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
- { }
-};
-
-MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 312a8bb35d78..370e4ed45098 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -665,10 +665,11 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
static int bcm54616s_read_status(struct phy_device *phydev)
{
struct bcm54616s_phy_priv *priv = phydev->priv;
+ bool changed;
int err;
if (priv->mode_1000bx_en)
- err = genphy_c37_read_status(phydev);
+ err = genphy_c37_read_status(phydev, &changed);
else
err = genphy_read_status(phydev);
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index b7cb71817780..c3426a17e6d0 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#define DP83822_PHY_ID 0x2000a240
#define DP83825S_PHY_ID 0x2000a140
@@ -34,6 +35,10 @@
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
+/* DP83826 specific registers */
+#define MII_DP83826_VOD_CFG1 0x30b
+#define MII_DP83826_VOD_CFG2 0x30c
+
/* GENCFG */
#define DP83822_SIG_DET_LOW BIT(0)
@@ -95,6 +100,8 @@
#define DP83822_WOL_CLR_INDICATION BIT(11)
/* RCSR bits */
+#define DP83822_RMII_MODE_EN BIT(5)
+#define DP83822_RMII_MODE_SEL BIT(7)
#define DP83822_RGMII_MODE_EN BIT(9)
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
@@ -110,6 +117,19 @@
#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
#define DP83822_RX_ER_SHIFT 8
+/* DP83826: VOD_CFG1 & VOD_CFG2 */
+#define DP83826_VOD_CFG1_MINUS_MDIX_MASK GENMASK(13, 12)
+#define DP83826_VOD_CFG1_MINUS_MDI_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_MINUS_MDIX_MASK GENMASK(15, 12)
+#define DP83826_VOD_CFG2_PLUS_MDIX_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_PLUS_MDI_MASK GENMASK(5, 0)
+#define DP83826_CFG_DAC_MINUS_MDIX_5_TO_4 GENMASK(5, 4)
+#define DP83826_CFG_DAC_MINUS_MDIX_3_TO_0 GENMASK(3, 0)
+#define DP83826_CFG_DAC_PERCENT_PER_STEP 625
+#define DP83826_CFG_DAC_PERCENT_DEFAULT 10000
+#define DP83826_CFG_DAC_MINUS_DEFAULT 0x30
+#define DP83826_CFG_DAC_PLUS_DEFAULT 0x10
+
#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
@@ -118,6 +138,8 @@ struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
u16 fx_sd_enable;
+ u8 cfg_dac_minus;
+ u8 cfg_dac_plus;
};
static int dp83822_set_wol(struct phy_device *phydev,
@@ -233,7 +255,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
@@ -254,7 +276,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -380,7 +402,7 @@ static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
- int rgmii_delay;
+ int rgmii_delay = 0;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
@@ -390,30 +412,33 @@ static int dp83822_config_init(struct phy_device *phydev)
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
- if (rx_int_delay <= 0)
- rgmii_delay = 0;
- else
- rgmii_delay = DP83822_RX_CLK_SHIFT;
+ /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
+ if (rx_int_delay > 0)
+ rgmii_delay |= DP83822_RX_CLK_SHIFT;
tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
false);
+
+ /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
if (tx_int_delay <= 0)
- rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
- else
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- if (rgmii_delay) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, rgmii_delay);
- if (err)
- return err;
- }
+ err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
- phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ if (err)
+ return err;
} else {
- phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+ if (err)
+ return err;
}
if (dp83822->fx_enabled) {
@@ -474,6 +499,85 @@ static int dp83822_config_init(struct phy_device *phydev)
return dp8382x_disable_wol(phydev);
}
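One subtlety the RGMII delay rework earlier in this patch encodes: the two RCSR bits have opposite polarity, so DP83822_RX_CLK_SHIFT is set to enable the RX internal delay while DP83822_TX_CLK_SHIFT is set to disable the TX one. A small sketch of the resulting mask computation, using the bit positions from the defines in this patch (the delay values are example inputs):

#include <stdio.h>

#define RX_CLK_SHIFT	(1u << 12)	/* set => RX delay enabled */
#define TX_CLK_SHIFT	(1u << 11)	/* set => TX delay disabled */

int main(void)
{
	int rx_delay_ps = 2000, tx_delay_ps = 0;	/* example DT values */
	unsigned int rgmii_delay = 0;

	if (rx_delay_ps > 0)
		rgmii_delay |= RX_CLK_SHIFT;
	if (tx_delay_ps <= 0)
		rgmii_delay |= TX_CLK_SHIFT;

	printf("rgmii_delay = 0x%04x\n", rgmii_delay);	/* 0x1800 */
	return 0;
}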
+static int dp83826_config_rmii_mode(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
+ int ret;
+
+ if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
+ if (strcmp(of_val, "master") == 0) {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else if (strcmp(of_val, "slave") == 0) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else {
+ phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
+ of_val);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dp83826_config_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ u16 val, mask;
+ int ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+
+ ret = dp83826_config_rmii_mode(phydev);
+ if (ret)
+ return ret;
+ } else {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_minus != DP83826_CFG_DAC_MINUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDI_MASK, dp83822->cfg_dac_minus) |
+ FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(DP83826_VOD_CFG2_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_plus != DP83826_CFG_DAC_PLUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
+ FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
+ mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ return dp8382x_disable_wol(phydev);
+}
+
static int dp8382x_config_init(struct phy_device *phydev)
{
return dp8382x_disable_wol(phydev);
@@ -509,11 +613,44 @@ static int dp83822_of_init(struct phy_device *phydev)
return 0;
}
+
+static int dp83826_to_dac_minus_one_regval(int percent)
+{
+ int tmp = DP83826_CFG_DAC_PERCENT_DEFAULT - percent;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static int dp83826_to_dac_plus_one_regval(int percent)
+{
+ int tmp = percent - DP83826_CFG_DAC_PERCENT_DEFAULT;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ u32 val;
+
+ dp83822->cfg_dac_minus = DP83826_CFG_DAC_MINUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-minus-one-bp", &val))
+ dp83822->cfg_dac_minus += dp83826_to_dac_minus_one_regval(val);
+
+ dp83822->cfg_dac_plus = DP83826_CFG_DAC_PLUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-plus-one-bp", &val))
+ dp83822->cfg_dac_plus += dp83826_to_dac_plus_one_regval(val);
+}
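The basis-point helpers above step in units of 6.25% (625 bp) around the 100% (10000 bp) default amplitude. A quick stand-alone check of the arithmetic, using example property values (the defaults mirror DP83826_CFG_DAC_MINUS_DEFAULT and DP83826_CFG_DAC_PLUS_DEFAULT from this patch):

#include <stdio.h>

#define PERCENT_PER_STEP	625	/* one register step = 6.25% */
#define PERCENT_DEFAULT		10000	/* 100% in basis points */
#define MINUS_DEFAULT		0x30
#define PLUS_DEFAULT		0x10

int main(void)
{
	int minus_bp = 9375;	/* example "ti,cfg-dac-minus-one-bp" */
	int plus_bp = 10625;	/* example "ti,cfg-dac-plus-one-bp" */
	int cfg_minus, cfg_plus;

	cfg_minus = MINUS_DEFAULT + (PERCENT_DEFAULT - minus_bp) / PERCENT_PER_STEP;
	cfg_plus = PLUS_DEFAULT + (plus_bp - PERCENT_DEFAULT) / PERCENT_PER_STEP;

	printf("cfg_dac_minus = 0x%02x\n", cfg_minus);	/* 0x31 */
	printf("cfg_dac_plus  = 0x%02x\n", cfg_plus);	/* 0x11 */
	return 0;
}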
#else
static int dp83822_of_init(struct phy_device *phydev)
{
return 0;
}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+}
#endif /* CONFIG_OF_MDIO */
static int dp83822_read_straps(struct phy_device *phydev)
@@ -567,6 +704,22 @@ static int dp83822_probe(struct phy_device *phydev)
return 0;
}
+static int dp83826_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ dp83826_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -610,6 +763,22 @@ static int dp83822_resume(struct phy_device *phydev)
.resume = dp83822_resume, \
}
+#define DP83826_PHY_DRIVER(_id, _name) \
+ { \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ /* PHY_BASIC_FEATURES */ \
+ .probe = dp83826_probe, \
+ .soft_reset = dp83822_phy_reset, \
+ .config_init = dp83826_config_init, \
+ .get_wol = dp83822_get_wol, \
+ .set_wol = dp83822_set_wol, \
+ .config_intr = dp83822_config_intr, \
+ .handle_interrupt = dp83822_handle_interrupt, \
+ .suspend = dp83822_suspend, \
+ .resume = dp83822_resume, \
+ }
+
#define DP8382X_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -628,8 +797,8 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP8382X_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
- DP8382X_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
- DP8382X_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83826_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83826_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
DP8382X_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
DP8382X_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
DP8382X_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 5f08f9d38bd7..4120385c5a79 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -158,6 +158,7 @@
/* LED_DRV bits */
#define DP83867_LED_DRV_EN(x) BIT((x) * 4)
#define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1)
+#define DP83867_LED_POLARITY(x) BIT((x) * 4 + 2)
#define DP83867_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
#define DP83867_LED_FN_MASK(idx) (0xf << ((idx) * 4))
@@ -1152,6 +1153,26 @@ static int dp83867_led_hw_control_get(struct phy_device *phydev, u8 index,
return 0;
}
+static int dp83867_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ /* Default active high */
+ u16 polarity = DP83867_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return phy_modify(phydev, DP83867_LEDCR2,
+ DP83867_LED_POLARITY(index), polarity);
+}
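The new led_polarity_set hook relies on LEDCR2's per-LED nibble layout: DRV_EN at bit 4n, DRV_VAL at bit 4n+1 and POLARITY at bit 4n+2, so polarity defaults to active-high and the bit is cleared for PHY_LED_ACTIVE_LOW. A quick sanity check of the bit arithmetic:

#include <stdio.h>

#define LED_DRV_EN(x)	(1u << ((x) * 4))
#define LED_DRV_VAL(x)	(1u << ((x) * 4 + 1))
#define LED_POLARITY(x)	(1u << ((x) * 4 + 2))

int main(void)
{
	/* LED 2: polarity bit is BIT(10) == 0x0400 */
	printf("LED2 polarity mask = 0x%04x\n", LED_POLARITY(2));
	return 0;
}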
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -1184,6 +1205,7 @@ static struct phy_driver dp83867_driver[] = {
.led_hw_is_supported = dp83867_led_hw_is_supported,
.led_hw_control_set = dp83867_led_hw_control_set,
.led_hw_control_get = dp83867_led_hw_control_get,
+ .led_polarity_set = dp83867_led_polarity_set,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 1c3ff77de56b..6b4bd9883304 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -1,10 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver
+ *
+ * Derived from Marvell Q222x API
+ *
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/hwmon.h>
+
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -13,8 +20,38 @@
#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_100BT1_GEN 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT_EN 0x0080
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2 32834
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK 0xc000
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3 32835
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK 0x00ff
+
#define MDIO_MMD_PCS_MV_100BT1_STAT1 33032
-#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00ff
#define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000
@@ -27,6 +64,71 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
+
+#define MDIO_MMD_PCS_MV_TDR_RESET 65226
+#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE 65241
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE 65242
+
+#define MDIO_MMD_PCS_MV_TDR_STATUS 65245
+#define MDIO_MMD_PCS_MV_TDR_STATUS_MASK 0x0003
+#define MDIO_MMD_PCS_MV_TDR_STATUS_OFF 0x0001
+#define MDIO_MMD_PCS_MV_TDR_STATUS_ON 0x0002
+#define MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK 0x00f0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT 0x0030
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN 0x00e0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK 0x0070
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_IN_PROGR 0x0080
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_NOISE 0x0050
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+
+struct mmd_val {
+ int devad;
+ u32 regnum;
+ u16 val;
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq0[] = {
+ { MDIO_MMD_PCS, 0x8033, 0x6801 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER | MDIO_PMA_CTRL1_SPEED1000 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x48 },
+ { MDIO_MMD_PCS, 0xffe4, 0x6b6 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0 },
+ { MDIO_MMD_PCS, MDIO_CTRL1, 0x0 },
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe79, 0x0 },
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xfbba, 0xcb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0xc4a },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfe79, 0x4 },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -50,20 +152,23 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
/* Read vendor specific Auto-Negotiation status register to get local
* and remote receiver status according to software initialization
- * guide.
+	 * guide. However, when not in polling mode the local and remote
+	 * receiver status are not evaluated, per the Marvell 88Q2xxx APIs.
*/
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
if (ret < 0) {
return ret;
- } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
- (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) {
+ } else if (((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
+ (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) ||
+ !phy_polling_mode(phydev)) {
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
			 * in polling mode to detect such short link drops unless
			 * the link was already down.
*/
if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -71,7 +176,8 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
}
if (!link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -94,8 +200,20 @@ static int mv88q2xxx_read_link_100m(struct phy_device *phydev)
* the link was already down. In case we are not polling,
* we always read the realtime status.
*/
- if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1);
+ if (!phy_polling_mode(phydev)) {
+ phydev->link = false;
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_PCS_MV_100BT1_STAT2_LINK)
+ phydev->link = true;
+
+ return 0;
+ } else if (!phydev->link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT1);
if (ret < 0)
return ret;
else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK)
@@ -120,24 +238,90 @@ out:
static int mv88q2xxx_read_link(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do not have the PMA/PMD status register available,
* therefore we need to read the link status from the vendor specific
* registers depending on the speed.
*/
+
if (phydev->speed == SPEED_1000)
- ret = mv88q2xxx_read_link_gbit(phydev);
+ return mv88q2xxx_read_link_gbit(phydev);
+ else if (phydev->speed == SPEED_100)
+ return mv88q2xxx_read_link_100m(phydev);
+
+ phydev->link = false;
+ return 0;
+}
+
+static int mv88q2xxx_read_master_slave_state(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_AN_MV_STAT_LOCAL_MASTER)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
else
- ret = mv88q2xxx_read_link_100m(phydev);
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ return 0;
+}
+
+static int mv88q2xxx_read_aneg_speed(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_MMD_AN_MV_STAT2_AN_RESOLVED))
+ return 0;
- return ret;
+ if (ret & MDIO_MMD_AN_MV_STAT2_100BT1)
+ phydev->speed = SPEED_100;
+ else if (ret & MDIO_MMD_AN_MV_STAT2_1000BT1)
+ phydev->speed = SPEED_1000;
+
+ return 0;
}
static int mv88q2xxx_read_status(struct phy_device *phydev)
{
int ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* We have to get the negotiated speed first, otherwise we are
+ * not able to read the link.
+ */
+ ret = mv88q2xxx_read_aneg_speed(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_link(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_master_slave_state(phydev);
+ if (ret < 0)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+
+ return 0;
+ }
+
ret = mv88q2xxx_read_link(phydev);
if (ret < 0)
return ret;
@@ -166,7 +350,9 @@ static int mv88q2xxx_get_features(struct phy_device *phydev)
* sequence provided by Marvell. Disable it for now until a proper
* workaround is found or a new PHY revision is released.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ if (phydev->drv->phy_id == MARVELL_PHY_ID_88Q2110)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
return 0;
}
@@ -179,28 +365,29 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
if (ret)
return ret;
- return mv88q2xxx_soft_reset(phydev);
+ return phydev->drv->soft_reset(phydev);
}
static int mv88q2xxx_config_init(struct phy_device *phydev)
{
- int ret;
-
	/* The 88Q2XXX PHYs do have the extended ability register available,
	 * but MDIO_PMA_EXTABLE, where they should signal it, does not work
	 * according to the specification. Therefore, we force it here.
*/
phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
- /* Read the current PHY configuration */
- ret = genphy_c45_read_pma(phydev);
- if (ret)
- return ret;
+ /* Configure interrupt with default settings, output is driven low for
+ * active interrupt and high for inactive.
+ */
+ if (phy_interrupt_is_valid(phydev))
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
- return mv88q2xxx_config_aneg(phydev);
+ return 0;
}
-static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -208,7 +395,8 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
/* Read the SQI from the vendor specific receiver status
* register
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RX_STAT);
if (ret < 0)
return ret;
@@ -218,7 +406,7 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
* but can be found in the Software Initialization Guide. Only
* revisions >= A0 are supported.
*/
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xfc5d, 0xff, 0xac);
if (ret < 0)
return ret;
@@ -227,14 +415,386 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
return ret;
}
- return ret & 0x0F;
+ return ret & 0x0f;
}
-static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi_max(struct phy_device *phydev)
{
return 15;
}
+static int mv88q2xxx_config_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Enable interrupts for 1000BASE-T1 link up and down events
+ * and enable general interrupts for 100BASE-T1.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN,
+ MDIO_MMD_PCS_MV_INT_EN_LINK_UP |
+ MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN |
+ MDIO_MMD_PCS_MV_INT_EN_100BT1);
+ if (ret < 0)
+ return ret;
+
+ /* Enable interrupts for 100BASE-T1 link events */
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN, 0);
+ }
+}
+
+static irqreturn_t mv88q2xxx_handle_interrupt(struct phy_device *phydev)
+{
+ bool trigger_machine = false;
+ int irq;
+
+	/* Before we can acknowledge the 100BT1 general interrupt, which is
+	 * reported in the 1000BT1 interrupt status register, we have to
+	 * acknowledge any interrupts that are related to it. Therefore we
+	 * first read the 100BT1 interrupt status register, followed by the
+	 * 1000BT1 interrupt status register.
+	 */
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_COPPER_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 100BT1 */
+ if (irq & MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT)
+ trigger_machine = true;
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_GPIO_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 1000BT1 */
+ if ((irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP) ||
+ (irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN))
+ trigger_machine = true;
+
+ if (!trigger_machine)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int mv88q2xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int mv88q2xxx_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+#if IS_ENABLED(CONFIG_HWMON)
+static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
+ NULL
+};
+
+static umode_t mv88q2xxx_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int mv88q2xxx_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK, ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_max:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_alarm:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR1);
+ if (ret < 0)
+ return ret;
+
+ *val = !!(ret & MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+		val = clamp_val(val, -75000, 180000);
+ val = (val / 1000) + 75;
+ val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
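The hwmon callbacks above encode temperature as whole degrees Celsius offset by +75 in the register, while the hwmon core works in millidegrees. A stand-alone round-trip of that conversion, matching the (ret - 75) * 1000 and (val / 1000) + 75 expressions in the patch:

#include <stdio.h>

static long reg_to_mdeg(int raw)	{ return (raw - 75) * 1000L; }
static int mdeg_to_reg(long mdeg)	{ return (int)(mdeg / 1000) + 75; }

int main(void)
{
	printf("raw 100 -> %ld mC\n", reg_to_mdeg(100));	/* 25000 */
	printf("40000 mC -> raw %d\n", mdeg_to_reg(40000));	/* 115 */
	return 0;
}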
+
+static const struct hwmon_ops mv88q2xxx_hwmon_hwmon_ops = {
+ .is_visible = mv88q2xxx_hwmon_is_visible,
+ .read = mv88q2xxx_hwmon_read,
+ .write = mv88q2xxx_hwmon_write,
+};
+
+static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
+ .ops = &mv88q2xxx_hwmon_hwmon_ops,
+ .info = mv88q2xxx_hwmon_info,
+};
+
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon;
+ char *hwmon_name;
+ int ret;
+
+ /* Enable temperature sense */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon = devm_hwmon_device_register_with_info(dev,
+ hwmon_name,
+ phydev,
+ &mv88q2xxx_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+
+#else
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
+static int mv88q2xxx_probe(struct phy_device *phydev)
+{
+ return mv88q2xxx_hwmon_probe(phydev);
+}
+
+static int mv88q222x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL,
+ MDIO_PCS_1000BT1_CTRL_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc);
+ if (ret < 0)
+ return ret;
+
+ /* Disable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000)
+ return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58);
+
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
+ mv88q222x_revb0_init_seq0[i].regnum,
+ mv88q222x_revb0_init_seq0[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
+ mv88q222x_revb0_init_seq1[i].regnum,
+ mv88q222x_revb0_init_seq1[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF, 0x0058);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE, 0x00eb);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE, 0x010e);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ 0x0d90);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS,
+ MDIO_MMD_PCS_MV_TDR_STATUS_ON);
+ if (ret < 0)
+ return ret;
+
+ /* According to the Marvell API the test is finished within 500 ms */
+ msleep(500);
+
+ return 0;
+}
+
+static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, status;
+ u32 dist;
+
+ status = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS);
+ if (status < 0)
+ return status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST | 0xd90);
+ if (ret < 0)
+ return ret;
+
+ /* Test could not be finished */
+ if (FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_MASK, status) !=
+ MDIO_MMD_PCS_MV_TDR_STATUS_OFF)
+ return -ETIMEDOUT;
+
+ *finished = true;
+ /* Fault length reported in meters, convert to centimeters */
+ dist = FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK, status) * 100;
+ switch (status & MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK) {
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK);
+ break;
+ default:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ return 0;
+}
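mv88q222x_cable_test_get_status() extracts the fault distance from bits 15:8 of the TDR status register (reported in whole meters) and scales it to the centimeters that ethnl_cable_test_fault_length() expects. A minimal sketch of that extraction; the example status value is made up:

#include <stdio.h>

#define TDR_DIST_MASK 0xff00u	/* MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK */

int main(void)
{
	unsigned int status = 0x0570;	/* example register value */
	unsigned int meters = (status & TDR_DIST_MASK) >> 8;

	printf("fault at %u cm\n", meters * 100);	/* 500 cm */
	return 0;
}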
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
@@ -246,8 +806,29 @@ static struct phy_driver mv88q2xxx_driver[] = {
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q2xxx_soft_reset,
.set_loopback = genphy_c45_loopback,
- .get_sqi = mv88q2xxxx_get_sqi,
- .get_sqi_max = mv88q2xxxx_get_sqi_max,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .name = "mv88q2220",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = mv88q2xxx_probe,
+ .get_features = mv88q2xxx_get_features,
+ .config_aneg = mv88q2xxx_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .config_init = mv88q222x_revb0_config_init,
+ .read_status = mv88q2xxx_read_status,
+ .soft_reset = mv88q222x_soft_reset,
+ .config_intr = mv88q2xxx_config_intr,
+ .handle_interrupt = mv88q2xxx_handle_interrupt,
+ .set_loopback = genphy_c45_loopback,
+ .cable_test_start = mv88q222x_cable_test_start,
+ .cable_test_get_status = mv88q222x_cable_test_get_status,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ .suspend = mv88q2xxx_suspend,
+ .resume = mv88q2xxx_resume,
},
};
@@ -255,6 +836,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index e3aa30dad2e6..b88398e6872b 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -9,12 +9,10 @@
*/
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mdio.h>
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/sfp.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index eba652a4c1d8..42ed013385bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -919,7 +919,10 @@ static int m88e1111_config_init_1000basex(struct phy_device *phydev)
if (extsr < 0)
return extsr;
- /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */
+ /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled.
+ * FIXME: this does not actually enable 1000BaseX auto-negotiation if
+ * it was previously disabled in the Fiber BMCR!
+ */
mode = extsr & MII_M1111_HWCFG_MODE_MASK;
if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) {
err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
@@ -1461,7 +1464,7 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index afbad1ad8683..8b9ead76e40e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -25,7 +24,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/reset.h>
@@ -459,19 +457,34 @@ EXPORT_SYMBOL(of_mdio_find_bus);
* found, set the of_node pointer for the mdio device. This allows
* auto-probed phy devices to be supplied with information passed in
* via DT.
+ * If a PHY package node is found, the PHY is also searched for inside it.
*/
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
{
- struct device *dev = &mdiodev->dev;
struct device_node *child;
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- for_each_available_child_of_node(bus->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
int addr;
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
addr = of_mdio_parse_addr(dev, child);
if (addr < 0)
continue;
@@ -481,9 +494,22 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
/* The refcount on "child" is passed to the mdio
* device. Do _not_ use of_node_put(child) here.
*/
- return;
+ return 0;
}
}
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
}
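For reference, the recursive walk above matches a devicetree layout along these lines (a hypothetical fragment; only the "ethernet-phy-package" node name and its "reg" property are significant to the code):

/*
 *	mdio {
 *		ethernet-phy-package@0 {
 *			reg = <0>;
 *			ethernet-phy@0 { reg = <0>; };
 *			ethernet-phy@1 { reg = <1>; };
 *		};
 *	};
 */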
#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
@@ -1398,7 +1424,7 @@ static const struct attribute_group *mdio_bus_dev_groups[] = {
NULL,
};
-struct bus_type mdio_bus_type = {
+const struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index dad720138baa..8b8634600c51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,12 +114,25 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+#define LAN8814_EEE_STATE 0x38
+#define LAN8814_EEE_STATE_MASK2P5P BIT(10)
+
+#define LAN8814_PD_CONTROLS 0x9d
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK GENMASK(3, 0)
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL 0xb
+
/* Represents 1ppm adjustment in 2^32 format with
* each nsec contains 4 clock cycles.
* The value is calculated as following: (1/1000000)/((2^-32)/4)
*/
#define LAN8814_1PPM_FORMAT 17179
+/* Represents a 1ppm adjustment in 2^32 format, where
+ * each nsec is made up of 8 clock cycles.
+ * The value is calculated as follows: (1/1000000)/((2^-32)/8)
+ */
+#define LAN8841_1PPM_FORMAT 34360
+
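The new LAN8841_1PPM_FORMAT value can be sanity-checked with a few lines of userspace C (a sketch, not kernel code): per the comment, one register LSB is (2^-32)/8 of a cycle, so 1 ppm corresponds to 1e-6 * 2^35 ≈ 34359.74, which rounds to 34360.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* 1e-6 / ((2^-32) / 8) == 1e-6 * 2^35 ~= 34359.74 */
	printf("%.2f\n", 1e-6 * ldexp(1.0, 35));
	return 0;
}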
#define PTP_RX_VERSION 0x0248
#define PTP_TX_VERSION 0x0288
#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
@@ -154,11 +167,13 @@
#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+#define PTP_CLOCK_SET_SEC_HI 0x0205
#define PTP_CLOCK_SET_SEC_MID 0x0206
#define PTP_CLOCK_SET_SEC_LO 0x0207
#define PTP_CLOCK_SET_NS_HI 0x0208
#define PTP_CLOCK_SET_NS_LO 0x0209
+#define PTP_CLOCK_READ_SEC_HI 0x0229
#define PTP_CLOCK_READ_SEC_MID 0x022A
#define PTP_CLOCK_READ_SEC_LO 0x022B
#define PTP_CLOCK_READ_NS_HI 0x022C
@@ -2592,35 +2607,31 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
}
static void lan8814_ptp_clock_set(struct phy_device *phydev,
- u32 seconds, u32 nano_seconds)
+ time64_t sec, u32 nsec)
{
- u32 sec_low, sec_high, nsec_low, nsec_high;
-
- sec_low = seconds & 0xffff;
- sec_high = (seconds >> 16) & 0xffff;
- nsec_low = nano_seconds & 0xffff;
- nsec_high = (nano_seconds >> 16) & 0x3fff;
-
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
- u32 *seconds, u32 *nano_seconds)
+ time64_t *sec, u32 *nsec)
{
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
- *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
- *seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
- *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec <<= 16;
+ *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
}
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -2630,7 +2641,7 @@ static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
ptp_clock_info);
struct phy_device *phydev = shared->phydev;
u32 nano_seconds;
- u32 seconds;
+ time64_t seconds;
mutex_lock(&shared->shared_lock);
lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
@@ -2660,38 +2671,37 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
{
u32 nano_seconds_step;
u64 abs_time_step_ns;
- u32 unsigned_seconds;
+ time64_t set_seconds;
u32 nano_seconds;
u32 remainder;
s32 seconds;
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds += remainder;
if (nano_seconds >= 1000000000) {
- unsigned_seconds++;
+ set_seconds++;
nano_seconds -= 1000000000;
}
- lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
} else if (time_step_ns < -15000000000LL) {
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds_step = remainder;
if (nano_seconds < nano_seconds_step) {
- unsigned_seconds--;
+ set_seconds--;
nano_seconds += 1000000000;
}
nano_seconds -= nano_seconds_step;
- lan8814_ptp_clock_set(phydev, unsigned_seconds,
- nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
}
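The add/subtract paths above are the usual (sec, nsec) carry/borrow normalization; a self-contained sketch of the same arithmetic (assuming, as the driver does, that nsec is below 1e9 on entry):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

static void clock_step(int64_t *sec, uint32_t *nsec, int64_t step_ns)
{
	int64_t n = (int64_t)*nsec + step_ns % NSEC_PER_SEC;
	int64_t s = *sec + step_ns / NSEC_PER_SEC;

	if (n >= NSEC_PER_SEC) {	/* carry into seconds */
		s++;
		n -= NSEC_PER_SEC;
	} else if (n < 0) {		/* borrow from seconds */
		s--;
		n += NSEC_PER_SEC;
	}
	*sec = s;
	*nsec = (uint32_t)n;
}

int main(void)
{
	int64_t sec = 10;
	uint32_t nsec = 900000000;

	clock_step(&sec, &nsec, 250000000);	/* -> 11.150000000 */
	printf("%lld.%09u\n", (long long)sec, nsec);
	return 0;
}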
@@ -3285,6 +3295,33 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
return 0;
}
+static void lan8814_clear_2psp_bit(struct phy_device *phydev)
+{
+ u16 val;
+
+ /* It was noticed that when traffic is passing through the PHY and the
+ * cable is then removed, the LED stays on even though there is no
+ * link.
+ */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
+ val &= ~LAN8814_EEE_STATE_MASK2P5P;
+ lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+}
+
+static void lan8814_update_meas_time(struct phy_device *phydev)
+{
+ u16 val;
+
+ /* Setting the measure time to a value of 0xb allows cables longer
+ * than 100m to be used. This configuration can be used regardless
+ * of the PHY's mode of operation.
+ */
+ val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
+ val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
+ val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
+ lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+}
+
static int lan8814_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -3321,6 +3358,10 @@ static int lan8814_probe(struct phy_device *phydev)
lan8814_ptp_init(phydev);
+ /* Errata workarounds */
+ lan8814_clear_2psp_bit(phydev);
+ lan8814_update_meas_time(phydev);
+
return 0;
}
@@ -4118,8 +4159,8 @@ static int lan8841_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
faster = false;
}
- rate = LAN8814_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
- rate += (LAN8814_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+ rate = LAN8841_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (LAN8841_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
mutex_lock(&ptp_priv->ptp_lock);
phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_HI,
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index ea1073adc5a1..b2d36a3a96f1 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -274,6 +274,14 @@ static int gpy_config_init(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
+static int gpy21x_config_init(struct phy_device *phydev)
+{
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces);
+
+ return gpy_config_init(phydev);
+}
+
static int gpy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -867,7 +875,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY211B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -884,7 +892,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY211C),
.name = "Maxlinear Ethernet GPY211C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -902,7 +910,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY212B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -919,7 +927,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY212C),
.name = "Maxlinear Ethernet GPY212C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -937,7 +945,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPYx15B_MASK,
.name = "Maxlinear Ethernet GPY215B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -954,7 +962,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY215C),
.name = "Maxlinear Ethernet GPY215C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 747d14bf152c..5695935fdce9 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -208,7 +208,8 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
MDIO_AN_T1_ADV_L_PAUSE_ASYM;
- adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+ adv_m_mask = MDIO_AN_T1_ADV_M_1000BT1 | MDIO_AN_T1_ADV_M_100BT1 |
+ MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -706,6 +707,22 @@ int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
changed = 1;
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ val = linkmode_to_mii_eee_cap2_t(adv);
+
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV2,
+ MDIO_EEE_2_5GT | MDIO_EEE_5GT,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
val = linkmode_adv_to_mii_10base_t1_t(adv);
@@ -745,6 +762,17 @@ int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv)
mii_eee_cap1_mod_linkmode_t(adv, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(adv, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
@@ -781,6 +809,17 @@ static int genphy_c45_read_eee_lpa(struct phy_device *phydev,
mii_eee_cap1_mod_linkmode_t(lpa, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.17 EEE link partner ability 2
+ * (Register 7.63)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(lpa, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.26 10BASE-T1 AN status register
@@ -831,6 +870,30 @@ static int genphy_c45_read_eee_cap1(struct phy_device *phydev)
}
/**
+ * genphy_c45_read_eee_cap2 - read supported EEE link modes from register 3.21
+ * @phydev: target phy_device struct
+ */
+static int genphy_c45_read_eee_cap2(struct phy_device *phydev)
+{
+ int val;
+
+ /* IEEE 802.3-2022 45.2.3.11 EEE control and capability 2
+ * (Register 3.21)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE2);
+ if (val < 0)
+ return val;
+
+ /* IEEE 802.3-2022 45.2.3.11 says 9 bits are reserved. */
+ if (val == 0xffff)
+ return 0;
+
+ mii_eee_cap2_mod_linkmode_sup_t(phydev->supported_eee, val);
+
+ return 0;
+}
+
+/**
* genphy_c45_read_eee_abilities - read supported EEE link modes
* @phydev: target phy_device struct
*/
@@ -848,6 +911,13 @@ int genphy_c45_read_eee_abilities(struct phy_device *phydev)
return val;
}
+ /* Same for cap2 (3.21) */
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP2_FEATURES)) {
+ val = genphy_c45_read_eee_cap2(phydev);
+ if (val)
+ return val;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported)) {
/* IEEE 802.3cg-2019 45.2.1.186b 10BASE-T1L PMA status register
@@ -1443,17 +1513,17 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
/**
* genphy_c45_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it reports the Supported/Advertisement/LP Advertisement
* capabilities.
*/
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
- bool overflow = false, is_enabled;
+ bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
@@ -1462,17 +1532,9 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
data->eee_enabled = is_enabled;
data->eee_active = ret;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->supported,
- phydev->supported_eee))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->advertised, adv))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->lp_advertised, lp))
- overflow = true;
-
- if (overflow)
- phydev_warn(phydev, "Not all supported or advertised EEE link modes were passed to the user space\n");
+ linkmode_copy(data->supported, phydev->supported_eee);
+ linkmode_copy(data->advertised, adv);
+ linkmode_copy(data->lp_advertised, lp);
return 0;
}
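Dropping the legacy-u32 conversions is what makes the new 2.5G/5G EEE modes reportable at all: their link-mode bit numbers are above 31, so they can never fit the old ethtool_eee u32 fields, which is exactly the overflow the removed warning guarded against. A quick illustration (the bit value is taken from the uapi ethtool link-mode enum):

#include <stdio.h>

#define ETHTOOL_LINK_MODE_2500baseT_Full_BIT 47

int main(void)
{
	printf("fits in a legacy u32 mask: %s\n",
	       ETHTOOL_LINK_MODE_2500baseT_Full_BIT < 32 ? "yes" : "no");
	return 0;
}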
@@ -1481,50 +1543,53 @@ EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
/**
* genphy_c45_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: sets the Supported/Advertisement/LP Advertisement
* capabilities. If eee_enabled is false, no links modes are
* advertised, but the previously advertised link modes are
* retained. This allows EEE to be enabled/disabled in a
* non-destructive way.
+ * Returns a negative error code, 0 if there was no change, or a positive
+ * value if a change triggered auto-negotiation.
*/
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
int ret;
if (data->eee_enabled) {
- if (data->advertised) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
+ unsigned long *adv = data->advertised;
- ethtool_convert_legacy_u32_to_link_mode(adv,
- data->advertised);
- linkmode_andnot(adv, adv, phydev->supported_eee);
- if (!linkmode_empty(adv)) {
+ if (!linkmode_empty(adv)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ if (linkmode_andnot(tmp, adv, phydev->supported_eee)) {
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
-
- ethtool_convert_legacy_u32_to_link_mode(phydev->advertising_eee,
- data->advertised);
} else {
- linkmode_copy(phydev->advertising_eee,
- phydev->supported_eee);
+ adv = phydev->supported_eee;
}
- phydev->eee_enabled = true;
- } else {
- phydev->eee_enabled = false;
+ linkmode_copy(phydev->advertising_eee, adv);
}
+ phydev->eee_enabled = data->eee_enabled;
+
ret = genphy_c45_an_config_eee_aneg(phydev);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return phy_restart_aneg(phydev);
+ if (ret > 0) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
- return 0;
+ /* explicitly return 1, otherwise the (ret > 0) value would be
+ * overwritten by phy_restart_aneg().
+ */
+ return 1;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
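A sketch of how a caller is expected to consume the new tristate return value (it mirrors the phy_ethtool_set_eee() change later in this patch; phy_ethtool_set_eee_noneg() and eee_to_eeecfg() are the helpers introduced there):

	int ret = genphy_c45_ethtool_set_eee(phydev, data);

	if (ret < 0)
		return ret;	/* hard failure */
	if (ret == 0)
		/* no renegotiation: only the MAC LPI config may need updating */
		phy_ethtool_set_eee_noneg(phydev, data);
	eee_to_eeecfg(&phydev->eee_cfg, data);
	return 0;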
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3376e58e2b88..c4236564c1cd 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -983,9 +983,17 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
+ err = genphy_c45_eee_is_active(phydev,
+ NULL, NULL, NULL);
+ if (err <= 0)
+ phydev->enable_tx_lpi = false;
+ else
+ phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled;
+
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
phydev->state = PHY_NOLINK;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
@@ -1290,7 +1298,6 @@ int phy_disable_interrupts(struct phy_device *phydev)
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
- struct phy_driver *drv = phydev->drv;
irqreturn_t ret;
/* Wakeup interrupts may occur during a system sleep transition.
@@ -1316,7 +1323,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
}
mutex_lock(&phydev->lock);
- ret = drv->handle_interrupt(phydev);
+ ret = phydev->drv->handle_interrupt(phydev);
mutex_unlock(&phydev->lock);
return ret;
@@ -1632,12 +1639,12 @@ EXPORT_SYMBOL(phy_get_eee_err);
/**
* phy_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
- * Description: it reportes the Supported/Advertisement/LP Advertisement
- * capabilities.
+ * Description: reports the Supported/Advertisement/LP Advertisement
+ * capabilities, etc.
*/
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1646,6 +1653,7 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_get_eee(phydev, data);
+ eeecfg_to_eee(data, &phydev->eee_cfg);
mutex_unlock(&phydev->lock);
return ret;
@@ -1653,13 +1661,43 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
EXPORT_SYMBOL(phy_ethtool_get_eee);
/**
+ * phy_ethtool_set_eee_noneg - Adjusts MAC LPI configuration without PHY
+ * renegotiation
+ * @phydev: pointer to the target PHY device structure
+ * @data: pointer to the ethtool_keee structure containing the new EEE settings
+ *
+ * This function updates the Energy Efficient Ethernet (EEE) configuration
+ * for cases where only the MAC's Low Power Idle (LPI) configuration changes,
+ * without triggering PHY renegotiation. It ensures that the MAC is properly
+ * informed of the new LPI settings by cycling the link down and up, which
+ * is necessary for the MAC to adopt the new configuration. This adjustment
+ * is done only if there is a change in the tx_lpi_enabled or tx_lpi_timer
+ * configuration.
+ */
+static void phy_ethtool_set_eee_noneg(struct phy_device *phydev,
+ struct ethtool_keee *data)
+{
+ if (phydev->eee_cfg.tx_lpi_enabled != data->tx_lpi_enabled ||
+ phydev->eee_cfg.tx_lpi_timer != data->tx_lpi_timer) {
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg);
+ if (phydev->link) {
+ phydev->link = false;
+ phy_link_down(phydev);
+ phydev->link = true;
+ phy_link_up(phydev);
+ }
+ }
+}
+
+/**
* phy_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it is to program the Advertisement EEE register.
*/
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1668,9 +1706,14 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_set_eee(phydev, data);
+ if (ret >= 0) {
+ if (ret == 0)
+ phy_ethtool_set_eee_noneg(phydev, data);
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ }
mutex_unlock(&phydev->lock);
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3611ea64875e..8297ef681bf5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -148,6 +148,14 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
+static const int phy_eee_cap2_features_array[] = {
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
+
static void features_init(void)
{
/* 10/100 half/full*/
@@ -232,6 +240,9 @@ static void features_init(void)
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
+ linkmode_set_bit_array(phy_eee_cap2_features_array,
+ ARRAY_SIZE(phy_eee_cap2_features_array),
+ phy_eee_cap2_features);
}
@@ -780,7 +791,7 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
* and identifiers in @c45_ids.
*
* Returns zero on success, %-EIO on bus access error, or %-ENODEV if
- * the "devices in package" is invalid.
+ * the "devices in package" is invalid or no device responds.
*/
static int get_phy_c45_ids(struct mii_bus *bus, int addr,
struct phy_c45_device_ids *c45_ids)
@@ -803,7 +814,11 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr,
*/
ret = phy_c45_probe_present(bus, addr, i);
if (ret < 0)
- return -EIO;
+ /* returning -ENODEV doesn't stop bus
+ * scanning
+ */
+ return (ret == -EIO ||
+ ret == -ENODEV) ? -ENODEV : -EIO;
if (!ret)
continue;
@@ -1413,6 +1428,11 @@ int phy_sfp_probe(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_sfp_probe);
+static bool phy_drv_supports_irq(const struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->handle_interrupt;
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1527,6 +1547,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->dev_flags & PHY_F_NO_IRQ)
phydev->irq = PHY_POLL;
+ if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1592,7 +1615,6 @@ EXPORT_SYMBOL(phy_attach_direct);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface)
{
- struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
struct device *d;
int rc;
@@ -1603,7 +1625,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
- d = bus_find_device_by_name(bus, NULL, bus_id);
+ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
return ERR_PTR(-ENODEV);
@@ -1700,6 +1722,7 @@ int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
shared->priv_size = priv_size;
}
shared->base_addr = base_addr;
+ shared->np = NULL;
refcount_set(&shared->refcnt, 1);
bus->shared[base_addr] = shared;
} else {
@@ -1723,6 +1746,63 @@ err_unlock:
EXPORT_SYMBOL_GPL(phy_package_join);
/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have the np value
+ * filled to use additional DT defined properties in PHY specific
+ * probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling
+ * phy_package_join() with the same cookie but a different priv_size is an
+ * error, as is a parent node that is missing, invalid, or doesn't match
+ * the expected node name for a PHY package.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
* phy_package_leave - leave a common PHY group
* @phydev: target phy_device struct
*
@@ -1738,6 +1818,10 @@ void phy_package_leave(struct phy_device *phydev)
if (!shared)
return;
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
@@ -1791,6 +1875,40 @@ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
EXPORT_SYMBOL_GPL(devm_phy_package_join);
/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). For shared storage obtained through this
+ * function, phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
+
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -1859,7 +1977,7 @@ int phy_suspend(struct phy_device *phydev)
{
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
if (phydev->suspended)
@@ -1884,7 +2002,7 @@ EXPORT_SYMBOL(phy_suspend);
int __phy_resume(struct phy_device *phydev)
{
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
lockdep_assert_held(&phydev->lock);
@@ -2513,12 +2631,15 @@ EXPORT_SYMBOL(genphy_read_status);
/**
* genphy_c37_read_status - check the link status and update current link state
* @phydev: target phy_device struct
+ * @changed: pointer to where to store whether the link changed
*
* Description: Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. This function is for Clause 37 1000Base-X mode.
+ *
+ * If link has changed, @changed is set to true, false otherwise.
*/
-int genphy_c37_read_status(struct phy_device *phydev)
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed)
{
int lpa, err, old_link = phydev->link;
@@ -2528,9 +2649,13 @@ int genphy_c37_read_status(struct phy_device *phydev)
return err;
/* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) {
+ *changed = false;
return 0;
+ }
+ /* Signal link has changed */
+ *changed = true;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -2770,6 +2895,50 @@ void phy_advertise_supported(struct phy_device *phydev)
EXPORT_SYMBOL(phy_advertise_supported);
/**
+ * phy_advertise_eee_all - Advertise all supported EEE modes
+ * @phydev: target phy_device struct
+ *
+ * Description: By default phylib preserves the EEE advertising at the time of
+ * phy probing, which might be a subset of the supported EEE modes. Use this
+ * function when all supported EEE modes should be advertised. This does not
+ * trigger auto-negotiation, so must be called before phy_start()/
+ * phylink_start() which will start auto-negotiation.
+ */
+void phy_advertise_eee_all(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+}
+EXPORT_SYMBOL_GPL(phy_advertise_eee_all);
+
+/**
+ * phy_support_eee - Set initial EEE policy configuration
+ * @phydev: Target phy_device struct
+ *
+ * This function configures the initial policy for Energy Efficient Ethernet
+ * (EEE) on the specified PHY device, ensuring that EEE capabilities are
+ * advertised before the link is established. It should be called during PHY
+ * registration by the MAC driver and/or the PHY driver (for SmartEEE PHYs)
+ * if the MAC supports LPI or the PHY is able to compensate for missing LPI
+ * functionality in the MAC.
+ *
+ * The function sets default EEE policy parameters, including preparing the PHY
+ * to advertise EEE capabilities based on hardware support.
+ *
+ * It also sets the expected configuration for Low Power Idle (LPI) in the MAC
+ * driver. If the PHY framework determines that both local and remote
+ * advertisements support EEE, and the negotiated link mode is compatible with
+ * EEE, it will set enable_tx_lpi = true. The MAC driver is expected to act on
+ * this setting by enabling the LPI timer if enable_tx_lpi is set.
+ */
+void phy_support_eee(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+ phydev->eee_cfg.tx_lpi_enabled = true;
+ phydev->eee_cfg.eee_enabled = true;
+}
+EXPORT_SYMBOL(phy_support_eee);
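An illustrative call site in a hypothetical MAC driver with LPI support; phy_support_eee() and phy_advertise_eee_all() are the helpers added above, while mymac_handle_link() and the RGMII interface mode are made-up placeholders:

static int mymac_connect_phy(struct net_device *ndev,
			     struct phy_device *phydev)
{
	/* opt in to the EEE policy defaults before autoneg starts */
	phy_support_eee(phydev);
	phy_advertise_eee_all(phydev);

	/* must happen before phy_start() kicks off auto-negotiation */
	return phy_connect_direct(ndev, phydev, mymac_handle_link,
				  PHY_INTERFACE_MODE_RGMII);
}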
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -2959,7 +3128,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
if (delay < 0)
return delay;
- if (delay && size == 0)
+ if (size == 0)
return delay;
if (delay < delay_values[0] || delay > delay_values[size - 1]) {
@@ -2992,11 +3161,6 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
-static bool phy_drv_supports_irq(struct phy_driver *phydrv)
-{
- return phydrv->config_intr && phydrv->handle_interrupt;
-}
-
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3097,6 +3261,7 @@ static int of_phy_led(struct phy_device *phydev,
struct device *dev = &phydev->mdio.dev;
struct led_init_data init_data = {};
struct led_classdev *cdev;
+ unsigned long modes = 0;
struct phy_led *phyled;
u32 index;
int err;
@@ -3114,6 +3279,21 @@ static int of_phy_led(struct phy_device *phydev,
if (index > U8_MAX)
return -EINVAL;
+ if (of_property_read_bool(led, "active-low"))
+ set_bit(PHY_LED_ACTIVE_LOW, &modes);
+ if (of_property_read_bool(led, "inactive-high-impedance"))
+ set_bit(PHY_LED_INACTIVE_HIGH_IMPEDANCE, &modes);
+
+ if (modes) {
+ /* Return an error if polarity modes were requested but are not supported */
+ if (!phydev->drv->led_polarity_set)
+ return -EINVAL;
+
+ err = phydev->drv->led_polarity_set(phydev, index, modes);
+ if (err)
+ return err;
+ }
+
phyled->index = index;
if (phydev->drv->led_brightness_set)
cdev->brightness_set_blocking = phy_led_set_brightness;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ed0b4ccaa6a6..503fd7c40523 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2764,9 +2764,9 @@ EXPORT_SYMBOL_GPL(phylink_init_eee);
/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ * @eee: a pointer to a &struct ethtool_keee for the read parameters
*/
-int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
@@ -2782,9 +2782,9 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
/**
* phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ * @eee: a pointer to a &struct ethtool_keee for the desired parameters
*/
-int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig
new file mode 100644
index 000000000000..570626cc8e14
--- /dev/null
+++ b/drivers/net/phy/qcom/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_NET_PHYLIB
+ tristate
+
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ select QCOM_NET_PHYLIB
+ depends on REGULATOR
+ help
+ Currently supports the AR8030, AR8031, AR8033 and AR8035 models
+
+config QCA83XX_PHY
+ tristate "Qualcomm Atheros QCA833x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the internal QCA8337 (qca8k) PHY model
+
+config QCA808X_PHY
+ tristate "Qualcomm QCA808x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the QCA8081 model
+
+config QCA807X_PHY
+ tristate "Qualcomm QCA807x PHYs"
+ select QCOM_NET_PHYLIB
+ depends on OF_MDIO
+ help
+ Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
+ control PHY.
diff --git a/drivers/net/phy/qcom/Makefile b/drivers/net/phy/qcom/Makefile
new file mode 100644
index 000000000000..f24fb550babd
--- /dev/null
+++ b/drivers/net/phy/qcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o
+obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_QCA83XX_PHY) += qca83xx.o
+obj-$(CONFIG_QCA808X_PHY) += qca808x.o
+obj-$(CONFIG_QCA807X_PHY) += qca807x.o
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
new file mode 100644
index 000000000000..4717c59d51d0
--- /dev/null
+++ b/drivers/net/phy/qcom/at803x.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/net/phy/at803x.c
+ *
+ * Driver for Qualcomm Atheros AR803x PHY
+ *
+ * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
+#include <dt-bindings/net/qca-ar803x.h>
+
+#include "qcom.h"
+
+#define AT803X_LED_CONTROL 0x18
+
+#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
+#define AT803X_WOL_EN BIT(5)
+
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
+
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
+
+#define AT803X_PSSR 0x11 /* PHY-Specific Status Register */
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for 25 MHz output, all other frequencies need the PLL.
+ * The DSP as a clock reference is used in synchronous ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into a low power state and disable the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
+#define ATH9331_PHY_ID 0x004dd041
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
+#define ATH8035_PHY_ID 0x004dd072
+#define AT8030_PHY_ID_MASK 0xffffffef
+
+#define QCA9561_PHY_ID 0x004dd042
+
+#define AT803X_PAGE_FIBER 0
+#define AT803X_PAGE_COPPER 1
+
+/* don't turn off internal PLL */
+#define AT803X_KEEP_PLL_ENABLED BIT(0)
+#define AT803X_DISABLE_SMARTEEE BIT(1)
+
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct at803x_priv {
+ int flags;
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+};
+
+struct at803x_context {
+ u16 bmcr;
+ u16 advertise;
+ u16 control1000;
+ u16 int_enable;
+ u16 smart_speed;
+ u16 led_control;
+};
+
+static int at803x_write_page(struct phy_device *phydev, int page)
+{
+ int mask;
+ int set;
+
+ if (page == AT803X_PAGE_COPPER) {
+ set = AT803X_BT_BX_REG_SEL;
+ mask = 0;
+ } else {
+ set = 0;
+ mask = AT803X_BT_BX_REG_SEL;
+ }
+
+ return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
+}
+
+static int at803x_read_page(struct phy_device *phydev)
+{
+ int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+
+ if (ccr < 0)
+ return ccr;
+
+ if (ccr & AT803X_BT_BX_REG_SEL)
+ return AT803X_PAGE_COPPER;
+
+ return AT803X_PAGE_FIBER;
+}
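With these hooked up as .read_page/.write_page in the phy_driver entries, phylib's standard paged accessors take care of selecting and restoring the page around each access; an illustrative read of the PHY-specific status register from the fiber page (phy_read_paged() is a stock phylib helper, the register is defined above):

	int val = phy_read_paged(phydev, AT803X_PAGE_FIBER, AT803X_PSSR);

	if (val < 0)
		return val;	/* bus error or failed page switch */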
+
+static int at803x_enable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
+ AT803X_DEBUG_RX_CLK_DLY_EN);
+}
+
+static int at803x_enable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
+ AT803X_DEBUG_TX_CLK_DLY_EN);
+}
+
+static int at803x_disable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
+}
+
+static int at803x_disable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
+}
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+ struct at803x_context *context)
+{
+ context->bmcr = phy_read(phydev, MII_BMCR);
+ context->advertise = phy_read(phydev, MII_ADVERTISE);
+ context->control1000 = phy_read(phydev, MII_CTRL1000);
+ context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+ context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+ context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+ const struct at803x_context *context)
+{
+ phy_write(phydev, MII_BMCR, context->bmcr);
+ phy_write(phydev, MII_ADVERTISE, context->advertise);
+ phy_write(phydev, MII_CTRL1000, context->control1000);
+ phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+ phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+ phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_INTR_ENABLE_WOL;
+
+ if (wol_enabled)
+ value = BMCR_ISOLATE;
+ else
+ value = BMCR_PDOWN;
+
+ phy_modify(phydev, MII_BMCR, 0, value);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ u32 freq, strength, tw;
+ unsigned int sel;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+ priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int at803x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct at803x_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = at803x_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int at803x_get_features(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err;
+
+ err = genphy_read_abilities(phydev);
+ if (err)
+ return err;
+
+ if (phydev->drv->phy_id != ATH8031_PHY_ID)
+ return 0;
+
+ /* AR8031/AR8033 have different status registers
+ * for copper and fiber operation. However, the
+ * extended status register is the same for both
+ * operation modes.
+ *
+ * As a result of that, ESTATUS_1000_XFULL is set
+ * to 1 even when operating in copper TP mode.
+ *
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
+ */
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int at803x_smarteee_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ u16 mask = 0, val = 0;
+ int ret;
+
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
+ return 0;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
+}
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
+}
+
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is hibernation mode enabled. After
+ * software reset, the value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
+static int at803x_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The RX and TX delay default is:
+ * after HW reset: RX delay enabled and TX delay disabled
+ * after SW reset: RX delay enabled, while TX delay retains the
+ * value before reset.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ ret = at803x_enable_rx_delay(phydev);
+ else
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ ret = at803x_enable_tx_delay(phydev);
+ else
+ ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Ar803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
+}
+
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+ /*
+ * Conduct a hardware reset for AT8030 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+ * in the FIFO. In such cases, the FIFO enters an error mode it
+ * cannot recover from by software.
+ */
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ phy_device_reset(phydev, 1);
+ usleep_range(1000, 2000);
+ phy_device_reset(phydev, 0);
+ usleep_range(1000, 2000);
+
+ at803x_context_restore(phydev, &context);
+
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
+ }
+}
+
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
+ return genphy_config_aneg(phydev);
+}
+
+static int at803x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case AT803X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool at803x_cdt_test_failed(u16 status)
+{
+ return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+ AT803X_CDT_STATUS_STAT_FAIL;
+}
+
+static bool at803x_cdt_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ int ret, val;
+
+ val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, AT803X_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ if (at803x_cdt_test_failed(val))
+ return 0;
+
+ ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ at803x_cable_test_result_trans(val));
+
+ if (at803x_cdt_fault_length_valid(val)) {
+ val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
+ ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ at803x_cdt_fault_length(val));
+ }
+
+ return 1;
+}
+
+static int at803x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished, unsigned long pair_mask)
+{
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* According to the datasheet the CDT can be performed when
+ * there is no link partner or when the link partner is
+ * auto-negotiating. Starting the test will restart the AN
+ * automatically. It seems that by doing this repeatedly we will
+ * eventually get a slot where our link partner won't disturb our
+ * measurement.
+ */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = at803x_cable_test_one_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ clear_bit(pair, &pair_mask);
+ }
+ if (pair_mask)
+ msleep(250);
+ }
+
+ *finished = true;
+
+ return 0;
+}
+
+static void at803x_cable_test_autoneg(struct phy_device *phydev)
+{
+ /* Enable auto-negotiation, but advertise no capabilities, so no link
+ * will be established. A restart of the auto-negotiation is not
+ * required, because the cable test will automatically break the link.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static const struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
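+
+/* Editorial sketch (not part of the patch): vddh is a fixed 2.5V rail, so
+ * vddh_regulator_ops can stay empty and the core reports .fixed_uV. A
+ * hypothetical DT fragment matching the .of_match strings above could be:
+ *
+ *	ethernet-phy@4 {
+ *		reg = <4>;
+ *		vddio-regulator { regulator-name = "vddio"; };
+ *		vddh-regulator { regulator-name = "vddh"; };
+ *	};
+ *
+ * Node layout is illustrative only; the binding document is authoritative.
+ */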
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules support 10G modes in addition to the modes we
+ * support. Mask out the non-supported modes so the correct
+ * interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
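+
+/* Editorial note (not part of the patch): the linkmode_and() above is a
+ * plain bitmap intersection. For example, a module advertising
+ * {1000baseX_Full, 10000baseSR_Full} intersected with phy_support leaves
+ * only 1000baseX_Full, steering sfp_select_interface() to 1000BASE-X.
+ */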
+
+static const struct sfp_upstream_ops at8031_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at8031_sfp_insert,
+};
+
+static int at8031_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+ "vddio");
+ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ return phy_sfp_probe(phydev, &at8031_sfp_ops);
+}
+
+static int at8031_probe(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int mode_cfg;
+ int ccr;
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ /* Only supported on AR8031/AR8033; the AR8030/AR8035 use strapping
+ * options.
+ */
+ ret = at8031_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if (ccr < 0)
+ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
+ /* Disable WoL in the 1588 register, which is enabled
+ * by default
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+}
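+
+/* Editorial note (not part of the patch): the MODE_CFG strap decoding in
+ * at8031_probe() reduces to:
+ *
+ *	BX1000_RGMII_{50,75}OHM -> is_1000basex and is_fiber (fallthrough)
+ *	FX100_RGMII_{50,75}OHM  -> is_fiber only
+ *	any other value         -> copper, both flags false
+ */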
+
+static int at8031_config_init(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ return at803x_config_init(phydev);
+}
+
+static int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First setup MAC address and enable WOL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WOL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+
+static int at8031_config_intr(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err, value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+ priv->is_fiber) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+
+ err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+ if (err)
+ return err;
+ }
+
+ return at803x_config_intr(phydev);
+}
+
+/* AR8031 and AR8033 share the same read status logic */
+static int at8031_read_status(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ bool changed;
+
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev, &changed);
+
+ return at803x_read_status(phydev);
+}
+
+/* AR8031 and AR8035 share the same cable test get status reg */
+static int at8031_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0xf);
+}
+
+/* AR8031 and AR8035 share the same cable test start logic */
+static int at8031_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ phy_write(phydev, MII_CTRL1000, 0);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+static int at8032_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0x3);
+}
+
+static int at8035_parse_dt(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The mask is set by the generic at803x_parse_dt
+ * if the property is present. Assume the property
+ * is present when the mask is non-zero.
+ */
+ if (priv->clk_25m_mask) {
+ /* Fixup for the AR8030/AR8035. This chip has a different mask and
+ * doesn't support the DSP reference, i.e. the lowest bit of the
+ * mask. The upper two bits select the same frequencies. Mask
+ * the lowest bit here.
+ *
+ * Warning:
+ * There was no datasheet for the AR8030 available, so this is
+ * just a guess. But the AR8035 is listed as pin-compatible
+ * with the AR8030, so there is a good chance it works on
+ * the AR8030 too.
+ */
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+
+ return 0;
+}
+
+/* AR8030 and AR8035 share the same special mask for clk_25m */
+static int at8035_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ return at8035_parse_dt(phydev);
+}
+
+static struct phy_driver at803x_driver[] = {
+{
+ /* Qualcomm Atheros AR8035 */
+ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
+ .name = "Qualcomm Atheros AR8035",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8035_probe,
+ .config_aneg = at803x_config_aneg,
+ .config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_GBIT_FEATURES */
+ .read_status = at803x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8030 */
+ .phy_id = ATH8030_PHY_ID,
+ .name = "Qualcomm Atheros AR8030",
+ .phy_id_mask = AT8030_PHY_ID_MASK,
+ .probe = at8035_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+}, {
+ /* Qualcomm Atheros AR8031/AR8033 */
+ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
+ .name = "Qualcomm Atheros AR8031/AR8033",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8031_probe,
+ .config_init = at8031_config_init,
+ .config_aneg = at803x_config_aneg,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .read_page = at803x_read_page,
+ .write_page = at803x_write_page,
+ .get_features = at803x_get_features,
+ .read_status = at8031_read_status,
+ .config_intr = at8031_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8032 */
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, };
+
+module_phy_driver(at803x_driver);
+
+static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
new file mode 100644
index 000000000000..672c6929119a
--- /dev/null
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ *
+ * Qualcomm QCA8072 and QCA8075 PHY driver
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/bitfield.h>
+#include <linux/gpio/driver.h>
+#include <linux/sfp.h>
+
+#include "qcom.h"
+
+#define QCA807X_CHIP_CONFIGURATION 0x1f
+#define QCA807X_BT_BX_REG_SEL BIT(15)
+#define QCA807X_BT_BX_REG_SEL_FIBER 0
+#define QCA807X_BT_BX_REG_SEL_COPPER 1
+#define QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK GENMASK(3, 0)
+#define QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII 4
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER 3
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER 0
+
+#define QCA807X_MEDIA_SELECT_STATUS 0x1a
+#define QCA807X_MEDIA_DETECTED_COPPER BIT(5)
+#define QCA807X_MEDIA_DETECTED_1000_BASE_X BIT(4)
+#define QCA807X_MEDIA_DETECTED_100_BASE_FX BIT(3)
+
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION 0x807e
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN BIT(0)
+
+#define QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH 0x801a
+#define QCA807X_CONTROL_DAC_MASK GENMASK(2, 0)
+/* List of tweaks enabled by this bit:
+ * - With both FULL amplitude and FULL bias current: bias current
+ * is set to half.
+ * - With only DSP amplitude: bias current is set to half and
+ * is set to 1/4 with cable < 10m.
+ * - With DSP bias current (included both DSP amplitude and
+ * DSP bias current): bias current is half the detected current
+ * with cable < 10m.
+ */
+#define QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK BIT(2)
+#define QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT BIT(1)
+#define QCA807X_CONTROL_DAC_DSP_AMPLITUDE BIT(0)
+
+#define QCA807X_MMD7_LED_100N_1 0x8074
+#define QCA807X_MMD7_LED_100N_2 0x8075
+#define QCA807X_MMD7_LED_1000N_1 0x8076
+#define QCA807X_MMD7_LED_1000N_2 0x8077
+
+#define QCA807X_MMD7_LED_CTRL(x) (0x8074 + ((x) * 2))
+#define QCA807X_MMD7_LED_FORCE_CTRL(x) (0x8075 + ((x) * 2))
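+
+/* Editorial note (not part of the patch): the two helpers above expand
+ * over the fixed register pairs, e.g.
+ *	QCA807X_MMD7_LED_CTRL(0)       = 0x8074 (LED_100N_1)
+ *	QCA807X_MMD7_LED_FORCE_CTRL(0) = 0x8075 (LED_100N_2)
+ *	QCA807X_MMD7_LED_CTRL(1)       = 0x8076 (LED_1000N_1)
+ *	QCA807X_MMD7_LED_FORCE_CTRL(1) = 0x8077 (LED_1000N_2)
+ */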
+
+/* LED hw control pattern for fiber port */
+#define QCA807X_LED_FIBER_PATTERN_MASK GENMASK(11, 1)
+#define QCA807X_LED_FIBER_TXACT_BLK_EN BIT(10)
+#define QCA807X_LED_FIBER_RXACT_BLK_EN BIT(9)
+#define QCA807X_LED_FIBER_FDX_ON_EN BIT(6)
+#define QCA807X_LED_FIBER_HDX_ON_EN BIT(5)
+#define QCA807X_LED_FIBER_1000BX_ON_EN BIT(2)
+#define QCA807X_LED_FIBER_100FX_ON_EN BIT(1)
+
+/* Some devices repurpose the LEDs as GPIO outputs */
+#define QCA807X_GPIO_FORCE_EN QCA808X_LED_FORCE_EN
+#define QCA807X_GPIO_FORCE_MODE_MASK QCA808X_LED_FORCE_MODE_MASK
+
+#define QCA807X_FUNCTION_CONTROL 0x10
+#define QCA807X_FC_MDI_CROSSOVER_MODE_MASK GENMASK(6, 5)
+#define QCA807X_FC_MDI_CROSSOVER_AUTO 3
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX 1
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI 0
+
+/* PQSGMII Analog PHY specific */
+#define PQSGMII_CTRL_REG 0x0
+#define PQSGMII_ANALOG_SW_RESET BIT(6)
+#define PQSGMII_DRIVE_CONTROL_1 0xb
+#define PQSGMII_TX_DRIVER_MASK GENMASK(7, 4)
+#define PQSGMII_TX_DRIVER_140MV 0x0
+#define PQSGMII_TX_DRIVER_160MV 0x1
+#define PQSGMII_TX_DRIVER_180MV 0x2
+#define PQSGMII_TX_DRIVER_200MV 0x3
+#define PQSGMII_TX_DRIVER_220MV 0x4
+#define PQSGMII_TX_DRIVER_240MV 0x5
+#define PQSGMII_TX_DRIVER_260MV 0x6
+#define PQSGMII_TX_DRIVER_280MV 0x7
+#define PQSGMII_TX_DRIVER_300MV 0x8
+#define PQSGMII_TX_DRIVER_320MV 0x9
+#define PQSGMII_TX_DRIVER_400MV 0xa
+#define PQSGMII_TX_DRIVER_500MV 0xb
+#define PQSGMII_TX_DRIVER_600MV 0xc
+#define PQSGMII_MODE_CTRL 0x6d
+#define PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK BIT(0)
+#define PQSGMII_MMD3_SERDES_CONTROL 0x805a
+
+#define PHY_ID_QCA8072 0x004dd0b2
+#define PHY_ID_QCA8075 0x004dd0b1
+
+#define QCA807X_COMBO_ADDR_OFFSET 4
+#define QCA807X_PQSGMII_ADDR_OFFSET 5
+#define SERDES_RESET_SLEEP 100
+
+enum qca807x_global_phy {
+ QCA807X_COMBO_ADDR = 4,
+ QCA807X_PQSGMII_ADDR = 5,
+};
+
+struct qca807x_shared_priv {
+ unsigned int package_mode;
+ u32 tx_drive_strength;
+};
+
+struct qca807x_gpio_priv {
+ struct phy_device *phy;
+};
+
+struct qca807x_priv {
+ bool dac_full_amplitude;
+ bool dac_full_bias_current;
+ bool dac_disable_bias_current_tweak;
+};
+
+static int qca807x_cable_test_start(struct phy_device *phydev)
+{
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int qca807x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+ break;
+ case PORT_FIBRE:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_TXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_RXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_100FX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_1000BX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_HDX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_FDX_ON_EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
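+
+/* Editorial sketch (not part of the patch): "rules" is the generic LED
+ * netdev-trigger bitmap; the parser only maps it to hardware pattern bits.
+ * E.g. link-1000 plus TX/RX activity on a copper (PORT_TP) port:
+ *
+ *	rules = BIT(TRIGGER_NETDEV_LINK_1000) |
+ *		BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ *	// -> *offload_trigger == QCA808X_LED_SPEED1000_ON |
+ *	//                       QCA808X_LED_TX_BLINK | QCA808X_LED_RX_BLINK
+ */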
+
+static int qca807x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca807x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 1)
+ return -EINVAL;
+
+ return qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca807x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, mask, offload_trigger = 0;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ ret = qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca807x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg, mask,
+ offload_trigger);
+}
+
+static bool qca807x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return false;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca807x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca807x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA807X_LED_FIBER_TXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA807X_LED_FIBER_RXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA807X_LED_FIBER_100FX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA807X_LED_FIBER_1000BX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA807X_LED_FIBER_HDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA807X_LED_FIBER_FDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg, mask;
+
+ if (index > 1)
+ return -EINVAL;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, mask);
+}
+
+static int qca807x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* If we are switching the LED off, reset any hw control rule */
+ if (!value) {
+ ret = qca807x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca807x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+#ifdef CONFIG_GPIOLIB
+static int qca807x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+
+ return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
+}
+
+static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+ val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
+ val |= QCA807X_GPIO_FORCE_EN;
+ val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
+
+ phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
+}
+
+static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ qca807x_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int qca807x_gpio(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca807x_gpio_priv *priv;
+ struct gpio_chip *gc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phy = phydev;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ gc->label = dev_name(dev);
+ gc->base = -1;
+ gc->ngpio = 2;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = true;
+ gc->get_direction = qca807x_gpio_get_direction;
+ gc->direction_output = qca807x_gpio_dir_out;
+ gc->get = qca807x_gpio_get;
+ gc->set = qca807x_gpio_set;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+#endif
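+
+/* Editorial sketch (not part of the patch): with "gpio-controller" in DT,
+ * the two LED pins become a 2-line, output-only gpiochip. A consumer would
+ * use the usual descriptor API (names hypothetical):
+ *
+ *	desc = gpiod_get(consumer_dev, "reset", GPIOD_OUT_LOW);
+ *	gpiod_set_value_cansleep(desc, 1);	// can_sleep: MDIO access
+ *
+ * Because the chip reuses the LED force-mode field for this, LEDs and
+ * gpio-controller are mutually exclusive (see qca807x_probe() below).
+ */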
+
+static int qca807x_read_fiber_status(struct phy_device *phydev)
+{
+ bool changed;
+ int ss, err;
+
+ err = genphy_c37_read_status(phydev, &changed);
+ if (err || !changed)
+ return err;
+
+ /* Read the QCA807x PHY-Specific Status register fiber page,
+ * which indicates the speed and duplex that the PHY is actually
+ * using, irrespective of whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ switch (FIELD_GET(AT803X_SS_SPEED_MASK, ss)) {
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ }
+
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int qca807x_read_status(struct phy_device *phydev)
+{
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ switch (phydev->port) {
+ case PORT_FIBRE:
+ return qca807x_read_fiber_status(phydev);
+ case PORT_TP:
+ return at803x_read_status(phydev);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return at803x_read_status(phydev);
+}
+
+static int qca807x_phy_package_probe_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ unsigned int tx_drive_strength;
+ const char *package_mode_name;
+
+ /* Default to 600mw if not defined */
+ if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ &tx_drive_strength))
+ tx_drive_strength = 600;
+
+ switch (tx_drive_strength) {
+ case 140:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_140MV;
+ break;
+ case 160:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_160MV;
+ break;
+ case 180:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_180MV;
+ break;
+ case 200:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_200MV;
+ break;
+ case 220:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_220MV;
+ break;
+ case 240:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_240MV;
+ break;
+ case 260:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_260MV;
+ break;
+ case 280:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_280MV;
+ break;
+ case 300:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_300MV;
+ break;
+ case 320:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_320MV;
+ break;
+ case 400:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_400MV;
+ break;
+ case 500:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_500MV;
+ break;
+ case 600:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_600MV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ priv->package_mode = PHY_INTERFACE_MODE_NA;
+ if (!of_property_read_string(shared->np, "qcom,package-mode",
+ &package_mode_name)) {
+ if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_PSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_PSGMII;
+ else if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_QSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_QSGMII;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
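+
+/* Editorial sketch (not part of the patch): the package-level properties
+ * parsed above might look like this in DT (values illustrative):
+ *
+ *	qcom,package-mode = "qsgmii";
+ *	qcom,tx-drive-strength-milliwatt = <300>;
+ *
+ * A strength outside the 13 supported steps is rejected with -EINVAL
+ * rather than rounded to the nearest value.
+ */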
+
+static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ int val, ret;
+
+ /* Make sure the PHY follows the PHY package mode if enforced */
+ if (priv->package_mode != PHY_INTERFACE_MODE_NA &&
+ phydev->interface != priv->package_mode)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* Set correct PHY package mode */
+ val = __phy_package_read(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION);
+ val &= ~QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK;
+ /* package_mode can be QSGMII or PSGMII and we validate
+ * this in probe_once.
+ * With package_mode set to NA, we default to PSGMII.
+ */
+ switch (priv->package_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_PSGMII:
+ default:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER;
+ }
+ ret = __phy_package_write(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION, val);
+ if (ret)
+ goto exit;
+
+ /* After mode change Serdes reset is required */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val &= ~PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ msleep(SERDES_RESET_SLEEP);
+
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val |= PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ /* Workaround to enable AZ transmitting ability */
+ val = __phy_package_read_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL);
+ val &= ~PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK;
+ ret = __phy_package_write_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL, val);
+ if (ret)
+ goto exit;
+
+ /* Set PQSGMII TX AMP strength */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1);
+ val &= ~PQSGMII_TX_DRIVER_MASK;
+ val |= FIELD_PREP(PQSGMII_TX_DRIVER_MASK, priv->tx_drive_strength);
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1, val);
+ if (ret)
+ goto exit;
+
+ /* Prevent PSGMII going into hibernation via PSGMII self test */
+ val = __phy_package_read_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL);
+ val &= ~BIT(1);
+ ret = __phy_package_write_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL, val);
+
+exit:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
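+
+/* Editorial note (not part of the patch): the sequence above is, in order:
+ * program the package mode, pulse PQSGMII_ANALOG_SW_RESET low for
+ * SERDES_RESET_SLEEP ms, apply the AZ workaround, set the TX amplitude,
+ * and clear bit 1 of the MMD3 serdes control to keep the PSGMII link out
+ * of hibernation. It all runs under the MDIO bus lock because the writes
+ * target package-global addresses rather than this PHY.
+ */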
+
+static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+ int ret;
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
+
+ dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_100BASEX:
+ /* Set PHY mode to PSGMII combo (1/4 copper + combo ports) mode */
+ ret = phy_modify(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
+ QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
+ /* Enable fiber mode auto-detection (1000Base-X or 100Base-FX) */
+ ret = phy_set_bits_mmd(phydev,
+ MDIO_MMD_AN,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN);
+ /* Select fiber page */
+ ret = phy_clear_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_FIBRE;
+ break;
+ default:
+ dev_err(&phydev->mdio.dev, "Incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void qca807x_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+
+ /* Select copper page */
+ phy_set_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_TP;
+}
+
+static const struct sfp_upstream_ops qca807x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = qca807x_sfp_insert,
+ .module_remove = qca807x_sfp_remove,
+};
+
+static int qca807x_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct qca807x_shared_priv *shared_priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct phy_package_shared *shared;
+ struct qca807x_priv *priv;
+ int ret;
+
+ ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_probe_once(phydev)) {
+ ret = qca807x_phy_package_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ shared = phydev->shared;
+ shared_priv = shared->priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dac_full_amplitude = of_property_read_bool(node, "qcom,dac-full-amplitude");
+ priv->dac_full_bias_current = of_property_read_bool(node, "qcom,dac-full-bias-current");
+ priv->dac_disable_bias_current_tweak = of_property_read_bool(node,
+ "qcom,dac-disable-bias-current-tweak");
+
+#if IS_ENABLED(CONFIG_GPIOLIB)
+ /* Make sure the leds node and the gpio-controller property are
+ * not both present, to avoid registering LEDs whose pins would
+ * conflict with gpio-controller usage.
+ */
+ if (of_find_property(node, "leds", NULL) &&
+ of_find_property(node, "gpio-controller", NULL)) {
+ phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
+ return -EINVAL;
+ }
+
+ /* Do not register a GPIO controller unless flagged for it */
+ if (of_property_read_bool(node, "gpio-controller")) {
+ ret = qca807x_gpio(phydev);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ /* Attach SFP bus on combo port */
+ if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+ ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
+ if (ret)
+ return ret;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+ }
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca807x_config_init(struct phy_device *phydev)
+{
+ struct qca807x_priv *priv = phydev->priv;
+ u16 control_dac;
+ int ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = qca807x_phy_package_config_init_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
+ control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+ if (!priv->dac_full_amplitude)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
+ if (!priv->dac_full_bias_current)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
+ if (!priv->dac_disable_bias_current_tweak)
+ control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
+ return phy_write_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH,
+ control_dac);
+}
+
+static struct phy_driver qca807x_drivers[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
+ .name = "Qualcomm QCA8072",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
+ .name = "Qualcomm QCA8075",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .led_brightness_set = qca807x_led_brightness_set,
+ .led_blink_set = qca807x_led_blink_set,
+ .led_hw_is_supported = qca807x_led_hw_is_supported,
+ .led_hw_control_set = qca807x_led_hw_control_set,
+ .led_hw_control_get = qca807x_led_hw_control_get,
+ },
+};
+module_phy_driver(qca807x_drivers);
+
+static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
+ { }
+};
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Qualcomm QCA807x PHY driver");
+MODULE_DEVICE_TABLE(mdio, qca807x_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
new file mode 100644
index 000000000000..5048304ccc9e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+/* ADC threshold */
+#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
+#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
+#define QCA808X_ADC_THRESHOLD_80MV 0
+#define QCA808X_ADC_THRESHOLD_100MV 0xf0
+#define QCA808X_ADC_THRESHOLD_200MV 0x0f
+#define QCA808X_ADC_THRESHOLD_300MV 0xff
+
+/* CLD control */
+#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
+#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
+#define QCA808X_8023AZ_AFE_EN 0x90
+
+/* AZ control */
+#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
+#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
+#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
+#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
+#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
+#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
+#define QCA808X_TOP_OPTION1_DATA 0x0
+
+#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
+#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
+#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
+#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
+#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
+#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
+#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
+#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
+#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
+#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
+#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
+#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
+
+/* master/slave seed config */
+#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
+#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
+#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
+#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
+
+/* Hibernation yields lower power consumption than normal operation mode.
+ * When the copper cable is unplugged, the PHY enters hibernation mode in about 10s.
+ */
+#define QCA808X_DBG_AN_TEST 0xb
+#define QCA808X_HIBERNATION_EN BIT(15)
+
+#define QCA808X_MMD7_LED2_CTRL 0x8074
+#define QCA808X_MMD7_LED2_FORCE_CTRL 0x8075
+#define QCA808X_MMD7_LED1_CTRL 0x8076
+#define QCA808X_MMD7_LED1_FORCE_CTRL 0x8077
+#define QCA808X_MMD7_LED0_CTRL 0x8078
+#define QCA808X_MMD7_LED_CTRL(x) (0x8078 - ((x) * 2))
+
+#define QCA808X_MMD7_LED0_FORCE_CTRL 0x8079
+#define QCA808X_MMD7_LED_FORCE_CTRL(x) (0x8079 - ((x) * 2))
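+
+/* Editorial note (not part of the patch): unlike the QCA807x layout, these
+ * LED registers grow downwards from LED0, e.g.
+ *	QCA808X_MMD7_LED_CTRL(0) = 0x8078, QCA808X_MMD7_LED_FORCE_CTRL(0) = 0x8079
+ *	QCA808X_MMD7_LED_CTRL(2) = 0x8074, QCA808X_MMD7_LED_FORCE_CTRL(2) = 0x8075
+ */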
+
+#define QCA808X_MMD7_LED_POLARITY_CTRL 0x901a
+/* QSDK writes 0x46 to this reg by default, which sets BIT 6 so the
+ * LEDs are active high. It's not clear what BIT 3 and BIT 4 do.
+ */
+#define QCA808X_LED_ACTIVE_HIGH BIT(6)
+
+/* QCA808X 1G chip type */
+#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
+#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
+
+#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
+#define QCA8081_PHY_FIFO_RSTN BIT(11)
+
+#define QCA8081_PHY_ID 0x004dd101
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct qca808x_priv {
+ int led_polarity_mode;
+};
+
+static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable fast retrain */
+ ret = genphy_c45_fast_retrain(phydev, true);
+ if (ret)
+ return ret;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+ QCA808X_TOP_OPTION1_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+ QCA808X_MMD3_DEBUG_1_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+ QCA808X_MMD3_DEBUG_4_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+ QCA808X_MMD3_DEBUG_5_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+ QCA808X_MMD3_DEBUG_3_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+ QCA808X_MMD3_DEBUG_6_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+ QCA808X_MMD3_DEBUG_2_VALUE);
+
+ return 0;
+}
+
+static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+{
+ u16 seed_value;
+
+ if (!enable)
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+
+ seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+ FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+ QCA808X_MASTER_SLAVE_SEED_ENABLE);
+}
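+
+/* Editorial note (not part of the patch): the seed lives in an 11-bit
+ * field (bits 12:2) and get_random_u32_below(0x32) keeps it in [0, 49],
+ * i.e. at the very low end, which biases master/slave resolution towards
+ * slave. For example, seed_value = 0x21 gives
+ *	FIELD_PREP(GENMASK(12, 2), 0x21) == 0x21 << 2 == 0x84
+ * plus BIT(1) for the enable bit.
+ */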
+
+static bool qca808x_is_prefer_master(struct phy_device *phydev)
+{
+ return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+ (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+}
+
+static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+{
+ return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+}
+
+static bool qca808x_is_1g_only(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
+ if (ret < 0)
+ return true;
+
+ return !!(QCA808X_PHY_CHIP_TYPE_1G & ret);
+}
+
+static void qca808x_fill_possible_interfaces(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, possible);
+
+ if (!qca808x_is_1g_only(phydev))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, possible);
+}
+
+static int qca808x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca808x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init LED polarity mode to -1 */
+ priv->led_polarity_mode = -1;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca808x_config_init(struct phy_device *phydev)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Default to LED Active High if active-low not in DT */
+ if (priv->led_polarity_mode == -1) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH);
+ if (ret)
+ return ret;
+ }
+
+ /* Activate ADC & VGA on 802.3az for 1000M and 100M links */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ if (ret)
+ return ret;
+
+ /* Adjust the 802.3az threshold for 1000M links */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+ QCA808X_MMD3_AZ_TRAINING_VAL);
+ if (ret)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ /* Config the fast retrain for the link 2500M */
+ ret = qca808x_phy_fast_retrain_config(phydev);
+ if (ret)
+ return ret;
+
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!qca808x_is_prefer_master(phydev)) {
+ /* Enable the seed and configure a lower random seed so the
+ * PHY links up in slave mode.
+ */
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ qca808x_fill_possible_interfaces(phydev);
+
+ /* Configure the ADC threshold to 100mV for 10M links */
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+ QCA808X_ADC_THRESHOLD_MASK,
+ QCA808X_ADC_THRESHOLD_100MV);
+}
+
+static int qca808x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (ret < 0)
+ return ret;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ /* The qca8081 uses different bits for the speed value than the at803x */
+ ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+ ret = at803x_read_specific_status(phydev, ss_mask);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+ /* Generate the seed as a lower random value to make the PHY link
+ * up as SLAVE easily, except when a master/slave configuration
+ * fault is detected or master mode is preferred.
+ *
+ * The reason for not putting this code into link_change_notify is
+ * the corner case where the link partner is also a qca8081 PHY and
+ * the seed values are configured the same: the link can't come up
+ * and no link change ever occurs.
+ */
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+ qca808x_is_prefer_master(phydev)) {
+ qca808x_phy_ms_seed_enable(phydev, false);
+ } else {
+ qca808x_phy_ms_seed_enable(phydev, true);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qca808x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+
+ return ret;
+}
+
+static int qca808x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Perform CDT with the following configuration:
+ * 1. disable hibernation.
+ * 2. force the PHY to work in MDI mode.
+ * 3. force the PHY to 1000BaseT.
+ * 4. configure the thresholds.
+ */
+
+ ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+ if (ret < 0)
+ return ret;
+
+ /* Force 1000base-T needs to configure PMA/PMD and MII_BMCR */
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* configure the thresholds for open, short, pair ok test */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+
+ return 0;
+}
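+
+/* Editorial note (not part of the patch): the raw 0x80xx threshold writes
+ * above target MMD3 (PCS) and only coincidentally share offsets with the
+ * MMD7 LED registers defined earlier; the values appear to be undocumented
+ * vendor tuning constants.
+ */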
+
+static int qca808x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The autoneg ability is not present in bit 3 of MMD7.1,
+ * but it is supported by the qca808x PHY, so we add it here
+ * manually.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ /* On the qca8081 1G version chip, the 2500baseT ability is still
+ * present in bit 0 of MMD1.21; remove it manually when bit 0 of
+ * MMD7.0x901d identifies the chip as the 1G variant.
+ */
+ if (qca808x_is_1g_only(phydev))
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int qca808x_config_aneg(struct phy_device *phydev)
+{
+ int phy_ctrl = 0;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The MII_BMCR register also needs to be configured for forced
+ * mode, so genphy_config_aneg is needed as well.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
+static void qca808x_link_change_notify(struct phy_device *phydev)
+{
+ /* Assert the interface SGMII FIFO on link down and deassert it on
+ * link up. The interface device address is always the PHY address
+ * plus 1.
+ */
+ mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN,
+ phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+}
+
+static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED2500_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ /* Enable BLINK_CHECK_BYPASS by default to make the LED
+ * blink even when no duplex or speed mode is enabled.
+ */
+ *offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 2)
+ return -EINVAL;
+
+ return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, offload_trigger = 0;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca808x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK,
+ offload_trigger);
+}
+
+static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return false;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 2)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca808x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_SPEED2500_ON)
+ set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK);
+}
+
+static int qca808x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ if (!value) {
+ ret = qca808x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* PHY polarity is global and can't be set per LED.
+ * To detect a conflict, check whether the last requested
+ * polarity mode matches the new one.
+ */
+ if (priv->led_polarity_mode >= 0 &&
+ priv->led_polarity_mode != active_low) {
+ phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+ return -EINVAL;
+ }
+
+ /* Save the last PHY polarity mode */
+ priv->led_polarity_mode = active_low;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH,
+ active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+}
+
+static struct phy_driver qca808x_driver[] = {
+{
+ /* Qualcomm QCA8081 */
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = qca808x_probe,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .get_features = qca808x_get_features,
+ .config_aneg = qca808x_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = qca808x_read_status,
+ .config_init = qca808x_config_init,
+ .soft_reset = qca808x_soft_reset,
+ .cable_test_start = qca808x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .link_change_notify = qca808x_link_change_notify,
+ .led_brightness_set = qca808x_led_brightness_set,
+ .led_blink_set = qca808x_led_blink_set,
+ .led_hw_is_supported = qca808x_led_hw_is_supported,
+ .led_hw_control_set = qca808x_led_hw_control_set,
+ .led_hw_control_get = qca808x_led_hw_control_get,
+ .led_polarity_set = qca808x_led_polarity_set,
+}, };
+
+module_phy_driver(qca808x_driver);
+
+static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca808x_tbl);
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
new file mode 100644
index 000000000000..5d083ef0250e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+#define AT803X_DEBUG_REG_3C 0x3C
+
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
+
+#define MDIO_AZ_DEBUG 0x800D
+
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
+#define QCA8337_PHY_ID 0x004dd036
+#define QCA8K_PHY_ID_MASK 0xffffffff
+
+#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
+
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+};
+
+struct qca83xx_priv {
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+};
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA83XX PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+static int qca83xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(qca83xx_hw_stats);
+}
+
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+ struct qca83xx_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.access_type == MMD)
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
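+ /* Fold the masked hardware reading into the 64-bit software counter; U64_MAX marks a failed read */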
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & stat.mask;
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
+}
+
+static int qca83xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca83xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca83xx_config_init(struct phy_device *phydev)
+{
+ u8 switch_revision;
+
+ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+
+ switch (switch_revision) {
+ case 1:
+ /* For 100M waveform */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+ /* Turn on Gigabit clock */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+ break;
+
+ case 2:
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+ fallthrough;
+ case 4:
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+ break;
+ }
+
+ /* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
+ /* The QCA8327 requires the DAC amplitude adjustment for 100M to
+ * be set to +6%. Disable it on init and enable it only at 100M
+ * speed, following the original QCA source code.
+ */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ return qca83xx_config_init(phydev);
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+ /* Set the DAC amplitude adjustment to +6% for 100M while the link is running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port to restore the values that suspend reset */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+ /* On resume from suspend the switch executes a reset and
+ * restarts auto-negotiation. Wait for the reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
+ return 0;
+}
+
+static int qca8337_suspend(struct phy_device *phydev)
+{
+ /* Only the QCA8337 supports actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* The QCA8327 causes port unreliability when PHY suspend
+ * is set.
+ */
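+ /* Clear every BMCR bit except the speed and duplex selection */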
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
+
+static struct phy_driver qca83xx_driver[] = {
+{
+ /* QCA8337 */
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8337_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, };
+
+module_phy_driver(qca83xx_driver);
+
+static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca83xx_tbl);
diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c
new file mode 100644
index 000000000000..d28815ef56bb
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "qcom.h"
+
+MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_read(phydev, AT803X_DEBUG_DATA);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_read);
+
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set)
+{
+ u16 val;
+ int ret;
+
+ ret = at803x_debug_reg_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret & 0xffff;
+ val &= ~clear;
+ val |= set;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, val);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_mask);
+
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, data);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
+
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret, irq_enabled;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i;
+ static const unsigned int offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return -ENODEV;
+
+ mac = (const u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
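+ /* Each 16-bit MMD register holds two MAC bytes, the earlier byte in the high half */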
+ for (i = 0; i < 3; i++)
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ if (ret)
+ return ret;
+ } else {
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear WOL status */
+ ret = phy_read(phydev, AT803X_INTR_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* Check if interrupts other than WOL were triggered while the PHY is
+ * in interrupt mode; only the interrupts enabled by AT803X_INTR_ENABLE
+ * can be passed up to the interrupt pin.
+ */
+ irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (irq_enabled < 0)
+ return irq_enabled;
+
+ irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+ if (ret & irq_enabled && !phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_set_wol);
+
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (value < 0)
+ return;
+
+ if (value & AT803X_INTR_ENABLE_WOL)
+ wol->wolopts |= WAKE_MAGIC;
+}
+EXPORT_SYMBOL_GPL(at803x_get_wol);
+
+int at803x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, AT803X_INTR_STATUS);
+
+ return (err < 0) ? err : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_ack_interrupt);
+
+int at803x_config_intr(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
+ value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
+ value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+ value |= AT803X_INTR_ENABLE_LINK_FAIL;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+
+ err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ } else {
+ err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(at803x_config_intr);
+
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(at803x_handle_interrupt);
+
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask)
+{
+ int ss;
+
+ /* Read the AT8035 PHY-Specific Status register, which indicates the
+ * speed and duplex that the PHY is actually using, irrespective of
+ * whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ int sfc, speed;
+
+ sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+ if (sfc < 0)
+ return sfc;
+
+ speed = ss & ss_mask.speed_mask;
+ speed >>= ss_mask.speed_shift;
+
+ switch (speed) {
+ case AT803X_SS_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case QCA808X_SS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (ss & AT803X_SS_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+ case AT803X_SFC_MANUAL_MDI:
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ break;
+ case AT803X_SFC_MANUAL_MDIX:
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case AT803X_SFC_AUTOMATIC_CROSSOVER:
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_specific_status);
+
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = AT803X_SFC_MANUAL_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val = AT803X_SFC_MANUAL_MDIX;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+ AT803X_SFC_MDI_CROSSOVER_MODE_M,
+ FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+}
+EXPORT_SYMBOL_GPL(at803x_config_mdix);
+
+int at803x_prepare_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+
+ /* Changes of the mdix bits are disruptive to the normal operation;
+ * therefore any changes to these registers must be followed by a
+ * software reset to take effect.
+ */
+ if (ret == 1) {
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
+
+int at803x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_status);
+
+static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+{
+ int val;
+
+ val = phy_read(phydev, AT803X_SMART_SPEED);
+ if (val < 0)
+ return val;
+
+ if (val & AT803X_SMART_SPEED_ENABLE)
+ *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+ else
+ *d = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ switch (cnt) {
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ cnt = AT803X_DEFAULT_DOWNSHIFT;
+ fallthrough;
+ case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+ set = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER |
+ FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
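+ /* e.g. a requested count of 5 is stored as retry-limit field value 3 */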
+ mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+ break;
+ case DOWNSHIFT_DEV_DISABLE:
+ set = 0;
+ mask = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+
+ /* After changing the smart speed settings, we need to perform a
+ * software reset; use phy_init_hw() to make sure any values which
+ * might have been lost during the software reset are reapplied.
+ */
+ if (ret == 1)
+ ret = phy_init_hw(phydev);
+
+ return ret;
+}
+
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_get_tunable);
+
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_set_tunable);
+
+int at803x_cdt_fault_length(int dt)
+{
+ /* According to the datasheet the distance to the fault is
+ * DELTA_TIME * 0.824 meters.
+ *
+ * The author suspects the correct formula is:
+ *
+ * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+ *
+ * where c is the speed of light, VF is the velocity factor of
+ * the twisted-pair cable, and 125 MHz is the counter frequency.
+ * We divide by 2 because the hardware measures the round-trip
+ * time to the fault and back to the PHY.
+ *
+ * With a VF of 0.69 we get the factor 0.824 mentioned in the
+ * datasheet.
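+ *
+ * For illustration (assumed values, not from the datasheet):
+ * with c ~= 2.998e8 m/s and VF = 0.69 this gives
+ * (2.998e8 * 0.69) / 125e6 / 2 ~= 0.827 m per count, close
+ * to the 0.824 factor above. The (dt * 824) / 10 below then
+ * returns that length in centimeters (82.4 cm per count).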
+ */
+ return (dt * 824) / 10;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_fault_length);
+
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start)
+{
+ return phy_write(phydev, AT803X_CDT, cdt_start);
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_start);
+
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en)
+{
+ int val, ret;
+
+ /* One test run takes about 25ms */
+ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+ !(val & cdt_en),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
+
+static bool qca808x_cdt_fault_length_valid(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int qca808x_cable_test_result_trans(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case QCA808X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
+{
+ int val;
+ u32 cdt_length_reg = 0;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+ if (val < 0)
+ return val;
+
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
+ return at803x_cdt_fault_length(val);
+}
+
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ int length, result;
+ u16 pair_code;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair, result);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret, val;
+
+ *finished = false;
+
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+ if (val < 0)
+ return val;
+
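+ /* The status word packs one 4-bit result code per pair (A..D) */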
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
+
+ *finished = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_cable_test_get_status);
+
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_enable);
+
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ return !(val & QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_status);
+
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value)
+{
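+ /* Force the LED fully on or off; hw control resumes once QCA808X_LED_FORCE_EN is cleared again */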
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | (value ? QCA808X_LED_FORCE_ON :
+ QCA808X_LED_FORCE_OFF));
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_brightness_set);
+
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int ret;
+
+ /* Set blink to 50% off, 50% on at 4Hz by default */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+ QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+ QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+ if (ret)
+ return ret;
+
+ /* We use BLINK_1 for normal blinking */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+ if (ret)
+ return ret;
+
+ /* A 4 Hz blink gives a 250 ms period: 125 ms on, 125 ms off */
+ *delay_on = 250 / 2;
+ *delay_off = 250 / 2;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_blink_set);
diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h
new file mode 100644
index 000000000000..4bb541728846
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
+#define AT803X_SFC_ASSERT_CRS BIT(11)
+#define AT803X_SFC_FORCE_LINK BIT(10)
+#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
+#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
+#define AT803X_SFC_MANUAL_MDIX 0x1
+#define AT803X_SFC_MANUAL_MDI 0x0
+#define AT803X_SFC_SQE_TEST BIT(2)
+#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
+#define AT803X_SFC_DISABLE_JABBER BIT(0)
+
+#define AT803X_SPECIFIC_STATUS 0x11
+#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
+#define AT803X_SS_SPEED_1000 2
+#define AT803X_SS_SPEED_100 1
+#define AT803X_SS_SPEED_10 0
+#define AT803X_SS_DUPLEX BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
+#define AT803X_SS_MDIX BIT(6)
+
+#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
+#define QCA808X_SS_SPEED_2500 4
+
+#define AT803X_INTR_ENABLE 0x12
+#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
+#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
+#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
+#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
+#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
+#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
+#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
+#define AT803X_INTR_ENABLE_WOL BIT(0)
+
+#define AT803X_INTR_STATUS 0x13
+
+#define AT803X_SMART_SPEED 0x14
+#define AT803X_SMART_SPEED_ENABLE BIT(5)
+#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
+#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
+
+#define AT803X_CDT 0x16
+#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
+#define AT803X_CDT_ENABLE_TEST BIT(0)
+#define AT803X_CDT_STATUS 0x1c
+#define AT803X_CDT_STATUS_STAT_NORMAL 0
+#define AT803X_CDT_STATUS_STAT_SHORT 1
+#define AT803X_CDT_STATUS_STAT_OPEN 2
+#define AT803X_CDT_STATUS_STAT_FAIL 3
+#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
+#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
+
+#define QCA808X_CDT_ENABLE_TEST BIT(15)
+#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
+#define QCA808X_CDT_LENGTH_UNIT BIT(10)
+
+#define QCA808X_MMD3_CDT_STATUS 0x8064
+#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
+#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
+#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
+#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
+
+#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
+#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
+#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
+#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* NORMAL results are MDI codes with the type field set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Listed for reference; polling for this is already handled by at803x_cdt_wait_for_completion() */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
+
+#define QCA808X_MMD7_LED_GLOBAL 0x8073
+#define QCA808X_LED_BLINK_1 GENMASK(11, 6)
+#define QCA808X_LED_BLINK_2 GENMASK(5, 0)
+/* Values are the same for both BLINK_1 and BLINK_2 */
+#define QCA808X_LED_BLINK_FREQ_MASK GENMASK(5, 3)
+#define QCA808X_LED_BLINK_FREQ_2HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+#define QCA808X_LED_BLINK_FREQ_4HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+#define QCA808X_LED_BLINK_FREQ_8HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+#define QCA808X_LED_BLINK_FREQ_16HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+#define QCA808X_LED_BLINK_FREQ_32HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+#define QCA808X_LED_BLINK_FREQ_64HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+#define QCA808X_LED_BLINK_FREQ_128HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+#define QCA808X_LED_BLINK_FREQ_256HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+#define QCA808X_LED_BLINK_DUTY_MASK GENMASK(2, 0)
+#define QCA808X_LED_BLINK_DUTY_50_50 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+#define QCA808X_LED_BLINK_DUTY_75_25 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+#define QCA808X_LED_BLINK_DUTY_25_75 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+#define QCA808X_LED_BLINK_DUTY_33_67 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+#define QCA808X_LED_BLINK_DUTY_67_33 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+#define QCA808X_LED_BLINK_DUTY_17_83 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+#define QCA808X_LED_BLINK_DUTY_83_17 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+#define QCA808X_LED_BLINK_DUTY_8_92 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+
+/* LED hw control pattern is the same for every LED */
+#define QCA808X_LED_PATTERN_MASK GENMASK(15, 0)
+#define QCA808X_LED_SPEED2500_ON BIT(15)
+#define QCA808X_LED_SPEED2500_BLINK BIT(14)
+/* Follow blink trigger even if duplex or speed condition doesn't match */
+#define QCA808X_LED_BLINK_CHECK_BYPASS BIT(13)
+#define QCA808X_LED_FULL_DUPLEX_ON BIT(12)
+#define QCA808X_LED_HALF_DUPLEX_ON BIT(11)
+#define QCA808X_LED_TX_BLINK BIT(10)
+#define QCA808X_LED_RX_BLINK BIT(9)
+#define QCA808X_LED_TX_ON_10MS BIT(8)
+#define QCA808X_LED_RX_ON_10MS BIT(7)
+#define QCA808X_LED_SPEED1000_ON BIT(6)
+#define QCA808X_LED_SPEED100_ON BIT(5)
+#define QCA808X_LED_SPEED10_ON BIT(4)
+#define QCA808X_LED_COLLISION_BLINK BIT(3)
+#define QCA808X_LED_SPEED1000_BLINK BIT(2)
+#define QCA808X_LED_SPEED100_BLINK BIT(1)
+#define QCA808X_LED_SPEED10_BLINK BIT(0)
+
+/* LED force ctrl is the same for every LED.
+ * No documentation exists for this, not even an internal one
+ * under NDA, as QCOM only provides info about configuring the
+ * hw control pattern rules and doesn't indicate any way to
+ * force the LED into a specific mode.
+ * These defines come from reverse engineering and testing and
+ * may be incomplete or not entirely correct.
+ * For basic LED control and hw control these findings are
+ * enough to support LED control in all the required APIs.
+ *
+ * A comparison with the qca807x implementation showed a 1:1
+ * match, confirming the reverse engineering. It also revealed
+ * further details of the force mode and the blink modes.
+ */
+#define QCA808X_LED_FORCE_EN BIT(15)
+#define QCA808X_LED_FORCE_MODE_MASK GENMASK(14, 13)
+#define QCA808X_LED_FORCE_BLINK_1 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+#define QCA808X_LED_FORCE_BLINK_2 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+#define QCA808X_LED_FORCE_ON FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+#define QCA808X_LED_FORCE_OFF FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+
+#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
+#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
+#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+
+#define AT803X_DEBUG_ADDR 0x1D
+#define AT803X_DEBUG_DATA 0x1E
+
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
+#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
+
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
+#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
+
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
+
+enum stat_access_type {
+ PHY,
+ MMD
+};
+
+struct at803x_hw_stat {
+ const char *string;
+ u8 reg;
+ u32 mask;
+ enum stat_access_type access_type;
+};
+
+struct at803x_ss_mask {
+ u16 speed_mask;
+ u8 speed_shift;
+};
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set);
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+int at803x_ack_interrupt(struct phy_device *phydev);
+int at803x_config_intr(struct phy_device *phydev);
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev);
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask);
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
+int at803x_prepare_config_aneg(struct phy_device *phydev);
+int at803x_read_status(struct phy_device *phydev);
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data);
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data);
+int at803x_cdt_fault_length(int dt);
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en);
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished);
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg);
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg);
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value);
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 337899c69738..1fa70427b2a2 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -57,14 +57,6 @@
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
-#define RTL_SUPPORTS_5000FULL BIT(14)
-#define RTL_SUPPORTS_2500FULL BIT(13)
-#define RTL_SUPPORTS_10000FULL BIT(0)
-#define RTL_ADV_2500FULL BIT(7)
-#define RTL_LPADV_10000FULL BIT(11)
-#define RTL_LPADV_5000FULL BIT(6)
-#define RTL_LPADV_2500FULL BIT(5)
-
#define RTL9000A_GINMR 0x14
#define RTL9000A_GINMR_LINK_STATUS BIT(4)
@@ -676,11 +668,11 @@ static int rtl822x_get_features(struct phy_device *phydev)
return val;
linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_2500FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_2_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_5000FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_10000FULL);
+ phydev->supported, val & MDIO_SPEED_10G);
return genphy_read_abilities(phydev);
}
@@ -690,14 +682,12 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
int ret = 0;
if (phydev->autoneg == AUTONEG_ENABLE) {
- u16 adv2500 = 0;
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->advertising))
- adv2500 = RTL_ADV_2500FULL;
+ u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- RTL_ADV_2500FULL, adv2500);
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G,
+ adv);
if (ret < 0)
return ret;
}
@@ -715,12 +705,8 @@ static int rtl822x_read_status(struct phy_device *phydev)
if (lpadv < 0)
return lpadv;
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpadv);
}
ret = genphy_read_status(phydev);
@@ -738,7 +724,7 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
val = phy_read(phydev, 0x13);
phy_write(phydev, RTL821x_PAGE_SELECT, 0);
- return val >= 0 && val & RTL_SUPPORTS_2500FULL;
+ return val >= 0 && val & MDIO_PMA_SPEED_2_5G;
}
static int rtlgen_match_phy_device(struct phy_device *phydev)
@@ -1050,6 +1036,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc862),
+ .name = "RTL8251B 5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7fd9fe6a602b..7b1bc5fcef9b 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -22,7 +22,7 @@
struct gmii2rgmii {
struct phy_device *phy_dev;
- struct phy_driver *phy_drv;
+ const struct phy_driver *phy_drv;
struct phy_driver conv_phy_drv;
struct mdio_device *mdio;
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3dd52bf28f15..fe380fe196e7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -295,7 +295,9 @@ static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
-static struct class *ppp_class;
+static const struct class ppp_class = {
+ .name = "ppp",
+};
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
@@ -1394,11 +1396,9 @@ static int __init ppp_init(void)
goto out_net;
}
- ppp_class = class_create("ppp");
- if (IS_ERR(ppp_class)) {
- err = PTR_ERR(ppp_class);
+ err = class_register(&ppp_class);
+ if (err)
goto out_chrdev;
- }
err = rtnl_link_register(&ppp_link_ops);
if (err) {
@@ -1407,12 +1407,12 @@ static int __init ppp_init(void)
}
/* not a big deal if we fail here :-) */
- device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+ device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
out_class:
- class_destroy(ppp_class);
+ class_unregister(&ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
@@ -1607,7 +1607,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_fill_forward_path = ppp_fill_forward_path,
};
-static struct device_type ppp_type = {
+static const struct device_type ppp_type = {
.name = "ppp",
};
@@ -3549,8 +3549,8 @@ static void __exit ppp_cleanup(void)
pr_err("PPP: removing module but units remain!\n");
rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
- device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
- class_destroy(ppp_class);
+ device_destroy(&ppp_class, MKDEV(PPP_MAJOR, 0));
+ class_unregister(&ppp_class);
unregister_pernet_device(&ppp_net_ops);
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f575f225d417..0a44bbdcfb7b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -25,7 +25,6 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
-#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -2074,7 +2073,6 @@ static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8f95a562b8d0..0b3f21cba552 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -54,6 +54,7 @@
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
+#include <linux/math.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
@@ -77,6 +78,7 @@
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -523,8 +525,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
struct tun_flow_entry *e;
- u32 txq = 0;
- u32 numqueues = 0;
+ u32 txq, numqueues;
numqueues = READ_ONCE(tun->numqueues);
@@ -534,8 +535,7 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
} else {
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
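+ /* reciprocal_scale() maps the 32-bit hash onto [0, numqueues) without a divide */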
+ txq = reciprocal_scale(txq, numqueues);
}
return txq;
@@ -978,20 +978,15 @@ static int tun_net_init(struct net_device *dev)
struct ifreq *ifr = tun->ifr;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
spin_lock_init(&tun->lock);
err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0) {
- free_percpu(dev->tstats);
+ if (err < 0)
return err;
- }
tun_flow_init(tun);
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
@@ -1009,7 +1004,6 @@ static int tun_net_init(struct net_device *dev)
if (err < 0) {
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
- free_percpu(dev->tstats);
return err;
}
return 0;
@@ -1345,7 +1339,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
.ndo_change_carrier = tun_net_change_carrier,
@@ -1928,7 +1921,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
@@ -2318,7 +2311,6 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
- free_percpu(dev->tstats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2518,7 +2510,7 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
ret = 0;
goto out;
@@ -3645,12 +3637,22 @@ static int tun_set_coalesce(struct net_device *dev,
return 0;
}
+static void tun_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ channels->combined_count = tun->numqueues;
+ channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
+}
+
static const struct ethtool_ops tun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_channels = tun_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = tun_get_coalesce,
.set_coalesce = tun_set_coalesce,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3fd7dccf0f9c..3c360d4f0635 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -99,6 +99,7 @@ config USB_RTL8150
config USB_RTL8152
tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
+ select PHYLIB
select CRC32
select CRYPTO
select CRYPTO_HASH
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d837c1887416..88e084534853 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -667,7 +667,7 @@ static int ax88179_set_link_ksettings(struct net_device *net,
}
static int
-ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_keee *data)
{
int val;
@@ -676,29 +676,29 @@ ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
MDIO_MMD_PCS);
if (val < 0)
return val;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->supported, val);
/* Get advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->advertised, val);
/* Get LP advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->lp_advertised, val);
return 0;
}
static int
-ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_keee *data)
{
- u16 tmp16 = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ u16 tmp16 = linkmode_to_mii_eee_cap1_t(data->advertised);
return ax88179_phy_write_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, tmp16);
@@ -807,7 +807,7 @@ static void ax88179_enable_eee(struct usbnet *dev)
GMII_PHY_PAGE_SELECT, 2, &tmp16);
}
-static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -818,7 +818,7 @@ static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
return ax88179_ethtool_get_eee(dev, edata);
}
-static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -1587,7 +1587,7 @@ static int ax88179_reset(struct usbnet *dev)
u16 *tmp16;
u8 *tmp;
struct ax88179_data *ax179_data = dev->driver_priv;
- struct ethtool_eee eee_data;
+ struct ethtool_keee eee_data;
tmp16 = (u16 *)buf;
tmp = (u8 *)buf;
@@ -1663,7 +1663,7 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_disable_eee(dev);
ax88179_ethtool_get_eee(dev, &eee_data);
- eee_data.advertised = 0;
+ linkmode_zero(eee_data.advertised);
ax88179_ethtool_set_eee(dev, &eee_data);
/* Restart autoneg */
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index cd4083e0b3b9..e13e4920ee9b 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -339,7 +339,7 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
goto out;
- is_router = !!in6_dev->cnf.forwarding;
+ is_router = !!READ_ONCE(in6_dev->cnf.forwarding);
in6_dev_put(in6_dev);
/* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f088ea2ba6f3..1aeb36119d3f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2465,7 +2465,7 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
}
}
-static struct device_type hso_type = {
+static const struct device_type hso_type = {
.name = "wwan",
};
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index d2aa2c5b1989..80ee4fcdfb36 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1675,7 +1675,7 @@ static int lan78xx_set_wol(struct net_device *netdev,
return ret;
}
-static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
@@ -1711,7 +1711,7 @@ exit:
return ret;
}
-static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
int ret;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0d0672d2a654..5d6aeb086fc7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
@@ -891,8 +892,8 @@ struct r8152 {
void (*up)(struct r8152 *tp);
void (*down)(struct r8152 *tp);
void (*unload)(struct r8152 *tp);
- int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
- int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee);
bool (*in_nway)(struct r8152 *tp);
void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
@@ -8922,32 +8923,31 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
-static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
+ u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised);
tp->eee_en = eee->eee_enabled;
tp->eee_adv = val;
@@ -8957,31 +8957,30 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
return 0;
}
-static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = ocp_reg_read(tp, OCP_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = ocp_reg_read(tp, OCP_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
static int
-rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -9008,7 +9007,7 @@ out:
}
static int
-rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -10078,7 +10077,7 @@ static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
* driver supports it.
*/
if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
- return 0;
+ return -ENODEV;
/* The vendor mode is not always config #1, so to find it out. */
c = udev->config;
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 143bd4ab160d..57947a5590cc 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = SR9800_EEPROM_LEN;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* LED Setting Rule :
* AABB:CCDD
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d14b0d78541..e84efa661589 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1633,7 +1633,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
- free_percpu(net->tstats);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1645,7 +1644,6 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1654,11 +1652,11 @@ static const struct net_device_ops usbnet_netdev_ops = {
// precondition: never called in_interrupt
-static struct device_type wlan_type = {
+static const struct device_type wlan_type = {
.name = "wlan",
};
-static struct device_type wwan_type = {
+static const struct device_type wwan_type = {
.name = "wwan",
};
@@ -1710,10 +1708,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_speed = SPEED_UNSET;
dev->tx_speed = SPEED_UNSET;
- net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!net->tstats)
- goto out0;
-
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
init_waitqueue_head(&dev->wait);
@@ -1743,6 +1737,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
net->ethtool_ops = &usbnet_ethtool_ops;
+ net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
// allow device-specific bind/init procedures
// NOTE net->name still not usable ...
@@ -1861,8 +1856,6 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
- free_percpu(net->tstats);
-out0:
free_netdev(net);
out:
return status;
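
The usbnet hunks above follow the core-allocated per-CPU stats conversion: instead of allocating net->tstats itself and wiring .ndo_get_stats64 = dev_get_tstats64, the driver only declares the stats type and lets the core handle allocation, readout, and freeing. A sketch, with sketch_setup() standing in for the driver's setup path:

static void sketch_setup(struct net_device *net)
{
	/* register_netdevice() now allocates net->tstats, frees it on
	 * unregister, and dev_get_stats() folds the per-CPU counters in
	 * when no .ndo_get_stats64 callback is provided.
	 */
	net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}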
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cd4a6fe458f9..13d902462d8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -729,80 +729,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off, truesize, page_offset;
- struct sk_buff *nskb;
- struct page *page;
- int i, head_off;
- void *va;
-
- /* We need a private copy of the skb and data buffers since
- * the ebpf program can modify it. We segment the original skb
- * into order-0 pages without linearize it.
- *
- * Make sure we have enough space for linear and paged area
- */
- max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
- VETH_XDP_HEADROOM);
- if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
- goto drop;
-
- size = min_t(u32, skb->len, max_head_size);
- truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
- /* Allocate skb head */
- va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
- if (!va)
- goto drop;
-
- nskb = napi_build_skb(va, truesize);
- if (!nskb) {
- page_pool_free_va(rq->page_pool, va, true);
+ if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
goto drop;
- }
-
- skb_reserve(nskb, VETH_XDP_HEADROOM);
- skb_copy_header(nskb, skb);
- skb_mark_for_recycle(nskb);
-
- if (skb_copy_bits(skb, 0, nskb->data, size)) {
- consume_skb(nskb);
- goto drop;
- }
- skb_put(nskb, size);
- head_off = skb_headroom(nskb) - skb_headroom(skb);
- skb_headers_offset_update(nskb, head_off);
-
- /* Allocate paged area of new skb */
- off = size;
- len = skb->len - off;
-
- for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- size = min_t(u32, len, PAGE_SIZE);
- truesize = size;
-
- page = page_pool_dev_alloc(rq->page_pool, &page_offset,
- &truesize);
- if (!page) {
- consume_skb(nskb);
- goto drop;
- }
-
- skb_add_rx_frag(nskb, i, page, page_offset, size,
- truesize);
- if (skb_copy_bits(skb, off,
- page_address(page) + page_offset,
- size)) {
- consume_skb(nskb);
- goto drop;
- }
-
- len -= size;
- off += size;
- }
-
- consume_skb(skb);
- skb = nskb;
+ skb = *pskb;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -1483,6 +1413,7 @@ static void veth_free_queues(struct net_device *dev)
static int veth_dev_init(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
return veth_alloc_queues(dev);
}
@@ -1514,7 +1445,7 @@ static int veth_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
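
The block removed from veth above was an open-coded copy of the packet into page_pool memory, giving the BPF program a private, writable buffer with XDP_PACKET_HEADROOM in front; that logic now lives in the shared helper skb_pp_cow_data(), which may replace the skb it is handed. A sketch of the call pattern under those assumptions:

if (skb_shared(skb) || skb_head_is_locked(skb) ||
    skb_shinfo(skb)->nr_frags ||
    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
	if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
		goto drop;	/* page_pool allocation or copy failed */
	skb = *pskb;		/* the helper may have swapped the skb */
}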
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index b1bb1b04b664..a1ba5169ed5d 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -13,19 +13,6 @@
#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
sizeof(struct af_vsockmon_hdr))
-static int vsockmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
- return 0;
-}
-
-static void vsockmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct vsockmon {
struct vsock_tap vt;
};
@@ -59,9 +46,6 @@ static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
-
- stats->tx_packets = 0;
- stats->tx_bytes = 0;
}
static int vsockmon_is_valid_mtu(int new_mtu)
@@ -79,8 +63,6 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
}
static const struct net_device_ops vsockmon_ops = {
- .ndo_init = vsockmon_dev_init,
- .ndo_uninit = vsockmon_dev_uninit,
.ndo_open = vsockmon_open,
.ndo_stop = vsockmon_close,
.ndo_start_xmit = vsockmon_xmit,
@@ -112,6 +94,7 @@ static void vsockmon_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->mtu = DEFAULT_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
}
static struct rtnl_link_ops vsockmon_link_ops __read_mostly = {
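
vsockmon gets the same treatment with the LSTATS flavour; dropping the explicit tx_packets/tx_bytes zeroing is safe because dev_get_stats() clears the rtnl_link_stats64 buffer before calling into the driver. A sketch:

static void sketch_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	/* *stats arrives zeroed; fill in only what the device counts */
	dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}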
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 16106e088c63..3495591a5c29 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2841,26 +2841,19 @@ static int vxlan_init(struct net_device *dev)
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_init(vxlan);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats) {
- err = -ENOMEM;
- goto err_vnigroup_uninit;
- }
-
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
- goto err_free_percpu;
+ goto err_vnigroup_uninit;
err = vxlan_mdb_init(vxlan);
if (err)
goto err_gro_cells_destroy;
+ netdev_lockdep_set_classes(dev);
return 0;
err_gro_cells_destroy:
gro_cells_destroy(&vxlan->gro_cells);
-err_free_percpu:
- free_percpu(dev->tstats);
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
@@ -2891,8 +2884,6 @@ static void vxlan_uninit(struct net_device *dev)
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
-
- free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
@@ -3223,7 +3214,6 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -3247,13 +3237,12 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type vxlan_type = {
+static const struct device_type vxlan_type = {
.name = "vxlan",
};
@@ -3315,6 +3304,7 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -4826,55 +4816,43 @@ static __net_init int vxlan_init_net(struct net *net)
NULL);
}
-static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
+ struct list_head *dev_to_kill)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
- struct net_device *dev, *aux;
-
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &vxlan_link_ops)
- unregister_netdevice_queue(dev, head);
-
- list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
- /* If vxlan->dev is in the same netns, it has already been added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(vxlan->dev), net))
- unregister_netdevice_queue(vxlan->dev, head);
- }
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
+ vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- unsigned int h;
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- }
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- vxlan_destroy_tunnels(net, &list);
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ vxlan_destroy_tunnels(vn, dev_to_kill);
+ }
+}
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ unsigned int h;
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
- }
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch = vxlan_exit_batch_net,
+ .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
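
The vxlan pernet rework splits tunnel teardown across the new hooks: ->exit_batch_rtnl() runs with rtnl_lock already held and only queues devices onto dev_to_kill for one batched unregister, while checks that need no locking move to a plain ->exit(). The resulting shape, sketched with hypothetical sketch_* callbacks:

static struct pernet_operations sketch_net_ops = {
	.init            = sketch_init_net,
	/* called under rtnl_lock; queue devices on dev_to_kill only */
	.exit_batch_rtnl = sketch_exit_batch_rtnl,
	/* called after the batched unregister, without RTNL */
	.exit            = sketch_exit_net,
	.id              = &sketch_net_id,
	.size            = sizeof(struct sketch_net),
};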
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 7dda87756d3f..31ab2136cdf1 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -197,6 +197,18 @@ config FARSYNC
To compile this driver as a module, choose M here: the
module will be called farsync.
+config FSL_QMC_HDLC
+ tristate "Freescale QMC HDLC support"
+ depends on HDLC
+ depends on CPM_QMC
+ help
+ HDLC support using the Freescale QUICC Multichannel Controller (QMC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl_qmc_hdlc.
+
+ If unsure, say N.
+
config FSL_UCC_HDLC
tristate "Freescale QUICC Engine HDLC support"
depends on HDLC
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 8119b49d1da9..00e9b7ee1e01 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
+obj-$(CONFIG_FSL_QMC_HDLC) += fsl_qmc_hdlc.o
obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index c04dc88bda6c..f547c22e26ac 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -18,7 +18,12 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-static struct class *framer_class;
+static void framer_release(struct device *dev);
+static const struct class framer_class = {
+ .name = "framer",
+ .dev_release = framer_release,
+};
+
static DEFINE_MUTEX(framer_provider_mutex);
static LIST_HEAD(framer_provider_list);
static DEFINE_IDA(framer_ida);
@@ -384,7 +389,7 @@ static struct framer_provider *framer_provider_of_lookup(const struct device_nod
return ERR_PTR(-EPROBE_DEFER);
}
-static struct framer *framer_of_get_from_provider(struct of_phandle_args *args)
+static struct framer *framer_of_get_from_provider(const struct of_phandle_args *args)
{
struct framer_provider *framer_provider;
struct framer *framer;
@@ -627,7 +632,7 @@ struct framer *framer_create(struct device *dev, struct device_node *node,
INIT_DELAYED_WORK(&framer->polling_work, framer_polling_work);
BLOCKING_INIT_NOTIFIER_HEAD(&framer->notifier_list);
- framer->dev.class = framer_class;
+ framer->dev.class = &framer_class;
framer->dev.parent = dev;
framer->dev.of_node = node ? node : dev->of_node;
framer->id = id;
@@ -735,12 +740,13 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
* should provide a custom of_xlate function that reads the *args* and returns
* the appropriate framer.
*/
-struct framer *framer_provider_simple_of_xlate(struct device *dev, struct of_phandle_args *args)
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct class_dev_iter iter;
struct framer *framer;
- class_dev_iter_init(&iter, framer_class, NULL, NULL);
+ class_dev_iter_init(&iter, &framer_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
framer = dev_to_framer(dev);
if (args->np != framer->dev.of_node)
@@ -768,7 +774,7 @@ EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider *framer_provider;
@@ -830,7 +836,7 @@ static void devm_framer_provider_of_unregister(struct device *dev, void *res)
struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider **ptr, *framer_provider;
@@ -869,14 +875,6 @@ static void framer_release(struct device *dev)
static int __init framer_core_init(void)
{
- framer_class = class_create("framer");
- if (IS_ERR(framer_class)) {
- pr_err("failed to create framer class (%pe)\n", framer_class);
- return PTR_ERR(framer_class);
- }
-
- framer_class->dev_release = framer_release;
-
- return 0;
+ return class_register(&framer_class);
}
device_initcall(framer_core_init);
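
framer-core moves from a runtime class_create() to a statically defined, const struct class registered at init time, which removes an allocation, an error path, and a writable global. The pattern, sketched (release body omitted):

static void sketch_release(struct device *dev);

static const struct class sketch_class = {
	.name        = "sketch",
	.dev_release = sketch_release,
};

static int __init sketch_init(void)
{
	/* class_register() accepts the const class directly */
	return class_register(&sketch_class);
}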
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index 4f81053ee4f0..413a3c1d15bb 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -838,7 +838,7 @@ static int pef2256_probe(struct platform_device *pdev)
return 0;
}
-static int pef2256_remove(struct platform_device *pdev)
+static void pef2256_remove(struct platform_device *pdev)
{
struct pef2256 *pef2256 = platform_get_drvdata(pdev);
@@ -849,8 +849,6 @@ static int pef2256_remove(struct platform_device *pdev)
pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
-
- return 0;
}
static const struct of_device_id pef2256_id_table[] = {
@@ -865,7 +863,7 @@ static struct platform_driver pef2256_driver = {
.of_match_table = pef2256_id_table,
},
.probe = pef2256_probe,
- .remove = pef2256_remove,
+ .remove_new = pef2256_remove,
};
module_platform_driver(pef2256_driver);
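
pef2256 is converted to the void-returning platform remove callback: the driver core cannot act on a nonzero return from .remove(), so .remove_new() makes the inability to fail explicit. A sketch, with sketch_quiesce() a hypothetical teardown helper:

static void sketch_remove(struct platform_device *pdev)
{
	struct sketch *priv = platform_get_drvdata(pdev);

	sketch_quiesce(priv);	/* nothing to return */
}

static struct platform_driver sketch_driver = {
	.driver     = { .name = "sketch" },
	.probe      = sketch_probe,
	.remove_new = sketch_remove,
};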
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
new file mode 100644
index 000000000000..960371df470a
--- /dev/null
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale QMC HDLC Device Driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/framer/framer.h>
+#include <linux/hdlc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <soc/fsl/qe/qmc.h>
+
+struct qmc_hdlc_desc {
+ struct net_device *netdev;
+ struct sk_buff *skb; /* NULL if the descriptor is not in use */
+ dma_addr_t dma_addr;
+ size_t dma_size;
+};
+
+struct qmc_hdlc {
+ struct device *dev;
+ struct qmc_chan *qmc_chan;
+ struct net_device *netdev;
+ struct framer *framer;
+ spinlock_t carrier_lock; /* Protect carrier detection */
+ struct notifier_block nb;
+ bool is_crc32;
+ spinlock_t tx_lock; /* Protect tx descriptors */
+ struct qmc_hdlc_desc tx_descs[8];
+ unsigned int tx_out;
+ struct qmc_hdlc_desc rx_descs[4];
+ u32 slot_map;
+};
+
+static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
+{
+ return dev_to_hdlc(netdev)->priv;
+}
+
+static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
+{
+ struct framer_status framer_status;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
+ return ret;
+ }
+ if (framer_status.link_is_on)
+ netif_carrier_on(qmc_hdlc->netdev);
+ else
+ netif_carrier_off(qmc_hdlc->netdev);
+
+ return 0;
+}
+
+static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb);
+ int ret;
+
+ if (action != FRAMER_EVENT_STATUS)
+ return NOTIFY_DONE;
+
+ ret = qmc_hdlc_framer_set_carrier(qmc_hdlc);
+ return ret ? NOTIFY_DONE : NOTIFY_OK;
+}
+
+static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc)
+{
+ struct framer_status framer_status;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_power_on(qmc_hdlc->framer);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Be sure that get_status is supported */
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
+ goto framer_power_off;
+ }
+
+ qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier;
+ ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret);
+ goto framer_power_off;
+ }
+
+ return 0;
+
+framer_power_off:
+ framer_power_off(qmc_hdlc->framer);
+ return ret;
+}
+
+static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc)
+{
+ if (!qmc_hdlc->framer)
+ return;
+
+ framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb);
+ framer_power_off(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface,
+ const te1_settings *te1)
+{
+ struct framer_config config;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_get_config(qmc_hdlc->framer, &config);
+ if (ret)
+ return ret;
+
+ switch (if_iface) {
+ case IF_IFACE_E1:
+ config.iface = FRAMER_IFACE_E1;
+ break;
+ case IF_IFACE_T1:
+ config.iface = FRAMER_IFACE_T1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (te1->clock_type) {
+ case CLOCK_DEFAULT:
+ /* Keep current value */
+ break;
+ case CLOCK_EXT:
+ config.clock_type = FRAMER_CLOCK_EXT;
+ break;
+ case CLOCK_INT:
+ config.clock_type = FRAMER_CLOCK_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ config.line_clock_rate = te1->clock_rate;
+
+ return framer_set_config(qmc_hdlc->framer, &config);
+}
+
+static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1)
+{
+ struct framer_config config;
+ int ret;
+
+ if (!qmc_hdlc->framer) {
+ *if_iface = IF_IFACE_E1;
+ return 0;
+ }
+
+ ret = framer_get_config(qmc_hdlc->framer, &config);
+ if (ret)
+ return ret;
+
+ switch (config.iface) {
+ case FRAMER_IFACE_E1:
+ *if_iface = IF_IFACE_E1;
+ break;
+ case FRAMER_IFACE_T1:
+ *if_iface = IF_IFACE_T1;
+ break;
+ }
+
+ if (!te1)
+ return 0; /* Only iface type requested */
+
+ switch (config.clock_type) {
+ case FRAMER_CLOCK_EXT:
+ te1->clock_type = CLOCK_EXT;
+ break;
+ case FRAMER_CLOCK_INT:
+ te1->clock_type = CLOCK_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ te1->clock_rate = config.line_clock_rate;
+ return 0;
+}
+
+static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc)
+{
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_init(qmc_hdlc->framer);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc)
+{
+ if (!qmc_hdlc->framer)
+ return;
+
+ framer_exit(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);
+
+#define QMC_HDLC_RX_ERROR_FLAGS \
+ (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA | \
+ QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)
+
+static void qmc_hdlc_recv_complete(void *context, size_t length, unsigned int flags)
+{
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
+ int ret;
+
+ netdev = desc->netdev;
+ qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+
+ if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
+ netdev->stats.rx_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
+ netdev->stats.rx_over_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_UNA) /* bits received not multiple of 8 */
+ netdev->stats.rx_frame_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
+ netdev->stats.rx_frame_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
+ netdev->stats.rx_crc_errors++;
+ kfree_skb(desc->skb);
+ } else {
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+
+ skb_put(desc->skb, length);
+ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+ netif_rx(desc->skb);
+ }
+
+ /* Re-queue a transfer using the same descriptor */
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
+ netdev->stats.rx_errors++;
+ }
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
+{
+ int ret;
+
+ desc->skb = dev_alloc_skb(size);
+ if (!desc->skb)
+ return -ENOMEM;
+
+ desc->dma_size = size;
+ desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+ desc->dma_size, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+ if (ret)
+ goto free_skb;
+
+ ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+ qmc_hdlc_recv_complete, desc);
+ if (ret)
+ goto dma_unmap;
+
+ return 0;
+
+dma_unmap:
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+free_skb:
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ return ret;
+}
+
+static void qmc_hdlc_xmit_complete(void *context)
+{
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
+ struct sk_buff *skb;
+
+ netdev = desc->netdev;
+ qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+ skb = desc->skb;
+ desc->skb = NULL; /* Release the descriptor */
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ }
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+}
+
+static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
+{
+ int ret;
+
+ desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+ desc->dma_size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "failed to map skb\n");
+ return ret;
+ }
+
+ ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+ qmc_hdlc_xmit_complete, desc);
+ if (ret) {
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+ dev_err(qmc_hdlc->dev, "qmc chan write returns %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_hdlc_desc *desc;
+ int err;
+
+ guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);
+
+ desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
+ if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
+ /* Should never happen.
+ * Previous xmit should have already stopped the queue.
+ */
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc->netdev = netdev;
+ desc->dma_size = skb->len;
+ desc->skb = skb;
+ err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
+ if (err) {
+ desc->skb = NULL; /* Release the descriptor */
+ if (err == -EBUSY) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);
+
+ if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
+ netif_stop_queue(netdev);
+
+ return NETDEV_TX_OK;
+}
+
+static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
+ u32 slot_map, struct qmc_chan_ts_info *ts_info)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+
+ /* Tx and Rx available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(map, slot_map);
+ bitmap_scatter(ts_mask, map, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
+ map, ts_mask_avail, ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
+ ts_info->rx_ts_mask = ts_info->tx_ts_mask;
+ return 0;
+}
+
+static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
+ const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+ u32 slot_array[2];
+
+ /* Tx and Rx masks and available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
+ dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask, ts_info->tx_ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
+ bitmap_gather(map, ts_mask, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
+ ts_mask_avail, ts_mask, map);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr32(slot_array, map, 64);
+ if (slot_array[1]) {
+ dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%64pb, %64pb) -> %64pb\n",
+ ts_mask_avail, ts_mask, map);
+ return -EINVAL;
+ }
+
+ *slot_map = slot_array[0];
+ return 0;
+}
+
+static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
+{
+ struct qmc_chan_ts_info ts_info;
+ int ret;
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+ ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
+ if (ret)
+ return ret;
+
+ ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+
+ qmc_hdlc->slot_map = te1->slot_map;
+
+ ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer set iface failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ te1_settings te1;
+ int ret;
+
+ switch (ifs->type) {
+ case IF_GET_IFACE:
+ if (ifs->size < sizeof(te1)) {
+ /* Retrieve type only */
+ ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL);
+ if (ret)
+ return ret;
+
+ if (!ifs->size)
+ return 0; /* only type requested */
+
+ ifs->size = sizeof(te1); /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ memset(&te1, 0, sizeof(te1));
+
+ /* Retrieve info from framer */
+ ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1);
+ if (ret)
+ return ret;
+
+ /* Update slot_map */
+ te1.slot_map = qmc_hdlc->slot_map;
+
+ if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_E1:
+ case IF_IFACE_T1:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
+ return -EFAULT;
+
+ return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);
+
+ default:
+ return hdlc_ioctl(netdev, ifs);
+ }
+}
+
+static int qmc_hdlc_open(struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_chan_param chan_param;
+ struct qmc_hdlc_desc *desc;
+ int ret;
+ int i;
+
+ ret = qmc_hdlc_framer_start(qmc_hdlc);
+ if (ret)
+ return ret;
+
+ ret = hdlc_open(netdev);
+ if (ret)
+ goto framer_stop;
+
+ /* Update carrier */
+ qmc_hdlc_framer_set_carrier(qmc_hdlc);
+
+ chan_param.mode = QMC_HDLC;
+ /* HDLC_MAX_MRU + 4 for the CRC
+ * HDLC_MAX_MRU + 4 + 8 for the CRC and some extra space needed by the QMC
+ */
+ chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8;
+ chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4;
+ chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32;
+ ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret);
+ goto hdlc_close;
+ }
+
+ /* Queue as many recv descriptors as possible */
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+ desc = &qmc_hdlc->rx_descs[i];
+
+ desc->netdev = netdev;
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size);
+ if (ret == -EBUSY && i != 0)
+ break; /* All of the QMC channel's capacity is in use */
+ if (ret)
+ goto free_desc;
+ }
+
+ ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret);
+ goto free_desc;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+free_desc:
+ qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ while (i--) {
+ desc = &qmc_hdlc->rx_descs[i];
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+hdlc_close:
+ hdlc_close(netdev);
+framer_stop:
+ qmc_hdlc_framer_stop(qmc_hdlc);
+ return ret;
+}
+
+static int qmc_hdlc_close(struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_hdlc_desc *desc;
+ int i;
+
+ qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+
+ netif_stop_queue(netdev);
+
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
+ desc = &qmc_hdlc->tx_descs[i];
+ if (!desc->skb)
+ continue;
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_TO_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+ desc = &qmc_hdlc->rx_descs[i];
+ if (!desc->skb)
+ continue;
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+
+ hdlc_close(netdev);
+ qmc_hdlc_framer_stop(qmc_hdlc);
+ return 0;
+}
+
+static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ if (encoding != ENCODING_NRZ)
+ return -EINVAL;
+
+ switch (parity) {
+ case PARITY_CRC16_PR1_CCITT:
+ qmc_hdlc->is_crc32 = false;
+ break;
+ case PARITY_CRC32_PR1_CCITT:
+ qmc_hdlc->is_crc32 = true;
+ break;
+ default:
+ dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops qmc_hdlc_netdev_ops = {
+ .ndo_open = qmc_hdlc_open,
+ .ndo_stop = qmc_hdlc_close,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_siocwandev = qmc_hdlc_ioctl,
+};
+
+static int qmc_hdlc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qmc_chan_ts_info ts_info;
+ struct qmc_hdlc *qmc_hdlc;
+ struct qmc_chan_info info;
+ hdlc_device *hdlc;
+ int ret;
+
+ qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
+ if (!qmc_hdlc)
+ return -ENOMEM;
+
+ qmc_hdlc->dev = dev;
+ spin_lock_init(&qmc_hdlc->tx_lock);
+ spin_lock_init(&qmc_hdlc->carrier_lock);
+
+ qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
+ if (IS_ERR(qmc_hdlc->qmc_chan))
+ return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
+ "get QMC channel failed\n");
+
+ ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
+ if (ret)
+ return dev_err_probe(dev, ret, "get QMC channel info failed\n");
+
+ if (info.mode != QMC_HDLC)
+ return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
+ info.mode);
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret)
+ return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");
+
+ ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
+ if (ret)
+ return ret;
+
+ qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
+ if (IS_ERR(qmc_hdlc->framer))
+ return PTR_ERR(qmc_hdlc->framer);
+
+ ret = qmc_hdlc_framer_init(qmc_hdlc);
+ if (ret)
+ return ret;
+
+ qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
+ if (!qmc_hdlc->netdev) {
+ ret = -ENOMEM;
+ goto framer_exit;
+ }
+
+ hdlc = dev_to_hdlc(qmc_hdlc->netdev);
+ hdlc->attach = qmc_hdlc_attach;
+ hdlc->xmit = qmc_hdlc_xmit;
+ SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
+ qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
+ qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
+ ret = register_hdlc_device(qmc_hdlc->netdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register hdlc device\n");
+ goto free_netdev;
+ }
+
+ platform_set_drvdata(pdev, qmc_hdlc);
+ return 0;
+
+free_netdev:
+ free_netdev(qmc_hdlc->netdev);
+framer_exit:
+ qmc_hdlc_framer_exit(qmc_hdlc);
+ return ret;
+}
+
+static int qmc_hdlc_remove(struct platform_device *pdev)
+{
+ struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);
+
+ unregister_hdlc_device(qmc_hdlc->netdev);
+ free_netdev(qmc_hdlc->netdev);
+ qmc_hdlc_framer_exit(qmc_hdlc);
+
+ return 0;
+}
+
+static const struct of_device_id qmc_hdlc_id_table[] = {
+ { .compatible = "fsl,qmc-hdlc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
+
+static struct platform_driver qmc_hdlc_driver = {
+ .driver = {
+ .name = "fsl-qmc-hdlc",
+ .of_match_table = qmc_hdlc_id_table,
+ },
+ .probe = qmc_hdlc_probe,
+ .remove = qmc_hdlc_remove,
+};
+module_platform_driver(qmc_hdlc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("QMC HDLC driver");
+MODULE_LICENSE("GPL");
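
The slot-map translation in the new driver leans on bitmap_scatter()/bitmap_gather(): a logical slot map is spread onto the positions the hardware reports as usable timeslots, and gathered back for the reverse direction. A worked sketch with small masks (values chosen purely for illustration):

DECLARE_BITMAP(avail, 64);
DECLARE_BITMAP(map, 64);
DECLARE_BITMAP(ts, 64);

bitmap_from_u64(avail, 0x33);	/* timeslots 0,1,4,5 usable */
bitmap_from_u64(map, 0x05);	/* logical slots 0 and 2 wanted */

/* The n-th set bit of map lands on the position of the n-th set bit
 * of avail: here ts becomes 0x11, i.e. hardware timeslots 0 and 4.
 */
bitmap_scatter(ts, map, avail, 64);

The bitmap_weight() comparison in qmc_hdlc_xlate_slot_map() then catches requests that ask for more slots than avail provides, since scatter silently drops bits that have nowhere to land.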
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index a176653c8861..df275b4fccb6 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -263,7 +263,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
* call skb_cow_data, so that there's no chance that data is removed
* from the skb, so that later we can extract the original endpoint.
*/
- offset = skb->data - skb_network_header(skb);
+ offset = -skb_network_offset(skb);
skb_push(skb, offset);
num_frags = skb_cow_data(skb, 0, &trailer);
offset += sizeof(struct message_data);
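
The wireguard change is a pure identity: skb_network_offset() is defined as skb_network_header(skb) - skb->data, so the number of bytes to push to get back to the network header is its negation. Sketched:

static inline int sketch_push_to_network_header(const struct sk_buff *skb)
{
	/* equals skb->data - skb_network_header(skb) from the old code */
	return -skb_network_offset(skb);
}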
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2fceea9f6550..e3fd48dd3909 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1759,6 +1759,10 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
}
static const struct ieee80211_ops adm8211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = adm8211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
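
adm8211 here (and ar5523 just below, plus many more drivers later in the series) gains explicit channel-context emulation: mac80211 now drives everything through the chanctx ops, so drivers without hardware chanctx support plug in the ieee80211_emulate_* helpers rather than relying on an implicit fallback. The boilerplate, sketched:

static const struct ieee80211_ops sketch_ops = {
	.add_chanctx        = ieee80211_emulate_add_chanctx,
	.remove_chanctx     = ieee80211_emulate_remove_chanctx,
	.change_chanctx     = ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
	/* ...the driver's own callbacks follow... */
};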
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index a742cec44e3d..815f8f599f5d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1358,6 +1358,10 @@ static void ar5523_configure_filter(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ar5523_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = ar5523_start,
.stop = ar5523_stop,
.tx = ar5523_tx,
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0032f8aa892f..9ce6f49ab261 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -3613,7 +3613,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
goto err_free_mac;
}
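
The -ENOTSUPP to -EOPNOTSUPP substitutions repeated through the ath10k hunks below all share one rationale: ENOTSUPP (524) is kernel-internal, so if it escapes to user space strerror() reports "Unknown error 524", whereas EOPNOTSUPP is a proper UAPI errno. The shape of each fix, with a hypothetical predicate:

if (hw_rev_is_unknown(ar))
	return -EOPNOTSUPP;	/* not -ENOTSUPP: that value is not
				 * exported to user space */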
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index e5ef0352e319..8d274e0f374b 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _COREDUMP_H_
@@ -13,7 +13,11 @@
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple
+ * struct ath10k_dump_ram_data_hdr
+ * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 907e1e13871a..dbaf262cd7c1 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -381,7 +382,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
htt->target_version_major != 3) {
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4a9270e2a4c8..603f6de62b0a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HTT_H_
@@ -906,7 +906,7 @@ struct htt_data_tx_completion_ext {
__le16 msdus_rssi[];
} __packed;
-/**
+/*
* @brief target -> host TX completion indication message definition
*
* @details
@@ -1474,15 +1474,19 @@ enum htt_q_depth_type {
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
/**
- * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
*
* Defines host q state format and behavior. See htt_q_state.
*
+ * @paddr: Queue physical address
+ * @num_peers: Number of supported peers
+ * @num_tids: Number of supported TIDs
* @record_size: Defines the size of each host q entry in bytes. In practice
* however firmware (at least 10.4.3-00191) ignores this host
* configuration value and uses hardcoded value of 1.
* @record_multiplier: This is valid only when q depth type is MSDUs. It
* defines the exponent for the power of 2 multiplication.
+ * @pad: struct padding for 32-bit alignment
*/
struct htt_q_state_conf {
__le32 paddr;
@@ -1518,7 +1522,7 @@ struct htt_frag_desc_bank_cfg64 {
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
/**
- * htt_q_state - shared between host and firmware via DMA
+ * struct htt_q_state - shared between host and firmware via DMA
*
* This structure is used for the host to expose its software queue state to
* firmware so that its rate control can schedule fetch requests for optimized
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 090bcf148d0c..e322b528baaf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "mac.h"
@@ -2034,8 +2034,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
- ieee80211_beacon_update_cntdwn(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_beacon_update_cntdwn(vif, 0);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
@@ -2047,7 +2047,7 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
} else {
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
@@ -4056,7 +4056,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
!(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -7065,7 +7065,7 @@ static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *st
if (sta) {
if (!sta->wme)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
arsta = (struct ath10k_sta *)sta->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3de2de6d44bc..5c34b156b4ff 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/pci.h>
@@ -889,7 +889,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
@@ -2668,7 +2668,7 @@ static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ar_pci->pci_soft_reset)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_soft_reset(ar);
}
@@ -2808,7 +2808,7 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON(!ar_pci->pci_hard_reset))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_hard_reset(ar);
}
@@ -3594,7 +3594,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
break;
default:
WARN_ON(1);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 6b6aa3c36744..aed97fd121ba 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
@@ -851,6 +852,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
arg->desc_id = ev->desc_id;
arg->status = ev->status;
@@ -1347,7 +1352,7 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
__le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
__le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
__le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
arg->min_tx_power = ev->hw_min_tx_power;
@@ -2119,9 +2124,9 @@ static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 83a8f07a687f..8a2f87d0a3a3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
@@ -2343,7 +2343,7 @@ struct wmi_tlv_adaptive_qcs {
} __packed;
/**
- * wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
*
* @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
* Only vdev_map is valid.
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 88befe92f95d..2e9661f4bea8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
@@ -3884,8 +3884,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->bss_conf.csa_active &&
- ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
+ ieee80211_csa_finish(arvif->vif, 0);
continue;
}
@@ -6927,14 +6927,14 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
}
static void
-ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
const struct wmi_start_scan_arg *arg)
{
struct wmi_ie_data *ie;
struct wmi_chan_list *channels;
struct wmi_ssid_list *ssids;
struct wmi_bssid_list *bssids;
- void *ptr = tlvs->tlvs;
+ void *ptr = tlvs;
int i;
if (arg->n_channels) {
@@ -7012,7 +7012,7 @@ ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
cmd->burst_duration_ms = __cpu_to_le32(0);
@@ -7041,7 +7041,7 @@ ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
return skb;
@@ -8733,9 +8733,9 @@ int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
case WMI_VDEV_SUBTYPE_MESH_11S:
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8755,9 +8755,9 @@ static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8779,7 +8779,7 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
@@ -8918,8 +8918,6 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
if (!skb)
return ERR_PTR(-ENOMEM);
- memset(skb->data, 0, sizeof(*cmd));
-
cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9146df98fcee..2379501225a4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_H_
@@ -3008,8 +3008,11 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
- * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable
+ * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
* @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
+ * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer
+ * TID configuration for QoS related settings
+ * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@@ -3069,7 +3072,10 @@ struct host_memory_chunk {
struct wmi_host_mem_chunks {
__le32 count;
/* some fw revisions require at least 1 chunk regardless of count */
- struct host_memory_chunk items[1];
+ union {
+ struct host_memory_chunk item;
+ DECLARE_FLEX_ARRAY(struct host_memory_chunk, items);
+ };
} __packed;
struct wmi_init_cmd {
@@ -3215,23 +3221,16 @@ struct wmi_start_scan_common {
__le32 scan_ctrl_flags;
} __packed;
-struct wmi_start_scan_tlvs {
- /* TLV parameters. These includes channel list, ssid list, bssid list,
- * extra ies.
- */
- u8 tlvs[0];
-} __packed;
-
struct wmi_start_scan_cmd {
struct wmi_start_scan_common common;
__le32 burst_duration_ms;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_10x_start_scan_cmd {
struct wmi_start_scan_common common;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
struct wmi_ssid_arg {
@@ -4260,13 +4259,6 @@ struct wmi_peer_sta_ps_state_chg_event {
__le32 peer_ps_state;
} __packed;
-struct wmi_pdev_chanlist_update_event {
- /* number of channels */
- __le32 num_chan;
- /* array of channels */
- struct wmi_channel channel_list[1];
-} __packed;
-
#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
struct wmi_debug_mesg_event {
@@ -5793,30 +5785,6 @@ struct wmi_bcn_prb_info {
/* app IE */
} __packed;
-struct wmi_bcn_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* TIM IE offset from the beginning of the template. */
- __le32 tim_ie_offset;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* variable length data */
- u8 data[1];
-} __packed;
-
-struct wmi_prb_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* Variable length data */
- u8 data[1];
-} __packed;
-
enum wmi_sta_ps_mode {
/* enable power save for the given STA VDEV */
WMI_STA_PS_MODE_DISABLED = 0,
@@ -7197,7 +7165,13 @@ struct wmi_tdls_peer_capabilities {
__le32 is_peer_responder;
__le32 pref_offchan_num;
__le32 pref_offchan_bw;
- struct wmi_channel peer_chan_list[1];
+ union {
+ /* to match the legacy implementation, allocate room for
+ * at least one record even if peer_chan_len is 0
+ */
+ struct wmi_channel peer_chan_min_allocation;
+ DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list);
+ };
} __packed;
struct wmi_10_4_tdls_peer_update_cmd {
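
The two unions introduced in wmi.h replace one-element trailing arrays, which defeat flexible-array bounds checking while firmware still expects room for at least one record even when the count is zero. The pattern, sketched:

struct sketch_records {
	__le32 count;
	union {
		/* keeps sizeof() covering one record, as firmware wants */
		struct sketch_record min_allocation;
		/* proper flexible array for indexing and annotation */
		DECLARE_FLEX_ARRAY(struct sketch_record, records);
	};
} __packed;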
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 0c6ecbb9a066..c78bce19bd75 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -122,6 +122,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -205,6 +206,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "qca6390 hw2.0",
@@ -255,7 +257,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
@@ -290,6 +292,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "qcn9074 hw1.0",
@@ -372,6 +375,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "wcn6855 hw2.0",
@@ -422,7 +426,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -457,6 +461,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6855 hw2.1",
@@ -505,7 +510,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -540,6 +545,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6750 hw1.0",
@@ -621,6 +627,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -702,6 +709,93 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ },
+ {
+ .name = "qca2066 hw2.1",
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ .fw = {
+ .dir = "QCA2066/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
};
@@ -1775,10 +1869,9 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_lock(&ab->core_lock);
ath11k_thermal_unregister(ab);
- ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_spectral_deinit(ab);
- ath11k_hif_stop(ab);
+ ath11k_ce_cleanup_pipes(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -2033,6 +2126,9 @@ static void ath11k_core_reset(struct work_struct *work)
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH11K_RECOVER_START_TIMEOUT_HZ);
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 02e160d831be..b3fb74a226fb 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -147,6 +147,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
+ ATH11K_HW_QCA2066_HW21,
};
enum ath11k_firmware_mode {
@@ -314,6 +315,43 @@ struct ath11k_rekey_data {
bool enable_offload;
};
+/**
+ * struct ath11k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath11k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/**
+ * struct ath11k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath11k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6ghz_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 ap_constraint_power;
+ s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
+ struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+};
+
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -368,6 +406,8 @@ struct ath11k_vif {
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
+
+ struct ath11k_reg_tpc_power_info reg_tpc_info;
};
struct ath11k_vif_iter {
@@ -735,6 +775,7 @@ struct ath11k {
/* protected by conf_mutex */
bool ps_state_enable;
bool ps_timekeeper_enable;
+ s8 max_allowed_tx_power;
};
struct ath11k_band_cap {
@@ -918,6 +959,7 @@ struct ath11k_base {
* This may or may not be used during the runtime
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct cur_regulatory_info *reg_info_store;
/* Current DFS Regulatory */
enum ath11k_dfs_region dfs_region;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8975dc57ad77..1a62407e5a9f 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -104,11 +104,14 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached)
+ if (ring->cached) {
+ dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
+ DMA_FROM_DEVICE);
kfree(ring->vaddr_unaligned);
- else
+ } else {
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
+ }
ring->vaddr_unaligned = NULL;
}
@@ -249,7 +252,18 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->paddr_unaligned = dma_map_single(ab->dev,
+ ring->vaddr_unaligned,
+ ring->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
+ kfree(ring->vaddr_unaligned);
+ ring->vaddr_unaligned = NULL;
+ return -ENOMEM;
+ }
}
}
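The hunk above replaces virt_to_phys() on the kmalloc'ed ring with a proper dma_map_single() mapping, so the streaming-DMA API handles IOMMU translation and cache maintenance. A minimal sketch of the lifecycle this establishes, assuming a generic struct device *dev and a size variable (illustrative only, not driver code):

    void *vaddr = kzalloc(size, GFP_KERNEL);
    dma_addr_t paddr;

    if (!vaddr)
            return -ENOMEM;

    paddr = dma_map_single(dev, vaddr, size, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, paddr)) {
            kfree(vaddr);
            return -ENOMEM;
    }

    /* before the CPU reads descriptors the device has written: */
    dma_sync_single_for_cpu(dev, paddr, size, DMA_FROM_DEVICE);

    /* on cleanup, pair the map with an unmap before freeing: */
    dma_unmap_single(dev, paddr, size, DMA_FROM_DEVICE);
    kfree(vaddr);

This pairs with the ath11k_dp_srng_cleanup() change earlier in the file, where the cached branch now unmaps the buffer before kfree() instead of only freeing it.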
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index c1072e66e3e8..272b1c35f98d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -103,7 +103,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@@ -1018,7 +1018,7 @@ int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index c060c4b5c0cc..f3d04568c221 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -626,15 +626,30 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
return NULL;
}
+static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
+ struct hal_srng *srng, dma_addr_t *paddr)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+ *paddr = srng->ring_base_paddr +
+ sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
+ return srng->ring_base_vaddr + srng->u.dst_ring.tp;
+ }
+
+ return NULL;
+}
+
static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
struct hal_srng *srng)
{
+ dma_addr_t desc_paddr;
u32 *desc;
/* prefetch only if desc is available */
- desc = ath11k_hal_srng_dst_peek(ab, srng);
+ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
if (likely(desc)) {
- dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+ dma_sync_single_for_cpu(ab->dev, desc_paddr,
(srng->entry_size * sizeof(u32)),
DMA_FROM_DEVICE);
prefetch(desc);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 80447f488954..65e8f244ebb9 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@@ -674,6 +674,7 @@ struct hal_srng_config {
* @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
*/
enum hal_rx_buf_return_buf_manager {
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index e758ee8e17c9..8f7dd43dc1bd 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -246,7 +246,7 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath11k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath11k_warn(ab, "Unknown reo command %d\n", type);
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 77d8f9237680..caa6dc12a790 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -58,7 +58,7 @@ static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
- config->num_vdevs = 4;
+ config->num_vdevs = ab->hw_params.num_vdevs;
config->num_peers = 16;
config->num_tids = 32;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 1b070747a5db..14ef4eb48f80 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -226,6 +226,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
+ bool support_dual_stations;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index b13525bbbb80..a6a37d67a50a 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -255,9 +255,6 @@ static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
{
enum nl80211_he_ru_alloc ret;
@@ -1580,7 +1577,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
return;
if (vif->bss_conf.color_change_active &&
- ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
@@ -1589,7 +1586,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
arvif->bcca_zero_sent = false;
if (vif->bss_conf.color_change_active)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, 0);
ath11k_mac_setup_bcn_tmpl(arvif);
}
@@ -2297,6 +2294,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+	/* Initialize rx_mcs_160 to 9, which is an invalid value */
+ rx_mcs_160 = 9;
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
@@ -2308,6 +2307,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
}
}
+	/* Initialize rx_mcs_80 to 9, which is an invalid value */
+ rx_mcs_80 = 9;
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
@@ -3026,7 +3027,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
peer_arg.is_assoc = true;
+
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -3049,12 +3057,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
- ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
- arvif->vdev_id, bss_conf->bssid);
- return;
- }
-
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
@@ -3397,6 +3399,18 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return 0;
}
+static bool ath11k_mac_supports_station_tpc(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -3596,7 +3610,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
-
arvif->txpower = info->txpower;
ath11k_mac_txpower_recalc(ar);
}
@@ -4000,7 +4013,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
req->ssids[i].ssid_len);
}
} else {
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
@@ -4906,100 +4919,6 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
ar->num_stations--;
}
-static int ath11k_mac_station_add(struct ath11k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct peer_create_params peer_param;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- ret = ath11k_mac_inc_num_stations(arvif, sta);
- if (ret) {
- ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
- ar->max_num_stations);
- goto exit;
- }
-
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
- }
-
- peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
- peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
-
- ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
- if (ret) {
- ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- goto free_rx_stats;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
- if (!arsta->tx_stats) {
- ret = -ENOMEM;
- goto free_peer;
- }
- }
-
- if (ieee80211_vif_is_mesh(vif)) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_USE_4ADDR, 1);
- if (ret) {
- ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
- sta->addr, ret);
- goto free_tx_stats;
- }
- }
-
- ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
- if (ret) {
- ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
- goto free_tx_stats;
- }
-
- if (ab->hw_params.vdev_start_delay &&
- !arvif->is_started &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath11k_start_vdev_delay(ar->hw, vif);
- if (ret) {
- ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
- goto free_tx_stats;
- }
- }
-
- ewma_avg_rssi_init(&arsta->avg_rssi);
- return 0;
-
-free_tx_stats:
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-free_peer:
- ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
-free_rx_stats:
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-dec_num_station:
- ath11k_mac_dec_num_stations(arvif, sta);
-exit:
- return ret;
-}
-
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
struct ieee80211_sta *sta)
{
@@ -5028,140 +4947,6 @@ static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
return bw;
}
-static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
-{
- struct ath11k *ar = hw->priv;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct ath11k_peer *peer;
- int ret = 0;
-
- /* cancel must be done outside the mutex to avoid deadlock */
- if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- cancel_work_sync(&arsta->update_wk);
- cancel_work_sync(&arsta->set_4addr_wk);
- }
-
- mutex_lock(&ar->conf_mutex);
-
- if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- arsta->arvif = arvif;
- arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
- INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
- INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
-
- ret = ath11k_mac_station_add(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION;
-
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- if (!skip_peer_delete) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab,
- "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab,
- ATH11K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- }
-
- ath11k_mac_dec_num_stations(arvif, sta);
- mutex_lock(&ar->ab->tbl_mtx_lock);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (skip_peer_delete && peer) {
- peer->sta = NULL;
- } else if (peer && peer->sta == sta) {
- ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- ath11k_peer_rhash_delete(ar->ab, peer);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
- mutex_unlock(&ar->ab->tbl_mtx_lock);
-
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
- } else if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_assoc(ar, vif, sta, false);
- if (ret)
- ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
-
- spin_lock_bh(&ar->data_lock);
- /* Set arsta bw and prev bw */
- arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
- arsta->bw_prev = arsta->bw;
- spin_unlock_bh(&ar->data_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
-
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
- } else if (old_state == IEEE80211_STA_AUTHORIZED &&
- new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
-
- spin_unlock_bh(&ar->ab->base_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_disassoc(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
- }
-
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -6940,6 +6725,14 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ret);
}
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar)) {
+ struct cur_regulatory_info *reg_info;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n");
+ ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP);
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7266,6 +7059,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
+	/* TODO: For now we only set TPC power here. However, when the
+	 * channel changes, e.g. on CSA, it should be updated again.
+	 */
+ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) {
+ ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx);
+ ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
if (!restart)
ar->num_started_vdevs++;
@@ -7542,8 +7344,8 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
@@ -7589,6 +7391,501 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return 0;
}
+static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ arvif->is_started = false;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
+{
+ switch (txpwr_intrprt) {
+ /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
+ * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
+ txpwr_cnt = txpwr_cnt + 1;
+ break;
+ /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
+ * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
+ txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
+ break;
+ }
+
+ return txpwr_cnt;
+}
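As a reading aid, the level counts this function yields, derived directly from the clamp and BIT() math above rather than quoted from the standard:

    /* Illustrative expected results, not driver code. */
    ath11k_mac_get_tpe_count(IEEE80211_TPE_LOCAL_EIRP, 0);     /* -> 1 */
    ath11k_mac_get_tpe_count(IEEE80211_TPE_LOCAL_EIRP, 3);     /* -> 4 */
    ath11k_mac_get_tpe_count(IEEE80211_TPE_LOCAL_EIRP, 7);     /* clamped -> 4 */
    ath11k_mac_get_tpe_count(IEEE80211_TPE_LOCAL_EIRP_PSD, 0); /* -> 1 */
    ath11k_mac_get_tpe_count(IEEE80211_TPE_LOCAL_EIRP_PSD, 2); /* BIT(1) -> 2 */
    ath11k_mac_get_tpe_count(IEEE80211_TPE_LOCAL_EIRP_PSD, 4); /* BIT(3) -> 8 */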
+
+static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ default:
+ return 1;
+ }
+ }
+}
+
+static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+	/* Get the center frequency of the chandef's lowest channel number.
+ * For example,
+ * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1
+ * with center frequency 5955, its diff is 5965 - 5955 = 10.
+ * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1
+ * with center frequency 5955, its diff is 5985 - 5955 = 30.
+ * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1
+ * with center frequency 5955, its diff is 6025 - 5955 = 70.
+ */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
+
+static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+	/* Get the center frequency of the requested bandwidth segment.
+	 * start_seq is the center frequency of the lowest channel number.
+	 * seq 0/1/2/3 selects 20 MHz/40 MHz/80 MHz/160 MHz&80P80.
+ * For example,
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70.
+ */
+ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3)
+ return chan_def->center_freq2;
+
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
+
+static void ath11k_mac_get_psd_channel(struct ath11k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+	/* Get the center frequency of each 20 MHz subchannel.
+	 * For example, if the chan is 160 MHz with center frequency 6025,
+	 * it includes 8 channels: 1/5/9/13/17/21/25/29.
+	 * Channel number 1's center frequency is 5955; that is parameter start_freq.
+	 * Parameter i steps through the 8 channels, i = 0..7,
+	 * so channels 1/5/9/13/17/21/25/29 map to i = 0/1/2/3/4/5/6/7
+	 * with center frequencies 5955/5975/5995/6015/6035/6055/6075/6095.
+	 * The gap between channels is 20; parameter step_freq carries that gap.
+	 * With each channel's center frequency known, its struct ieee80211_channel
+	 * can be looked up and max_reg_power read from it.
+	 */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath11k_mac_get_eirp_power(struct ath11k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+	/* Get the center frequency of the 20 MHz/40 MHz/80 MHz/160 MHz&80P80
+	 * segment, then add 10 to it to land on the center frequency of an
+	 * actual channel number.
+	 * For example, when the configured channel number is 1:
+	 * the 40 MHz center frequency is 5965; adding 10 gives 5975,
+	 * which is channel number 5.
+	 * the 80 MHz center frequency is 5985; adding 10 gives 5995,
+	 * which is channel number 9.
+	 * the 160 MHz center frequency is 6025; adding 10 gives 6035,
+	 * which is channel number 17.
+	 * With the channel's center frequency known, its struct ieee80211_channel
+	 * can be looked up and max_reg_power read from it.
+	 */
+ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i);
+
+	/* For 20 MHz, the segment center frequency equals the channel's own */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ psd_power, tx_power;
+ s8 eirp_power = 0;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ }
+
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+		/* STA received a TPE IE */
+ if (is_tpe_present) {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ /* convert psd power to EIRP power based
+ * on channel width
+ */
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+		/* STA did not receive a TPE IE */
+ } else {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+ /* If firmware updated max tx power is non zero, then take
+ * the min of firmware updated ap tx power
+ * and max power derived from above mentioned parameters.
+ */
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "eirp power : %d firmware report power : %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+			/* Firmware reports a lower max_allowed_tx_power in the
+			 * vdev start response. On 6 GHz, firmware is not aware
+			 * of the EIRP power unless the driver sets it through
+			 * the WMI TPC command. So a radio that does not support
+			 * idle power save can send the maximum calculated EIRP
+			 * power directly to firmware via the TPC command,
+			 * without taking the min against the vdev start
+			 * response's max_allowed_tx_power.
+			 */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+ /* If firmware updated max tx power is non zero, then take
+ * the min of firmware updated ap tx power
+ * and max power derived from above mentioned parameters.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type);
+}
+
+static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_tx_pwr_env *single_tpe;
+ enum wmi_reg_6ghz_client_type client_type;
+ struct cur_regulatory_info *reg_info;
+ int i;
+ u8 pwr_count, pwr_interpret, pwr_category;
+ u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
+ bool use_local_tpe, non_psd_set = false, psd_set = false;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category == client_type) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
+ pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
+ local_tpe_count++;
+ else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
+ pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
+ reg_tpe_count++;
+ }
+ }
+
+ if (!reg_tpe_count && !local_tpe_count) {
+ ath11k_warn(ab,
+ "no transmit power envelope match client power type %d\n",
+ client_type);
+ return;
+ } else if (!reg_tpe_count) {
+ use_local_tpe = true;
+ } else {
+ use_local_tpe = false;
+ }
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category != client_type)
+ continue;
+
+ /* get local transmit power envelope */
+ if (use_local_tpe) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ /* get regulatory transmit power envelope */
+ } else {
+ if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ }
+ }
+
+ if (non_psd_set && !psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = false;
+ arvif->reg_tpc_info.eirp_power = 0;
+
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+
+ if (psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = true;
+
+ if (pwr_count == 0) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power : %d\n", single_tpe->tx_power[0]);
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&ctx->def);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
+ } else {
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+ }
+}
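For orientation, the tx_power_info octet parsed above packs three subfields that u8_get_bits() extracts with the masks from ieee80211.h. A minimal decode sketch, with tpe standing in for one bss_conf->tx_pwr_env[] entry (an assumed example, not a new helper):

    struct ieee80211_tx_pwr_env *tpe = &bss_conf->tx_pwr_env[0]; /* example */
    u8 count     = u8_get_bits(tpe->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
    u8 interpret = u8_get_bits(tpe->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
    u8 category  = u8_get_bits(tpe->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);

The tx_power[] entries carry half-dBm units on the air, which is why the function divides them by 2 before storing dBm values in reg_tpc_info.tpe[].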
+
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -7599,7 +7896,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct peer_create_params param;
+ struct cur_regulatory_info *reg_info;
+ enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -7607,6 +7905,24 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath11k_reg_handle_chan_list(ab, reg_info, power_type);
+ arvif->chanctx = *ctx;
+ ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
+ }
+
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
@@ -7622,21 +7938,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath11k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_start(ar);
if (ret) {
@@ -7649,15 +7950,17 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, ctx);
- if (ret) {
- ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
- arvif->vdev_id, vif->addr,
- ctx->def.chan->center_freq, ret);
- goto out;
- }
+ if (!arvif->is_started) {
+ ret = ath11k_mac_vdev_start(arvif, ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
- arvif->is_started = true;
+ arvif->is_started = true;
+ }
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
@@ -7697,8 +8000,6 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
- WARN_ON(!arvif->is_started);
-
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
spin_lock_bh(&ab->base_lock);
@@ -7722,24 +8023,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_mac_vdev_stop(arvif);
- if (ret)
- ath11k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
-
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (arvif->is_started) {
+ ret = ath11k_mac_vdev_stop(arvif);
if (ret)
- ath11k_warn(ar->ab,
- "failed to delete peer %pM for vdev %d: %d\n",
- arvif->bssid, arvif->vdev_id, ret);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "removed peer %pM vdev %d after vdev stop\n",
- arvif->bssid, arvif->vdev_id);
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
}
if (ab->hw_params.vdev_start_delay &&
@@ -8962,8 +9252,8 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_active = scan_time_msec;
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
- arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg->scan_f_passive = 1;
+ arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
ret = ath11k_start_scan(ar, arg);
@@ -9097,6 +9387,252 @@ err_fallback:
return 0;
}
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ewma_avg_rssi_init(&arsta->avg_rssi);
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_station_remove(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ int ret;
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_stop_vdev_early(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath11k_mac_station_remove(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -9288,6 +9824,33 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
+static void ath11k_mac_setup_mac_address_list(struct ath11k *ar)
+{
+ struct mac_address *addresses;
+ u16 n_addresses;
+ int i;
+
+ if (!ar->ab->hw_params.support_dual_stations)
+ return;
+
+ n_addresses = ar->ab->hw_params.num_vdevs;
+ addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
+ if (!addresses)
+ return;
+
+ memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN);
+ for (i = 1; i < n_addresses; i++) {
+ memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
+ /* set Local Administered Address bit */
+ addresses[i].addr[0] |= 0x2;
+
+ addresses[i].addr[0] += (i - 1) << 4;
+ }
+
+ ar->hw->wiphy->addresses = addresses;
+ ar->hw->wiphy->n_addresses = n_addresses;
+}
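Concretely, on support_dual_stations hardware (num_vdevs = 2 + 1) the loop above derives one extra locally administered address per additional vdev:

    /* Illustrative output, with an assumed example base MAC:
     *   addresses[0] = 00:03:7f:12:34:56   (ar->mac_addr unchanged)
     *   addresses[1] = 02:03:7f:12:34:56   (locally administered bit set)
     *   addresses[2] = 12:03:7f:12:34:56   (LAA bit set, plus (i - 1) << 4)
     */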
+
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -9307,28 +9870,46 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
- limits[0].max = 1;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
- limits[1].max = 16;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (ab->hw_params.support_dual_stations) {
+ limits[0].max = 2;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ limits[1].max = 1;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[0].num_different_channels = 2;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ } else {
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+ }
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
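Summarizing the two branches above, derived directly from the assignments rather than from any documentation:

    /* Resulting interface combination sets:
     *   support_dual_stations:  up to 2 STA + 1 AP/mesh, max interfaces =
     *                           num_vdevs (2 + 1), 2 different channels,
     *                           radar_detect_widths left unset.
     *   otherwise:              1 STA + up to 16 AP/mesh, max 16 interfaces,
     *                           1 channel, full radar_detect_widths.
     */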
@@ -9393,6 +9974,8 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
+ kfree(ar->hw->wiphy->addresses);
+
SET_IEEE80211_DEV(ar->hw, NULL);
}
@@ -9435,6 +10018,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+ ath11k_mac_setup_mac_address_list(ar);
SET_IEEE80211_DEV(ar->hw, ab->dev);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0dfdeed5177b..f5800fbecff8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -176,4 +176,7 @@ int ath11k_mac_wait_tx_complete(struct ath11k *ar);
int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6835c14b82cc..fb4ecf9a103e 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -20,35 +20,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
-static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
- {
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
+static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
.num = 20,
.name = "IPCR",
@@ -102,46 +74,18 @@ static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+static const struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
.ch_cfg = ath11k_mhi_channels_qca6390,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
.event_cfg = ath11k_mhi_events_qca6390,
};
-static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
- {
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x14,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x14,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
+static const struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
{
.num = 20,
.name = "IPCR",
@@ -195,7 +139,7 @@ static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
+static const struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
.max_channels = 30,
.timeout_ms = 10000,
.use_bounce_buf = false,
@@ -384,7 +328,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
- struct mhi_controller_config *ath11k_mhi_config;
+ const struct mhi_controller_config *ath11k_mhi_config;
int ret;
mhi_ctrl = mhi_alloc_controller();
@@ -423,7 +367,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xFFFFFFFF;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
@@ -443,6 +387,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
+ case ATH11K_HW_QCA2066_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 09e65c5e55c4..be9d2c69cc41 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -18,7 +18,8 @@
#include "qmi.h"
#define ATH11K_PCI_BAR_NUM 0
-#define ATH11K_PCI_DMA_MASK 32
+#define ATH11K_PCI_DMA_MASK 36
+#define ATH11K_PCI_COHERENT_DMA_MASK 32
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
@@ -28,6 +29,8 @@
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
+#define TCSR_SOC_HW_SUB_VER 0x1910010
+
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
@@ -526,14 +529,24 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
goto disable_device;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ ret = dma_set_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
if (ret) {
ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH11K_PCI_DMA_MASK, ret);
goto release_region;
}
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);
+
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
+ ATH11K_PCI_COHERENT_DMA_MASK, ret);
+ goto release_region;
+ }
+
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
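
A minimal standalone sketch of the split DMA mask setup above (the example_set_dma_masks() helper and its placement are illustrative, not part of the patch): streaming DMA is widened to 36 bits while coherent allocations remain within 32 bits, matching hardware that can DMA above 4 GB but keeps its consistent memory low.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	int ret;

	/* streaming (mapped) DMA may address the full 36-bit range */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (ret)
		return ret;

	/* coherent allocations stay within 32 bits */
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}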
@@ -731,8 +744,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
- const struct ath11k_pci_ops *pci_ops;
int ret;
+ u32 sub_version;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -777,6 +790,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -790,13 +809,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- pci_ops = &ath11k_pci_ops_qcn9074;
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
@@ -809,7 +836,19 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
break;
case 0x10:
case 0x11:
- ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
+ sub_version);
+ switch (sub_version) {
+ case 0x1019A0E1:
+ case 0x1019B0E1:
+ case 0x1019C0E1:
+ case 0x1019D0E1:
+ ab->hw_rev = ATH11K_HW_QCA2066_HW21;
+ break;
+ default:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ }
break;
default:
goto unsupported_wcn6855_soc;
@@ -823,7 +862,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -832,12 +870,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
- if (ret) {
- ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
- goto err_pci_free_region;
- }
-
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index e9a01f344ec6..6be73333d90b 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -72,6 +72,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
+ u64 dma_mask;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 15e2ceb22a44..add4db4c50bc 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -115,6 +115,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
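
A quick standalone consistency check (illustrative userspace C, not driver code) of the QCA2066 MSI split added above: each user's base_vector is the running sum of the preceding vector counts, and the four users total 32 vectors.

#include <assert.h>

int main(void)
{
	unsigned int mhi = 3, ce = 10, wake = 1, dp = 18;

	assert(mhi + ce + wake + dp == 32);	/* total_vectors */
	assert(0 + mhi == 3);			/* CE base_vector */
	assert(3 + ce == 13);			/* WAKE base_vector */
	assert(13 + wake == 14);		/* DP base_vector */
	return 0;
}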
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 2c7cab62b9bb..5006f81f779b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -3249,7 +3249,8 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
- ath11k_hal_dump_srng_stats(ab);
+ if (ab->is_reset)
+ ath11k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b4fd4d2107c7..737fcd450d4b 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -425,6 +425,11 @@ static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
+ if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD))
+ new_rule->psd = min_t(s8, rule1->psd, rule2->psd);
+ else
+ new_rule->flags &= ~NL80211_RRF_PSD;
+
/* To be safe, let's use the max cac timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
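
A small sketch of the PSD intersection rule added above (the types are simplified stand-ins, not the driver's): PSD survives only when both rules carry it, and the stricter (minimum) PSD value wins.

#include <stdbool.h>

struct example_rule {
	bool psd_valid;
	signed char psd;	/* dBm/MHz */
};

struct example_rule example_intersect_psd(struct example_rule a,
					  struct example_rule b)
{
	struct example_rule out = { .psd_valid = a.psd_valid && b.psd_valid };

	if (out.psd_valid)
		out.psd = a.psd < b.psd ? a.psd : b.psd;
	return out;
}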
@@ -527,13 +532,14 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
- u32 reg_flags)
+ s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -563,7 +569,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -584,7 +590,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
end_freq, bw, reg_rule->ant_gain,
- reg_rule->reg_power, flags);
+ reg_rule->reg_power, reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -605,7 +611,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -618,25 +624,68 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
*rule_idx = i;
}
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STANDARD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VERY_LOW_POWER_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect)
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct cur_reg_rule *reg_rule;
+ struct cur_reg_rule *reg_rule, *reg_rule_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
char alpha2[3];
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
- /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
- * This can be updated after complete 6 GHz regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6ghz_ap_type ap_type;
+
+ ap_type = ath11k_reg_ap_pwr_convert(power_type);
+
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6ghz_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -683,14 +732,13 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
- k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ k < reg_6ghz_number) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
} else {
break;
}
@@ -702,7 +750,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -758,6 +806,159 @@ ret:
return new_regd;
}
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
+
+static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ /* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy
+ * and one struct ieee80211_regdomain, so it can only store one group
+ * of reg rules. This means multi-interface concurrency in the same
+ * ath11k is not supported for the regdomain. So get the vdev type of
+ * the first entry for now. Once the regdomain supports concurrency,
+ * this should change.
+ */
+ arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list);
+ if (arvif)
+ return arvif->vdev_type;
+
+ return WMI_VDEV_TYPE_UNSPEC;
+}
+
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd;
+ bool intersect = false;
+ int pdev_idx;
+ struct ath11k *ar;
+ enum wmi_vdev_type vdev_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list");
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * the fw retains the current regd. We print failure info
+ * and return from here.
+ */
+ ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ return -EINVAL;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ /* Avoid default reg rule updates sent during FW recovery if
+ * it is already available
+ */
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock_bh(&ab->base_lock);
+ goto retfail;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback. In either case
+ * ath11k_reg_reset_info() needs to be called to avoid
+ * a memory leak.
+ */
+ ath11k_reg_reset_info(reg_info);
+
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ return 0;
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto retfail;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ ar = ab->pdevs[pdev_idx].ar;
+ vdev_type = ath11k_reg_get_ar_vdev_type(ar);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi handle chan list power type %d vdev type %d intersect %d\n",
+ power_type, vdev_type, intersect);
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]);
+ ab->reg_info_store[pdev_idx] = *reg_info;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI Init is to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
+ * Free previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purpose
+ */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+fallback:
+ /* Fall back to the older reg (by sending the previous country setting
+ * again) if the fw has succeeded and we failed to process here.
+ * The regdomain should be uniform across driver and fw. Since the
+ * fw has processed the command and sent a success status, we expect
+ * this function to succeed as well. If it doesn't, CTRY needs to be
+ * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but should still be handled */
+ WARN_ON(1);
+
+retfail:
+
+ return -EINVAL;
+}
+
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
@@ -781,10 +982,36 @@ void ath11k_reg_init(struct ath11k *ar)
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info)
+{
+ int i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]);
+ }
+
+ memset(reg_info, 0, sizeof(*reg_info));
+}
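
A sketch of the reset idiom used by ath11k_reg_reset_info() above, with a hypothetical two-pointer struct: kfree() accepts NULL, and the trailing memset() zeroes the pointers so a repeated reset (or a later free of the container) stays safe.

#include <linux/slab.h>
#include <linux/string.h>

struct example_info {
	int *rules_a;
	int *rules_b;
};

static void example_reset(struct example_info *info)
{
	if (!info)
		return;

	kfree(info->rules_a);	/* kfree(NULL) is a no-op */
	kfree(info->rules_b);

	memset(info, 0, sizeof(*info));	/* idempotent: safe to call again */
}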
+
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
+ for (i = 0; i < ab->num_radios; i++)
+ ath11k_reg_reset_info(&ab->reg_info_store[i]);
+
+ kfree(ab->reg_info_store);
+ ab->reg_info_store = NULL;
+
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index f28902f85e41..64edb794260a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -30,11 +30,20 @@ enum ath11k_dfs_region {
/* ATH11K Regulatory API's */
void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect);
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 43bb23265d34..302d66092b97 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -198,7 +198,7 @@ static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
u16 length;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c29b11ab5bfa..41e7499f075f 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
@@ -163,6 +163,9 @@ int ath11k_thermal_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i, ret;
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8a65fa04b48d..34ab9631ff36 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -238,8 +238,8 @@ static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
(void *)tb);
}
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -248,7 +248,7 @@ const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -2098,7 +2098,7 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
ar->ab->wmi_ab.svc_map))
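
A brief sketch of the flag-setting change above, with simplified stand-in types: rather than or-ing a bitmask into a u32 that was union'd over the bitfields (whose bit layout is implementation-defined), each flag is now written through its named bitfield member.

struct example_scan_flags {
	unsigned int f_passive:1,
		     f_chan_stat_evnt:1;
};

static void example_enable_chan_stats(struct example_scan_flags *arg)
{
	/* replaces: arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT; */
	arg->f_chan_stat_evnt = 1;
}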
@@ -2379,6 +2379,70 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct wmi_vdev_ch_power_info *ch;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+ int i, ret, len, array_len;
+
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->psd_power = param->is_psd_power;
+ cmd->eirp_power = param->eirp_power;
+ cmd->power_type_6ghz = param->ap_power_type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_info *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_CH_POWER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*ch) - TLV_HDR_SIZE);
+
+ ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
+ ch->tx_power = param->chan_power_info[i].tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ ch->chan_cfreq, ch->tx_power);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
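
For reference, the length math above in one place (a sketch reusing the driver's own types and TLV_HDR_SIZE): a fixed_param command, one array TLV header, then num_pwr_levels channel power entries.

static size_t example_tpc_msg_len(int num_pwr_levels)
{
	return sizeof(struct wmi_vdev_set_tpc_power_cmd) +	/* fixed_param */
	       TLV_HDR_SIZE +					/* array TLV hdr */
	       num_pwr_levels * sizeof(struct wmi_vdev_ch_power_info);
}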
+
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param)
{
@@ -3930,7 +3994,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
struct ath11k_vif *arvif;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -3956,8 +4020,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
- GFP_KERNEL);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -4749,6 +4812,14 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->pdevs[0].pdev_id = 0;
}
+ if (!soc->reg_info_store) {
+ soc->reg_info_store = kcalloc(soc->num_radios,
+ sizeof(*soc->reg_info_store),
+ GFP_ATOMIC);
+ if (!soc->reg_info_store)
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -4786,6 +4857,7 @@ static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
@@ -5003,7 +5075,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5028,6 +5100,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
vdev_rsp->mac_id = ev->mac_id;
vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+ vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power;
kfree(tb);
return 0;
@@ -5102,7 +5175,7 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5278,7 +5351,7 @@ static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5634,7 +5707,7 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5666,7 +5739,7 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5686,15 +5759,15 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
return 0;
}
-static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5722,7 +5795,7 @@ static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5876,7 +5949,7 @@ static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6052,7 +6125,7 @@ static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6085,7 +6158,7 @@ static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6112,7 +6185,7 @@ static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6153,14 +6226,14 @@ exit:
return idx;
}
-static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6199,7 +6272,7 @@ ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6239,7 +6312,7 @@ ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6270,7 +6343,7 @@ static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6995,7 +7068,7 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
const void **tb;
int ret, i;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7060,32 +7133,15 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
wake_up(&wmi->tx_ce_desc_wq);
}
-static bool ath11k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
-
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
-
- return false;
-}
-
-static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
- struct sk_buff *skb,
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb,
enum wmi_reg_chan_list_cmd_type id)
{
- struct cur_regulatory_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath11k *ar;
+ struct cur_regulatory_info *reg_info;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
- if (!reg_info) {
- ret = -ENOMEM;
- goto fallback;
- }
+ if (!reg_info)
+ return -ENOMEM;
if (id == WMI_REG_CHAN_LIST_CC_ID)
ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
@@ -7093,118 +7149,22 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
- ath11k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
-
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
- */
- ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
- goto mem_free;
- }
-
- pdev_idx = reg_info->phy_id;
-
- /* Avoid default reg rule updates sent during FW recovery if
- * it is already available
- */
- spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
- ab->default_regd[pdev_idx]) {
- spin_unlock(&ab->base_lock);
+ ath11k_warn(ab, "failed to extract regulatory info\n");
goto mem_free;
}
- spin_unlock(&ab->base_lock);
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
- goto mem_free;
- else
- goto fallback;
- }
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
- */
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
- (char *)reg_info->alpha2, 2))
+ ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath11k_warn(ab, "failed to process regulatory info %d\n", ret);
goto mem_free;
-
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath11k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath11k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath11k_warn(ab, "failed to build regd from reg_info\n");
- goto fallback;
- }
-
- spin_lock(&ab->base_lock);
- if (ab->default_regd[pdev_idx]) {
- /* The initial rules from FW after WMI Init is to build
- * the default regd. From then on, any rules updated for
- * the pdev could be due to user reg changes.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* This regd would be applied during mac registration and is
- * held constant throughout for regd intersection purpose
- */
- ab->default_regd[pdev_idx] = regd;
}
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
- goto mem_free;
+ kfree(reg_info);
+ return 0;
-fallback:
- /* Fallback to older reg (by sending previous country setting
- * again if fw has succeeded and we failed to process here.
- * The Regdomain should be uniform across driver and fw. Since the
- * FW has processed the command and sent a success status, we expect
- * this function to succeed as well. If it doesn't, CTRY needs to be
- * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
- */
- /* TODO: This is rare, but still should also be handled */
- WARN_ON(1);
mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2ghz_ptr);
- kfree(reg_info->reg_rules_5ghz_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+ ath11k_reg_reset_info(reg_info);
+ kfree(reg_info);
return ret;
}
@@ -7362,7 +7322,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
}
ar->last_wmi_vdev_start_status = 0;
-
+ ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power;
status = vdev_start_resp.status;
if (WARN_ON_ONCE(status)) {
@@ -7384,8 +7344,7 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
- if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -7416,7 +7375,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7884,7 +7843,7 @@ static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath11k_warn(ab, "failed to extract chan info event");
return;
}
@@ -8216,7 +8175,7 @@ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8267,7 +8226,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -8281,7 +8240,7 @@ ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8315,7 +8274,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
struct ath11k *ar;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8369,7 +8328,7 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
const struct wmi_pdev_temperature_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8409,7 +8368,7 @@ static void ath11k_fils_discovery_event(struct ath11k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8441,7 +8400,7 @@ static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8567,7 +8526,7 @@ static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
const struct wmi_twt_add_dialog_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8604,7 +8563,7 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
u64 replay_ctr;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -9793,3 +9752,9 @@ int ath11k_wmi_sta_keepalive(struct ath11k *ar,
return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
+
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
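
A usage sketch for the new helper, assuming a hypothetical example_chan_list_cmd() wrapper: callers can choose between the legacy and extended channel list event paths based on the capability.

static enum wmi_reg_chan_list_cmd_type
example_chan_list_cmd(struct ath11k *ar)
{
	/* prefer the 6 GHz-aware extended event when fw and radio allow */
	return ath11k_wmi_supports_6ghz_cc_ext(ar) ?
	       WMI_REG_CHAN_LIST_CC_EXT_ID : WMI_REG_CHAN_LIST_CC_ID;
}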
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index ff0a9a92beeb..bb419e3abb00 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -15,6 +15,7 @@ struct ath11k;
struct ath11k_fw_stats;
struct ath11k_fw_dbglog;
struct ath11k_vif;
+struct ath11k_reg_tpc_power_info;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -327,6 +328,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1880,6 +1897,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
WMI_TAG_MAX
@@ -2114,6 +2133,7 @@ enum wmi_tlv_service {
/* The second 128 bits */
WMI_MAX_EXT_SERVICE = 256,
WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
@@ -3168,6 +3188,41 @@ struct wlan_ssid {
u8 ssid[WLAN_SSID_MAX_LEN];
};
+struct wmi_vdev_ch_power_info {
+ u32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ u32 chan_cfreq;
+
+ /* Unit: dBm, either PSD/EIRP power for this frequency or
+ * incremental for non-PSD BW
+ */
+ u32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+
+ /* Value: 0 or 1, is PSD power or not */
+ u32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ u32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ u32 power_type_6ghz;
+
+ /* This fixed_param TLV is followed by the TLVs below:
+ * num_pwr_levels entries of wmi_vdev_ch_power_info.
+ * For PSD power, each entry carries the PSD/EIRP power of one
+ * 20 MHz chunk.
+ * For non-PSD power, the entries carry the 20 MHz, 40 MHz, and so on
+ * up to the BSS BW power levels.
+ * Software derives num_pwr_levels from the number of elements in the
+ * variable-length array.
+ */
+} __packed;
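
A worked sketch of num_pwr_levels under the semantics described in the comment above (the helper is illustrative): PSD power carries one entry per 20 MHz chunk, while non-PSD power carries one entry per bandwidth step up to the BSS BW.

static int example_num_pwr_levels(bool is_psd, int bss_bw_mhz)
{
	int chunks = bss_bw_mhz / 20;
	int levels = 1;

	if (is_psd)
		return chunks;		/* e.g. 80 MHz -> 4 PSD entries */

	while (chunks > 1) {		/* 20, 40, ..., bss_bw_mhz */
		chunks >>= 1;
		levels++;
	}
	return levels;			/* e.g. 80 MHz -> 3 levels */
}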
+
#define WMI_IE_BITMAP_SIZE 8
/* prefix used by scan requestor ids on the host */
@@ -3308,24 +3363,19 @@ struct scan_req_params {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 scan_ctrl_flags_ext;
u32 dwell_time_active;
u32 dwell_time_active_2g;
@@ -3339,36 +3389,31 @@ struct scan_req_params {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -4119,6 +4164,7 @@ struct wmi_vdev_start_resp_event {
};
u32 cfgd_tx_streams;
u32 cfgd_rx_streams;
+ s32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4951,6 +4997,7 @@ struct ath11k_targ_cap {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -6295,8 +6342,8 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp);
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -6479,5 +6526,9 @@ int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_va
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
const struct wmi_sta_keepalive_arg *arg);
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar);
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 62c52e733b5e..71669f94ff75 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -19,7 +19,9 @@ ath12k-y += core.o \
hw.o \
mhi.o \
pci.o \
- dp_mon.o
+ dp_mon.o \
+ fw.o \
+ p2p.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 6c01b282fcd3..391b6fb2bd42 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
+#include "fw.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -104,27 +105,66 @@ int ath12k_core_resume(struct ath12k_base *ab)
return 0;
}
-static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
- size_t name_len)
+static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len, bool with_variant,
+ bool bus_type_mode)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
- if (ab->qmi.target.bdf_ext[0] != '\0')
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
- scnprintf(name, name_len,
- "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
- ath12k_bus_str(ab->hif.bus),
- ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ switch (ab->id.bdf_search) {
+ case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+ if (bus_type_mode)
+ scnprintf(name, name_len,
+ "bus=%s",
+ ath12k_bus_str(ab->hif.bus));
+ else
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+}
+
+static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+}
+
+static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+}
+
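
A standalone sketch (userspace C, with made-up PCI and QMI IDs) of the two name forms produced above: the fully qualified PCI name tried first, and the bus-only form used as the default lookup name.

#include <stdio.h>

int main(void)
{
	char name[200];

	/* fully qualified name (hypothetical IDs) */
	snprintf(name, sizeof(name),
		 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d",
		 "pci", 0x17cb, 0x1107, 0x17cb, 0x0109, 2, 255);
	puts(name);

	/* bus-only form */
	snprintf(name, sizeof(name), "bus=%s", "pci");
	puts(name);
	return 0;
}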
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *file)
{
@@ -159,7 +199,9 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
struct ath12k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
- int bd_ie_type)
+ int ie_id,
+ int name_id,
+ int data_id)
{
const struct ath12k_fw_ie *hdr;
bool name_match_found;
@@ -169,7 +211,7 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
name_match_found = false;
- /* go through ATH12K_BD_IE_BOARD_ elements */
+ /* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath12k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
@@ -180,48 +222,50 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
- ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+ ath12k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath12k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
- switch (board_ie_id) {
- case ATH12K_BD_IE_BOARD_NAME:
+ if (board_ie_id == name_id) {
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
- break;
+ goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
- break;
+ goto next;
name_match_found = true;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found match for name '%s'",
+ "boot found match %s for name '%s'",
+ ath12k_bd_ie_type_str(ie_id),
boardname);
- break;
- case ATH12K_BD_IE_BOARD_DATA:
+ } else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
- break;
+ goto next;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found board data for '%s'", boardname);
+ "boot found %s for '%s'",
+ ath12k_bd_ie_type_str(ie_id),
+ boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
- default:
- ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+ } else {
+ ath12k_warn(ab, "unknown %s id found: %d\n",
+ ath12k_bd_ie_type_str(ie_id),
board_ie_id);
- break;
}
-
+next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
@@ -238,7 +282,10 @@ out:
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
struct ath12k_board_data *bd,
- const char *boardname)
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
{
size_t len, magic_len;
const u8 *data;
@@ -303,22 +350,23 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
goto err;
}
- switch (ie_id) {
- case ATH12K_BD_IE_BOARD:
+ if (ie_id == ie_id_match) {
ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
- ATH12K_BD_IE_BOARD);
+ ie_id_match,
+ name_id,
+ data_id);
if (ret == -ENOENT)
/* no match found, continue */
- break;
+ goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
-
+next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
@@ -328,8 +376,9 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
out:
if (!bd->data || !bd->len) {
- ath12k_err(ab,
- "failed to fetch board data for %s from %s\n",
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath12k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
@@ -356,28 +405,56 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
return 0;
}
-#define BOARD_NAME_SIZE 100
+#define BOARD_NAME_SIZE 200
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
- char boardname[BOARD_NAME_SIZE];
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
int bd_api;
int ret;
- ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ filename = ATH12K_BOARD_API2_FILE;
+
+ ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath12k_err(ab, "failed to create board name: %d", ret);
return ret;
}
bd_api = 2;
- ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath12k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
bd_api = 1;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
if (ret) {
- ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ath12k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath12k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params->fw.dir);
return ret;
}
@@ -387,6 +464,79 @@ success:
return 0;
}
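Spelled out, the lookup order implemented by ath12k_core_fetch_bdf() above is: (1) the full board name, (2) the fallback board name with the variant dropped, and (3) the pre-API2 board.bin. A minimal standalone sketch of the same chain; the helper names and board-name strings here are illustrative, not the driver's exact format:

/* Stand-ins for the real fetch calls; only the ordering matters. */
#include <stdio.h>

static int try_board_api2(const char *boardname)
{
	printf("trying board-2.bin entry '%s'\n", boardname);
	return -1;	/* pretend: no matching entry */
}

static int try_board_api1(void)
{
	printf("falling back to board.bin\n");
	return 0;
}

int main(void)
{
	/* 1. full name, variant included */
	if (try_board_api2("bus=pci,...,variant=XYZ") == 0)
		return 0;
	/* 2. fallback name, variant dropped */
	if (try_board_api2("bus=pci,...") == 0)
		return 0;
	/* 3. last resort: API 1 board.bin */
	return try_board_api1();
}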
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create default board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
+ if (ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
+
+exit:
+ if (!ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
+
+ return ret;
+}
+
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_STATIONS_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_STATIONS_SINGLE;
+}
+
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_PEERS_PDEV_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_PEERS_PDEV_SINGLE;
+}
+
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_TIDS(DBS);
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_TIDS(DBS_SBS);
+ return TARGET_NUM_TIDS(SINGLE);
+}
+
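The three helpers above encode one mapping: two radios selects the DBS targets, three radios DBS_SBS, anything else the single-PHY targets. A hypothetical caller combining them when sizing firmware resources (the helper below is illustrative, not part of this patch):

/* Illustrative consumer of the limits helpers above. */
static void ath12k_resource_limits_sketch(struct ath12k_base *ab)
{
	u32 max_sta   = ath12k_core_get_max_station_per_radio(ab);
	u32 max_peers = ath12k_core_get_max_peers_per_radio(ab);
	u32 max_tids  = ath12k_core_get_max_num_tids(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "per-radio limits: stations %u peers %u tids %u\n",
		   max_sta, max_peers, max_tids);
}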
static void ath12k_core_stop(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@@ -592,14 +742,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
ath12k_dp_cc_config(ab);
- ath12k_dp_pdev_pre_alloc(ab);
-
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
+ ath12k_dp_hal_rx_desc_init(ab);
+
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@@ -759,20 +909,30 @@ static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
struct ath12k *ar;
+ struct ath12k_hw *ah;
+ struct ieee80211_hw *hw;
bool rfkill_radio_on;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah)
continue;
- ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
+ }
+
+ hw = ah->hw;
+ wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
}
}
@@ -801,6 +961,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
spin_lock_bh(&ab->base_lock);
@@ -810,16 +971,24 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ah = ab->ah[i];
+ ieee80211_stop_queues(ah->hw);
+ }
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH12K_STATE_OFF)
continue;
- ieee80211_stop_queues(ar->hw);
ath12k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@@ -856,7 +1025,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
case ATH12K_STATE_ON:
ar->state = ATH12K_STATE_RESTARTING;
ath12k_core_halt(ar);
- ieee80211_restart_hw(ar->hw);
+ ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
case ATH12K_STATE_OFF:
ath12k_warn(ab,
@@ -979,6 +1148,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
+ ath12k_fw_map(ab);
+
return 0;
}
@@ -1007,6 +1178,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
ath12k_hif_power_down(ab);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
+ ath12k_fw_unmap(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1054,6 +1226,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
+ ab->qmi.num_radios = U8_MAX;
+ ab->slo_capable = true;
return ab;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 8458dc292821..97e5a0ccd233 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/firmware.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -24,6 +25,7 @@
#include "hal_rx.h"
#include "reg.h"
#include "dbring.h"
+#include "fw.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -55,6 +57,11 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+enum ath12k_bdf_search {
+ ATH12K_BDF_SEARCH_DEFAULT,
+ ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
@@ -259,6 +266,7 @@ struct ath12k_vif {
u8 tx_encap_type;
u8 vdev_stats_id;
u32 punct_bitmap;
+ bool ps;
};
struct ath12k_vif_iter {
@@ -420,7 +428,7 @@ struct ath12k_sta {
};
#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
#define ATH12K_NUM_CHANS 100
#define ATH12K_MAX_5G_CHAN 173
@@ -468,7 +476,7 @@ struct ath12k_per_peer_tx_stats {
struct ath12k {
struct ath12k_base *ab;
struct ath12k_pdev *pdev;
- struct ieee80211_hw *hw;
+ struct ath12k_hw *ah;
struct ath12k_wmi_pdev *wmi;
struct ath12k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
@@ -532,6 +540,7 @@ struct ath12k {
/* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
u8 pdev_idx;
u8 lmac_id;
+ u8 hw_link_id;
struct completion peer_assoc_done;
struct completion peer_delete_done;
@@ -591,6 +600,13 @@ struct ath12k {
int monitor_vdev_id;
};
+struct ath12k_hw {
+ struct ieee80211_hw *hw;
+
+ u8 num_radio;
+ struct ath12k radio[] __aligned(sizeof(void *));
+};
+
struct ath12k_band_cap {
u32 phy_id;
u32 max_bw_supported;
@@ -724,6 +740,16 @@ struct ath12k_base {
u8 fw_pdev_count;
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+
+ /* Holds information about wiphy (hw) registration.
+ *
+ * In the Multi/Single Link Operation case, all pdevs are registered
+ * as a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+ * registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[MAX_RADIOS];
+ u8 num_hw;
+
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -793,10 +819,44 @@ struct ath12k_base {
/* true means radio is on */
bool rfkill_radio_on;
+ struct {
+ enum ath12k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ u32 api_version;
+
+ const struct firmware *fw;
+ const u8 *amss_data;
+ size_t amss_len;
+ const u8 *amss_dualmac_data;
+ size_t amss_dualmac_len;
+ const u8 *m3_data;
+ size_t m3_len;
+
+ DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ } fw;
+
+ const struct hal_rx_ops *hal_rx_ops;
+
+ /* slo_capable denotes whether the single/multi link operation
+ * is supported within the same chip (SoC).
+ */
+ bool slo_capable;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
+struct ath12k_pdev_map {
+ struct ath12k_base *ab;
+ u8 pdev_idx;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@@ -810,6 +870,7 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
struct ath12k_board_data *bd);
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd);
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
@@ -818,6 +879,9 @@ int ath12k_core_suspend(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -882,4 +946,18 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
return "unknown";
}
+static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
+{
+ return hw->priv;
+}
+
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+{
+ return ah->radio;
+}
+
+static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
+{
+ return ar->ah->hw;
+}
#endif /* _CORE_H_ */
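The three accessors added above compose into a round trip. Note that ath12k_ah_to_ar() returns the flexible radio[] array itself, i.e. the first radio of the group, which is sufficient while a group carries a single link. A short sketch:

/* Sketch of the hw <-> ah <-> ar navigation added above. */
static void ath12k_accessor_sketch(struct ieee80211_hw *hw)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);	/* hw->priv */
	struct ath12k *ar = ath12k_ah_to_ar(ah);	/* &ah->radio[0] */

	/* every radio in the group maps back to the same ieee80211_hw */
	WARN_ON(ath12k_ar_to_hw(ar) != hw);
}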
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index a6f81f2f97ef..c8e1b244b69e 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -997,6 +997,29 @@ void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
}
}
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
+{
+ if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
+ ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
+ return true;
+ }
+ return false;
+}
+
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
+{
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+ /* RX TLV compaction is supported, hence switch hal_rx_ops
+ * to the compact hal_rx_ops.
+ */
+ ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
+ }
+ ab->hal.hal_desc_sz =
+ ab->hal_rx_ops->rx_desc_get_desc_size();
+}
+
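ath12k_dp_hal_rx_desc_init() only overrides hal_rx_ops when compaction is supported; the non-compact default (hal_rx_qcn9274_ops or hal_rx_wcn7850_ops, defined in hal.c below) is presumably installed earlier during chip setup, outside this hunk. The effective selection, as a sketch:

/* Effective hal_rx_ops selection; the default assignment is assumed
 * to happen elsewhere during chip init.
 */
ab->hal_rx_ops = &hal_rx_qcn9274_ops;		/* per-chip default */
if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab))
	ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
ab->hal.hal_desc_sz = ab->hal_rx_ops->rx_desc_get_desc_size();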
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 1df3cdd46140..eb2dd408e081 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -150,7 +150,7 @@ struct ath12k_pdev_dp {
#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
-#define DP_BA_WIN_SZ_MAX 256
+#define DP_BA_WIN_SZ_MAX 1024
#define DP_TCL_NUM_RING_MAX 4
@@ -170,6 +170,7 @@ struct ath12k_pdev_dp {
#define DP_REO_CMD_RING_SIZE 128
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RX_MAC_BUF_RING_SIZE 2048
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
@@ -765,6 +766,11 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
+
enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
@@ -1088,6 +1094,11 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 rx_mpdu_offset;
__le32 rx_msdu_offset;
__le32 rx_attn_offset;
+ __le32 info2;
+ __le32 reserved[2];
+ __le32 rx_mpdu_start_end_mask;
+ __le32 rx_msdu_end_word_mask;
+ __le32 info3;
} __packed;
struct htt_rx_ring_tlv_filter {
@@ -1104,6 +1115,9 @@ struct htt_rx_ring_tlv_filter {
u16 rx_msdu_end_offset;
u16 rx_msdu_start_offset;
u16 rx_attn_offset;
+ u16 rx_mpdu_start_wmask;
+ u16 rx_mpdu_end_wmask;
+ u32 rx_msdu_end_wmask;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
@@ -1820,4 +1834,6 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie);
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 desc_id);
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab);
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index be4b39f5fa80..2d56913a75d0 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -864,7 +864,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff
{
u32 rx_pkt_offset, l2_hdr_offset;
- rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
+ rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
@@ -917,7 +917,8 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
- hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+ hdr_desc =
+ ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
@@ -1130,7 +1131,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 1ee83f765929..ca76c018dd0c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -23,34 +23,34 @@
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
+ if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
- return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}
static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
@@ -58,7 +58,7 @@ static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
@@ -67,156 +67,156 @@ static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
+ return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}
static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
+ return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
+ return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}
static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
+ return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
+ return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}
static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}
static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
- ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+ ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
- ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
+ ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
- ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
+ ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}
static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+ return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
+ return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
- ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
+ ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}
static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
@@ -224,13 +224,19 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
- ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+ ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
+}
+
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
@@ -1761,7 +1767,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -2458,7 +2464,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
@@ -2473,7 +2479,7 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
@@ -2804,7 +2810,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
- u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
@@ -2844,7 +2850,7 @@ mic_fail:
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
- ieee80211_rx(ar->hw, msdu);
+ ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
return -EINVAL;
}
@@ -2854,7 +2860,7 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
if (!flags)
return;
@@ -2892,7 +2898,7 @@ static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
- u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
@@ -2968,7 +2974,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct ath12k_rx_desc_info *desc_info;
u8 dst_ind;
- hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ hal_rx_desc_sz = ab->hal.hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
@@ -3122,7 +3128,7 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
@@ -3305,7 +3311,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
@@ -3486,7 +3492,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
- (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
+ (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH12K_SKB_RXCB(skb);
@@ -3510,7 +3516,7 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
@@ -3607,7 +3613,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
@@ -3695,16 +3701,15 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int mac_id;
+ u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
- int ret, i;
+ int ret, pdev_id;
- for (i = 0; i < ab->num_radios; i++)
- __skb_queue_head_init(&msdu_list[i]);
+ __skb_queue_head_init(&msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
@@ -3737,11 +3742,6 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
}
- /* FIXME: Extract mac id correctly. Since descs are not tied
- * to mac, we can extract from vdev id in ring desc.
- */
- mac_id = 0;
-
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
@@ -3771,7 +3771,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
- __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ __skb_queue_tail(&msdu_list, msdu);
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
@@ -3788,21 +3789,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
rcu_read_lock();
- for (i = 0; i < ab->num_radios; i++) {
- if (!rcu_dereference(ab->pdevs_active[i])) {
- __skb_queue_purge(&msdu_list[i]);
+ while ((msdu = __skb_dequeue(&msdu_list))) {
+ mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+ (struct hal_rx_desc *)msdu->data);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ ar = ab->pdevs[pdev_id].ar;
+
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
continue;
}
- ar = ab->pdevs[i].ar;
-
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- __skb_queue_purge(&msdu_list[i]);
+ dev_kfree_skb_any(msdu);
continue;
}
-
- while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
- ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
done:
@@ -3922,7 +3924,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3935,14 +3937,20 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
+
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+ tlv_filter.rx_mpdu_start_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
+ tlv_filter.rx_msdu_end_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
+ tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
+ }
- /* TODO: Selectively subscribe to required qwords within msdu_end
- * and mpdu_start and setup the mask in below msg
- * and modify the rx_desc struct
- */
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
@@ -3957,7 +3965,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3973,9 +3981,9 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to required qwords within msdu_end
* and mpdu_start and setup the mask in below msg
@@ -4086,7 +4094,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
- i, 1024);
+ i, DP_RX_MAC_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
i);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 62f9cdbb811c..572b87153647 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -151,7 +151,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
@@ -401,7 +401,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
}
}
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}
static void
@@ -498,7 +498,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
rcu_read_unlock();
@@ -837,7 +837,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -964,6 +964,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
+ if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+ tlv_filter->rx_msdu_end_wmask > 0) {
+ cmd->info2 |=
+ le32_encode_bits(true,
+ HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
+ cmd->rx_mpdu_start_end_mask =
+ le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+ /* mpdu_end is not used by any hardware so far;
+ * assign it in the future if any chip starts
+ * using it through hal ops.
+ */
+ cmd->rx_mpdu_start_end_mask |=
+ le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+ cmd->rx_msdu_end_word_mask =
+ le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+ }
+
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
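The info2/word-mask encoding above is plain FIELD_PREP-style packing into little-endian words. A standalone model of the rx_mpdu_start_end_mask packing, using the same GENMASK layout as the HTT_RX_RING_SELECTION_CFG_* definitions in dp.h (bits 15:0 mpdu_start, bits 18:16 mpdu_end):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Model of the packing done in ath12k_dp_tx_htt_rx_filter_setup(). */
static __le32 pack_mpdu_start_end_mask(u16 start_wmask, u16 end_wmask)
{
	__le32 val;

	val  = le32_encode_bits(start_wmask, GENMASK(15, 0));
	val |= le32_encode_bits(end_wmask, GENMASK(18, 16));
	return val;
}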
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
new file mode 100644
index 000000000000..5be4b2d4a19d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+
+#include "debug.h"
+
+static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
+ const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath12k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ ab->fw.fw = ath12k_core_firmware_request(ab, name);
+ if (IS_ERR(ab->fw.fw)) {
+ ret = PTR_ERR(ab->fw.fw);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to load %s: %d\n", name, ret);
+ ab->fw.fw = NULL;
+ return ret;
+ }
+
+ data = ab->fw.fw->data;
+ len = ab->fw.fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH12K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath12k_err(ab, "firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH12K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath12k_err(ab, "Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ /* make sure there's space for padding */
+ if (magic_len > len) {
+ ath12k_err(ab, "No space for padding after magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath12k_fw_ie)) {
+ hdr = (struct ath12k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath12k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH12K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH12K_FW_IE_FEATURES:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH12K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ab->fw.fw_features);
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
+ ab->fw.fw_features,
+ sizeof(ab->fw.fw_features));
+ break;
+ case ATH12K_FW_IE_AMSS_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.amss_data = data;
+ ab->fw.amss_len = ie_len;
+ break;
+ case ATH12K_FW_IE_M3_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found m3 image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.m3_data = data;
+ ab->fw.m3_len = ie_len;
+ break;
+ case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found dualmac fw image ie (%zd B)\n",
+ ie_len);
+ ab->fw.amss_dualmac_data = data;
+ ab->fw.amss_dualmac_len = ie_len;
+ break;
+ default:
+ ath12k_warn(ab, "Unknown FW IE: %u\n", ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ /* make sure there's space for padding */
+ if (ie_len > len)
+ break;
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ return 0;
+
+err:
+ release_firmware(ab->fw.fw);
+ ab->fw.fw = NULL;
+ return ret;
+}
+
+void ath12k_fw_map(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_fw_request_firmware_api_n(ab, ATH12K_FW_API2_FILE);
+ if (ret == 0)
+ ab->fw.api_version = 2;
+ else
+ ab->fw.api_version = 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "using fw api %d\n",
+ ab->fw.api_version);
+}
+
+void ath12k_fw_unmap(struct ath12k_base *ab)
+{
+ release_firmware(ab->fw.fw);
+ memset(&ab->fw, 0, sizeof(ab->fw));
+}
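The ATH12K_FW_IE_FEATURES loop above maps bit i of the IE payload byte-wise, LSB first, onto feature bit i, and stops at the IE boundary so a short IE from older firmware is handled safely. The same walk in a standalone form:

#include <stdio.h>

int main(void)
{
	unsigned char ie[] = { 0x01 };	/* example payload: feature 0 set */
	size_t ie_len = sizeof(ie);
	int feature_count = 1;	/* stands in for ATH12K_FW_FEATURE_COUNT */

	for (int i = 0; i < feature_count; i++) {
		size_t index = i / 8;
		int bit = i % 8;

		if (index == ie_len)
			break;	/* never read past the IE */

		if (ie[index] & (1 << bit))
			printf("firmware feature %d advertised\n", i);
	}

	return 0;
}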
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
new file mode 100644
index 000000000000..3ff041f15fa0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_FW_H
+#define ATH12K_FW_H
+
+#define ATH12K_FW_API2_FILE "firmware-2.bin"
+#define ATH12K_FIRMWARE_MAGIC "QCOM-ATH12K-FW"
+
+enum ath12k_fw_ie_type {
+ ATH12K_FW_IE_TIMESTAMP = 0,
+ ATH12K_FW_IE_FEATURES = 1,
+ ATH12K_FW_IE_AMSS_IMAGE = 2,
+ ATH12K_FW_IE_M3_IMAGE = 3,
+ ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4,
+};
+
+enum ath12k_fw_features {
+ /* The firmware supports setting the QRTR id via register
+ * PCIE_LOCAL_REG_QRTR_NODE_ID
+ */
+ ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+
+ /* keep last */
+ ATH12K_FW_FEATURE_COUNT,
+};
+
+void ath12k_fw_map(struct ath12k_base *ab);
+void ath12k_fw_unmap(struct ath12k_base *ab);
+
+#endif /* ATH12K_FW_H */
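A hypothetical consumer of the feature bitmap; test_bit() is the read-side pairing of the __set_bit() calls in ath12k_fw_request_firmware_api_n():

/* Hypothetical helper, not part of this patch. */
static bool ath12k_fw_supports_multi_qrtr_id(struct ath12k_base *ab)
{
	return test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features);
}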
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index a489369d8068..78310da8cfe8 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -626,6 +626,21 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
return 0;
}
+static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void)
+{
+ return QCN9274_MPDU_START_WMASK;
+}
+
+static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void)
+{
+ return QCN9274_MSDU_END_WMASK;
+}
+
+static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void)
+{
+ return &hal_rx_qcn9274_compact_ops;
+}
+
static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
@@ -680,7 +695,17 @@ static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_qcn9274_ops = {
+static u32 ath12k_hw_qcn9274_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
@@ -712,13 +737,367 @@ const struct hal_ops hal_qcn9274_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
- .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
+};
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_QCN9274_INFO5_TID);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
+
+ info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+ desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274_compact.msdu_payload[0];
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274_compact.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[5] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[6] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+ crypto_hdr[7] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274_compact);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
+ RX_MSDU_END_64_TLV_SRC_LINK_ID);
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
+ .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link,
+};
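This series moves all RX-descriptor accessors out of struct hal_ops into a dedicated struct hal_rx_ops, so qcn9274 can switch between the full and compact descriptor layouts at runtime while the SRNG-level ops stay fixed per chip. A sketch of the function-pointer table selection, using invented names and sizes:

#include <stdio.h>

struct rx_desc; /* opaque, only ever passed by pointer */

/* Per-layout accessor table, analogous to struct hal_rx_ops. */
struct rx_ops {
        unsigned int (*desc_size)(void);
        int (*msdu_done)(const struct rx_desc *desc);
};

static unsigned int full_size(void)  { return 128; }
static unsigned int small_size(void) { return 64; }
static int any_msdu_done(const struct rx_desc *desc) { (void)desc; return 1; }

static const struct rx_ops full_ops    = { full_size,  any_msdu_done };
static const struct rx_ops compact_ops = { small_size, any_msdu_done };

int main(void)
{
        int use_compact = 1; /* decided once, at probe time */
        const struct rx_ops *ops = use_compact ? &compact_ops : &full_ops;

        /* Hot-path code only ever calls through the chosen table. */
        printf("rx desc size: %u\n", ops->desc_size());
        return 0;
}

The payoff is visible just below: hal_qcn9274_ops shrinks to the handful of hooks that really are per-chip.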
+
+const struct hal_ops hal_qcn9274_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get,
+ .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get,
+ .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops,
};
static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
@@ -1134,7 +1513,17 @@ static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_wcn7850_ops = {
+static u32 ath12k_hw_wcn7850_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_wcn7850);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
@@ -1167,13 +1556,21 @@ const struct hal_ops hal_wcn7850_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
- .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_wcn7850_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = NULL,
+ .rxdma_ring_wmask_rx_msdu_end = NULL,
+ .get_hal_rx_compact_ops = NULL,
};
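hal_wcn7850_ops deliberately leaves the write-mask and compact-ops hooks NULL: wcn7850 has no compact descriptor mode, so these ops are optional and every caller must guard the pointer before the call. A small sketch of the guarded-call idiom, with invented names and a made-up fallback value:

#include <stdint.h>
#include <stdio.h>

struct ops {
        /* Optional hook; NULL on chips without the feature. */
        uint16_t (*wmask_get)(void);
};

static uint16_t qcn9274_wmask(void) { return 0x03ff; }

static const struct ops with_hook    = { .wmask_get = qcn9274_wmask };
static const struct ops without_hook = { .wmask_get = NULL };

static uint16_t wmask_or_default(const struct ops *ops)
{
        /* Fall back to "write everything" when the op is absent. */
        return ops->wmask_get ? ops->wmask_get() : 0xffff;
}

int main(void)
{
        printf("0x%04x 0x%04x\n",
               (unsigned int)wmask_or_default(&with_hook),
               (unsigned int)wmask_or_default(&without_hook));
        return 0;
}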
static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index fc47e7e6b498..107927d64bbb 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@@ -1023,6 +1023,8 @@ struct ath12k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
+
+ u32 hal_desc_sz;
};
/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
@@ -1031,7 +1033,7 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
u8 rbm_id;
};
-struct hal_ops {
+struct hal_rx_ops {
bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
@@ -1070,18 +1072,30 @@ struct hal_ops {
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
- int (*create_srng_config)(struct ath12k_base *ab);
bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_desc_size)(void);
+ u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
+};
+
+struct hal_ops {
+ int (*create_srng_config)(struct ath12k_base *ab);
+ u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
+ u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
+ const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
extern const struct hal_ops hal_qcn9274_ops;
extern const struct hal_ops hal_wcn7850_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops;
+extern const struct hal_rx_ops hal_rx_wcn7850_ops;
+
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
int tid, u32 ba_window_size,
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 6c17adc6d60b..63340256d3f6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2500,13 +2500,13 @@ struct hal_rx_reo_queue {
#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(9, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(11, 10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(24, 13)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(27)
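Widening BA_WINDOW_SIZE from GENMASK(7, 0) to GENMASK(9, 0) makes room for block-ack windows beyond 256 frames (presumably the 1024-frame windows served by the new hal_rx_reo_queue_1k below), and since the INFO2 fields are packed back to back, PN_SIZE, SVLD, SSN and the remaining bits all shift up by two positions in lockstep. A standalone sketch of the mask arithmetic, assuming the kernel's usual GENMASK() shape for 32-bit words:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's GENMASK() for 32-bit fields. */
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

#define BA_WINDOW_SIZE GENMASK(9, 0)   /* was GENMASK(7, 0) */
#define PN_SIZE        GENMASK(11, 10) /* was GENMASK(9, 8) */

static uint32_t field_get(uint32_t mask, uint32_t word)
{
        /* Mask the field, then shift down by the mask's lowest set bit. */
        return (word & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint32_t info2 = (1023u << 0) | (2u << 10); /* window 1023, pn 2 */

        printf("ba window %u, pn size %u\n",
               (unsigned int)field_get(BA_WINDOW_SIZE, info2),
               (unsigned int)field_get(PN_SIZE, info2));
        return 0;
}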
struct hal_reo_update_rx_queue {
struct hal_reo_cmd_hdr cmd;
@@ -2517,6 +2517,12 @@ struct hal_reo_update_rx_queue {
__le32 pn[4];
} __packed;
+struct hal_rx_reo_queue_1k {
+ struct hal_desc_header desc_hdr;
+ __le32 rx_bitmap_1023_288[23];
+ __le32 reserved[8];
+} __packed;
+
#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index 4f25eb9f7745..f7c1aaa3b5d4 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -247,7 +247,7 @@ int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath12k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath12k_warn(ab, "Unknown reo command %d\n", type);
@@ -688,23 +688,28 @@ void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
- u32 num_ext_desc;
+ u32 num_ext_desc, num_1k_desc = 0;
if (ba_window_size <= 1) {
if (tid != HAL_DESC_REO_NON_QOS_TID)
num_ext_desc = 1;
else
num_ext_desc = 0;
+
} else if (ba_window_size <= 105) {
num_ext_desc = 1;
} else if (ba_window_size <= 210) {
num_ext_desc = 2;
- } else {
+ } else if (ba_window_size <= 256) {
num_ext_desc = 3;
+ } else {
+ num_ext_desc = 10;
+ num_1k_desc = 1;
}
return sizeof(struct hal_rx_reo_queue) +
- (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) +
+ (num_1k_desc * sizeof(struct hal_rx_reo_queue_1k));
}
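Per the new sizing above, a BA window larger than 256 frames costs ten extension blocks plus one hal_rx_reo_queue_1k, whose rx_bitmap_1023_288[23] array carries the 736 extra bitmap bits (23 x 32) for sequence offsets 288-1023. A sketch of the same size arithmetic, with illustrative struct sizes since the real blocks are hardware-defined:

#include <stddef.h>
#include <stdio.h>

/* Illustrative sizes; the real descriptor blocks are hardware-defined. */
#define BASE_QDESC_SZ 96u
#define EXT_QDESC_SZ  64u
#define QDESC_1K_SZ   (4u + 23u * 4u + 8u * 4u) /* hdr + bitmap + rsvd */

static size_t reo_qdesc_size(unsigned int ba_window, int qos_tid)
{
        unsigned int num_ext, num_1k = 0;

        if (ba_window <= 1)
                num_ext = qos_tid ? 1 : 0;
        else if (ba_window <= 105)
                num_ext = 1;
        else if (ba_window <= 210)
                num_ext = 2;
        else if (ba_window <= 256)
                num_ext = 3;
        else {
                /* beyond 256 frames: ten ext blocks plus the 1k bitmap */
                num_ext = 10;
                num_1k = 1;
        }

        return BASE_QDESC_SZ + num_ext * EXT_QDESC_SZ + num_1k * QDESC_1K_SZ;
}

int main(void)
{
        printf("%zu %zu\n", reo_qdesc_size(64, 1), reo_qdesc_size(1024, 1));
        return 0;
}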
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index de60d988d860..0b17dfd47856 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -897,7 +897,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -914,6 +913,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
{
.name = "wcn7850 hw2.0",
@@ -950,7 +956,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.vdev_start_delay = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.idle_ps = true,
@@ -960,7 +969,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = true,
- .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
.num_tcl_banks = 7,
.max_tx_ring = 3,
@@ -978,6 +986,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 1,
.rddm_size = 0x780000,
+
+ .def_num_link = 2,
+ .max_mlo_peer = 32,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = true,
},
{
.name = "qcn9274 hw2.0",
@@ -987,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
- .max_radios = 1,
+ .max_radios = 2,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
@@ -1023,7 +1038,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -1040,6 +1054,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index d2622bfef942..87965980b938 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@@ -17,19 +17,30 @@
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
-#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \
+ TARGET_NUM_VDEVS)
/* Num of peers for Single Radio mode */
-#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE)
/* Num of peers for DBS */
-#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS)
/* Num of peers for DBS_SBS */
-#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
-/* Max num of stations (per radio) */
-#define TARGET_NUM_STATIONS 512
+/* Max num of stations for Single Radio mode */
+#define TARGET_NUM_STATIONS_SINGLE 512
+
+/* Max num of stations for DBS */
+#define TARGET_NUM_STATIONS_DBS 128
+
+/* Max num of stations for DBS_SBS */
+#define TARGET_NUM_STATIONS_DBS_SBS 128
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
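TARGET_NUM_PEERS(x) resolves the per-mode constant with token pasting, so a call site writes TARGET_NUM_PEERS(DBS) instead of open-coding a branch over modes. A minimal sketch of the ##-dispatch idiom, pre-evaluating the values that fall out of the macros above (17 vdevs per pdev):

#include <stdio.h>

#define NUM_PEERS_SINGLE  529 /* 512 stations + 17 vdevs */
#define NUM_PEERS_DBS     290 /* 2 * (128 + 17) */
#define NUM_PEERS_DBS_SBS 435 /* 3 * (128 + 17) */

/* Token pasting turns NUM_PEERS(DBS) into NUM_PEERS_DBS during
 * preprocessing; the argument must be a literal token. */
#define NUM_PEERS(mode) NUM_PEERS_##mode

int main(void)
{
        printf("%d %d %d\n", NUM_PEERS(SINGLE), NUM_PEERS(DBS),
               NUM_PEERS(DBS_SBS));
        return 0;
}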
#define TARGET_NUM_PEER_KEYS 2
@@ -66,6 +77,8 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
+#define TARGET_RX_PEER_METADATA_VER_V1A 2
+#define TARGET_RX_PEER_METADATA_VER_V1B 3
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -174,7 +187,6 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
- u32 hal_desc_sz;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -192,6 +204,13 @@ struct ath12k_hw_params {
u32 rfkill_on_level;
u32 rddm_size;
+
+ u8 def_num_link;
+ u16 max_mlo_peer;
+
+ u32 otp_board_id_register;
+
+ bool supports_sta_ps;
};
struct ath12k_hw_ops {
@@ -242,10 +261,16 @@ enum ath12k_bd_ie_board_type {
ATH12K_BD_IE_BOARD_DATA = 1,
};
+enum ath12k_bd_ie_regdb_type {
+ ATH12K_BD_IE_REGDB_NAME = 0,
+ ATH12K_BD_IE_REGDB_DATA = 1,
+};
+
enum ath12k_bd_ie_type {
/* contains sub IEs of enum ath12k_bd_ie_board_type */
ATH12K_BD_IE_BOARD = 0,
- ATH12K_BD_IE_BOARD_EXT = 1,
+ /* contains sub IEs of enum ath12k_bd_ie_regdb_type */
+ ATH12K_BD_IE_REGDB = 1,
};
struct ath12k_hw_regs {
@@ -315,6 +340,18 @@ struct ath12k_hw_regs {
u32 hal_reo_status_ring_base;
};
+static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH12K_BD_IE_BOARD:
+ return "board data";
+ case ATH12K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
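One detail worth noting in the helper above: the "unknown" fallback is returned after the switch rather than from a default: case. With every enumerator covered and no default, the compiler's -Wswitch can flag any future ath12k_bd_ie_type value that is missing a string. A sketch of the idiom:

#include <stdio.h>

enum ie_type { IE_BOARD = 0, IE_REGDB = 1 };

static const char *ie_type_str(enum ie_type t)
{
        switch (t) {
        case IE_BOARD:
                return "board data";
        case IE_REGDB:
                return "regdb data";
        }

        /* Reached only for out-of-range values; keeping the fallback
         * outside the switch (no default case) lets -Wswitch warn when
         * a new enumerator is added without a matching case. */
        return "unknown";
}

int main(void)
{
        printf("%s\n", ie_type_str(IE_REGDB));
        return 0;
}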
+
int ath12k_hw_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 88cec54c6c2e..52a5fb8b03e9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -241,8 +241,8 @@ static const u32 ath12k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -542,7 +542,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
- ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
flags,
ath12k_get_arvif_iter,
&arvif_iter);
@@ -563,7 +563,8 @@ struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -1040,7 +1041,7 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
if (ar->monitor_started)
return 0;
- ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
ath12k_mac_get_any_chandef_iter,
&chandef);
if (!chandef)
@@ -1083,9 +1084,49 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath12k_mac_config(struct ath12k *ar, u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
@@ -1122,11 +1163,84 @@ err_mon_del:
return ret;
}
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_mac_config(ar, changed);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
+ ar->pdev_idx, ret);
+
+ return ret;
+}
+
+static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie) {
+ ath12k_warn(ar->ab, "no P2P IE found in beacon\n");
+ return -ENOENT;
+ }
+
+ ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ const u8 *next, *end;
+ size_t len;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
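ath12k_mac_remove_vendor_ie() deletes an element in place by sliding the buffer tail over it, taking the element length from the standard IE TLV layout (ie[1] is the payload length, plus two header bytes). A standalone sketch of the memmove pattern on a plain buffer, with the find step simplified to a caller-supplied pointer:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Remove the TLV element starting at 'ie' from buf[0..len) and return
 * the new length; ie[1] holds the payload length per IEEE 802.11. */
static size_t remove_ie(unsigned char *buf, size_t len, unsigned char *ie)
{
        size_t ie_len = (size_t)ie[1] + 2;
        unsigned char *end = buf + len;
        unsigned char *next = ie + ie_len;

        if (next > end) /* malformed element, leave buffer untouched */
                return len;

        memmove(ie, next, (size_t)(end - next));
        return len - ie_len;
}

int main(void)
{
        /* Two elements: id 0 len 2, then vendor id 221 len 3. */
        unsigned char buf[] = { 0, 2, 'a', 'b', 221, 3, 'x', 'y', 'z' };
        size_t len = remove_ie(buf, sizeof(buf), &buf[4]);

        printf("new len %zu, first id %u\n", len, (unsigned int)buf[0]);
        return 0;
}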
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
@@ -1154,14 +1268,37 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
+ ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
- kfree_skb(bcn);
+ /* P2P IE is inserted by firmware automatically (as
+ * configured above) so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
+ }
+
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
+free_bcn_skb:
+ kfree_skb(bcn);
return ret;
}
@@ -1214,6 +1351,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
@@ -1228,7 +1366,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
- arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
@@ -1242,6 +1380,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
@@ -1250,7 +1389,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
- bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (arvif->rsnie_present || arvif->wpaie_present) {
@@ -1270,7 +1409,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
ies->data,
ies->len);
rcu_read_unlock();
- cfg80211_put_bss(ar->hw->wiphy, bss);
+ cfg80211_put_bss(hw->wiphy, bss);
}
/* FIXME: base on RSN IE/WPA IE is a correct idea? */
@@ -1304,6 +1443,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
enum nl80211_band band;
u32 ratemask;
u8 rate;
@@ -1315,7 +1455,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
return;
band = def.chan->band;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -2266,12 +2406,11 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
ath12k_smps_map[smps]);
}
-static void ath12k_bss_assoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static void ath12k_bss_assoc(struct ath12k *ar,
+ struct ath12k_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2361,11 +2500,9 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
-static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_bss_disassoc(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2413,6 +2550,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct cfg80211_chan_def *def)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
u8 basic_rate_idx;
int hw_rate_code;
@@ -2422,7 +2560,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[def->chan->band];
+ sband = hw->wiphy->bands[def->chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
@@ -2449,6 +2587,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct sk_buff *tmpl;
int ret;
u32 interval;
@@ -2457,7 +2596,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
- tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif);
if (tmpl)
ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
@@ -2465,7 +2604,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
unsol_bcast_probe_resp_enabled = 1;
interval = info->unsol_bcast_probe_resp_interval;
- tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw,
arvif->vif);
if (tmpl)
ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
@@ -2491,13 +2630,60 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
return ret;
}
-static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ enable_ps = arvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+}
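When dynamic_ps_timeout is zero, the helper above substitutes one beacon interval converted from 802.11 time units to the milliseconds the firmware expects; with 1 TU = 1024 us, a typical 100-TU beacon interval yields a 102 ms inactivity timeout. A tiny sketch of that conversion, assuming the standard TU definition:

#include <stdint.h>
#include <stdio.h>

/* 1 TU = 1024 microseconds per IEEE 802.11. */
static uint32_t tu_to_usec(uint32_t tu)
{
        return tu * 1024;
}

int main(void)
{
        uint32_t beacon_int_tu = 100; /* common default */

        /* Firmware rejects a timeout of 0, so fall back to one
         * beacon interval, expressed in milliseconds. */
        printf("ps inactivity timeout: %u ms\n",
               (unsigned int)(tu_to_usec(beacon_int_tu) / 1000));
        return 0;
}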
+
+static void ath12k_mac_bss_info_changed(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -2510,7 +2696,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
u8 rateidx;
u32 rate;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -2666,9 +2852,9 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc)
- ath12k_bss_assoc(hw, vif, info);
+ ath12k_bss_assoc(ar, arvif, info);
else
- ath12k_bss_disassoc(hw, vif);
+ ath12k_bss_disassoc(ar, arvif);
}
if (changed & BSS_CHANGED_TXPOWER) {
@@ -2768,14 +2954,35 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath12k_mac_fils_discovery(arvif, info);
- if (changed & BSS_CHANGED_EHT_PUNCTURING)
- arvif->punct_bitmap = info->eht_puncturing;
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params->supports_sta_ps) {
+ arvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
+ }
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_mac_bss_info_changed(ar, arvif, info, changed);
mutex_unlock(&ar->conf_mutex);
}
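This hunk is the template for the refactor running through the whole file: the mac80211 entry point becomes a thin wrapper that resolves the radio from the new ath12k_hw container and takes conf_mutex, while the real work moves into a helper guarded by lockdep_assert_held() so future multi-link callers can invoke it with the lock already held. A sketch of that shape, substituting pthreads for the kernel mutex/lockdep pair:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int conf_mutex_held; /* stand-in for lockdep's tracking */

/* Locked helper: callers must already hold conf_mutex. */
static void bss_info_changed(int vdev_id)
{
        assert(conf_mutex_held); /* kernel: lockdep_assert_held() */
        printf("updating bss info for vdev %d\n", vdev_id);
}

/* Thin entry-point wrapper: take the lock, delegate, drop it. */
static void op_bss_info_changed(int vdev_id)
{
        pthread_mutex_lock(&conf_mutex);
        conf_mutex_held = 1;

        bss_info_changed(vdev_id);

        conf_mutex_held = 0;
        pthread_mutex_unlock(&conf_mutex);
}

int main(void)
{
        op_bss_info_changed(0);
        return 0;
}

The same wrapper/helper split repeats below for hw_scan, conf_tx, ampdu_action, start/stop, the chanctx ops and the filter configuration.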
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -2784,7 +2991,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
if (ar->scan.is_roc && ar->scan.roc_notify)
- ieee80211_remain_on_channel_expired(ar->hw);
+ ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
if (!ar->scan.is_roc) {
@@ -2795,7 +3002,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ATH12K_SCAN_STARTING)),
};
- ieee80211_scan_completed(ar->hw, &info);
+ ieee80211_scan_completed(hw, &info);
}
ar->scan.state = ATH12K_SCAN_IDLE;
@@ -2940,13 +3147,16 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -2988,7 +3198,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
for (i = 0; i < arg.num_ssids; i++)
arg.ssid[i] = req->ssids[i];
} else {
- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_f_passive = 1;
}
if (req->n_channels) {
@@ -3014,7 +3224,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
}
/* Add a margin to account for event/command processing */
- ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg.max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
@@ -3025,13 +3235,17 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
@@ -3159,8 +3373,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
@@ -3175,6 +3390,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 1;
@@ -3696,7 +3914,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
if (ab->hw_params->vdev_start_delay &&
!arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath12k_start_vdev_delay(ar->hw, vif);
+ ret = ath12k_start_vdev_delay(ar, arvif);
if (ret) {
ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
goto free_peer;
@@ -3750,7 +3968,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_peer *peer;
@@ -3761,6 +3980,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -3775,6 +3996,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
@@ -3856,6 +4084,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -3863,7 +4092,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
s16 txpwr;
@@ -3879,6 +4109,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3899,12 +4131,15 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
u32 bw, smps;
+ ar = ath12k_ah_to_ar(ah);
+
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
@@ -3964,10 +4199,10 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
-static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif,
u16 ac, bool enable)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
u32 value;
int ret;
@@ -4021,17 +4256,16 @@ exit:
return ret;
}
-static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- unsigned int link_id, u16 ac,
- const struct ieee80211_tx_queue_params *params)
+static int ath12k_mac_conf_tx(struct ath12k_vif *arvif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct wmi_wmm_params_arg *p = NULL;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (ac) {
case IEEE80211_AC_VO:
@@ -4061,17 +4295,36 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
&arvif->wmm_params);
if (ret) {
- ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n",
+ ar->pdev_idx, ret);
goto exit;
}
- ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
-
+ ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd);
if (ret)
- ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n",
+ ar->pdev_idx, ret);
exit:
+ return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -4782,7 +5035,7 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
- ieee80211_free_txskb(ar->hw, skb);
+ ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4914,8 +5167,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath12k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -4959,20 +5212,41 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
return 0;
}
+static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ if (likely(!is_prb_rsp))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->u.ap.noa_data &&
+ !pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct ath12k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
u32 info_flags = info->flags;
@@ -4987,10 +5261,11 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
- is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
@@ -5000,6 +5275,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
+ /* This case applies only to P2P GO */
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
+ ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
+
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
@@ -5018,7 +5297,7 @@ void ath12k_mac_drain_tx(struct ath12k *ar)
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* TODO: Need to support new monitor mode */
}
@@ -5044,14 +5323,12 @@ static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
ATH12K_RECONFIGURE_TIMEOUT_HZ);
}
-static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+static int ath12k_mac_start(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- ath12k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -5074,14 +5351,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ ath12k_err(ab, "failed to enable PMF QOS: %d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
goto err;
}
@@ -5111,7 +5388,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ ath12k_err(ab, "failed to enable MESH MCAST ENABLE: %d\n", ret);
goto err;
}
@@ -5130,14 +5407,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
* such as rssi, rx_duration.
*/
ret = ath12k_mac_config_mon_status_default(ar, true);
- if (ret && (ret != -ENOTSUPP)) {
+ if (ret && (ret != -EOPNOTSUPP)) {
ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
ret);
goto err;
}
- if (ret == -ENOTSUPP)
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ if (ret == -EOPNOTSUPP)
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
"monitor status config is not yet supported");
/* Configure the hash seed for hash based reo dest ring selection */
@@ -5159,7 +5436,6 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
&ab->pdevs[ar->pdev_idx]);
return 0;
-
err:
ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
@@ -5167,6 +5443,25 @@ err:
return ret;
}
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ath12k_err(ab, "failed to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath12k_mac_rfkill_config(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -5224,17 +5519,14 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
return 0;
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_stop(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
- ath12k_mac_drain_tx(ar);
-
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
- if (ret && (ret != -ENOTSUPP))
+ if (ret && (ret != -EOPNOTSUPP))
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
@@ -5260,6 +5552,16 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ ath12k_mac_drain_tx(ar);
+
+ ath12k_mac_stop(ar);
+}
+
static u8
ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
{
@@ -5269,7 +5571,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
- if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
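The one-line fix here replaces a comparison against the ATH12K_INVAL_VDEV_STATS_ID sentinel with a proper range check against ATH12K_MAX_VDEV_STATS_ID, so the scan now stops at the end of the ID space and hands back the invalid ID once the map is exhausted (despite its name, a set bit in free_vdev_stats_id_map marks an ID as taken). A sketch of the corrected scan, with illustrative limits:

#include <stdint.h>
#include <stdio.h>

#define MAX_STATS_ID   48   /* illustrative range limit */
#define INVAL_STATS_ID 0xff /* sentinel for "none free" */

static uint8_t get_vdev_stats_id(uint64_t *used_map)
{
        uint8_t id = 0;

        /* Walk until a clear bit is found or the range is exhausted. */
        while (*used_map & (1ULL << id)) {
                if (++id >= MAX_STATS_ID)
                        return INVAL_STATS_ID;
        }

        *used_map |= 1ULL << id; /* claim the ID */
        return id;
}

int main(void)
{
        uint64_t used = 0x7; /* IDs 0..2 already taken */

        printf("allocated id %u\n",
               (unsigned int)get_vdev_stats_id(&used));
        return 0;
}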
@@ -5376,12 +5678,11 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
return ret;
}
-static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
u32 param_id, param_value;
int ret;
@@ -5423,11 +5724,20 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
}
}
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ath12k_mac_update_vif_offload(arvif);
+}
+
static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
@@ -5439,6 +5749,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_AP &&
@@ -5483,17 +5796,29 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
default:
WARN_ON(1);
break;
@@ -5526,7 +5851,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- ath12k_mac_op_update_vif_offload(hw, vif);
+ ath12k_mac_update_vif_offload(arvif);
nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -5685,12 +6010,16 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_base *ab;
unsigned long time_left;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
@@ -5766,19 +6095,15 @@ err_vdev_del:
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+static void ath12k_mac_configure_filter(struct ath12k *ar,
+ unsigned int total_flags)
{
- struct ath12k *ar = hw->priv;
bool reset_flag;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
- *total_flags &= SUPPORTED_FILTERS;
- ar->filter_flags = *total_flags;
+ ar->filter_flags = total_flags;
/* For monitor mode */
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
@@ -5793,16 +6118,36 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
}
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
- *total_flags, reset_flag);
+ total_flags, reset_flag);
+}
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ *total_flags &= SUPPORTED_FILTERS;
+ ath12k_mac_configure_filter(ar, *total_flags);
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -5816,9 +6161,12 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
mutex_unlock(&ar->conf_mutex);
@@ -5826,14 +6174,13 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif,
+ struct ieee80211_ampdu_params *params)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k *ar = arvif->ar;
int ret = -EINVAL;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
@@ -5854,16 +6201,40 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
break;
}
+ return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret = -EINVAL;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_ampdu_action(arvif, params);
mutex_unlock(&ar->conf_mutex);
+ if (ret)
+ ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
+ ar->pdev_idx, params->action, ret);
+
return ret;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx add freq %u width %d ptr %pK\n",
@@ -5886,8 +6257,12 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx remove freq %u width %d ptr %pK\n",
@@ -5995,6 +6370,11 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
+ /* Fill the MBSSID flags to indicate that the AP is non-MBSSID by
+ * default; the flags will be updated once MBSSID support is added.
+ */
+ arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
@@ -6071,46 +6451,6 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
return 0;
}
-static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- ret = ath12k_mac_vdev_setup_sync(ar);
- if (ret) {
- ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- ar->num_started_vdevs--;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
- arvif->vif->addr, arvif->vdev_id);
-
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
- arvif->vdev_id);
- }
-
- return 0;
-err:
- return ret;
-}
-
static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
@@ -6215,6 +6555,8 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
+ arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
+
/* Firmware expect vdev_restart only if vdev is up.
* If vdev is down then it expect vdev_stop->vdev_start.
*/
@@ -6266,7 +6608,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
lockdep_assert_held(&ar->conf_mutex);
@@ -6295,8 +6637,12 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
mutex_lock(&ar->conf_mutex);
@@ -6311,7 +6657,8 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
- changed & IEEE80211_CHANCTX_CHANGE_RADAR)
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR ||
+ changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
ath12k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6320,12 +6667,11 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
int ret;
if (WARN_ON(arvif->is_started))
@@ -6359,19 +6705,23 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
struct ath12k_wmi_peer_create_arg param;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
- arvif->punct_bitmap = link_conf->eht_puncturing;
+ arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
if (ab->hw_params->vdev_start_delay &&
@@ -6438,11 +6788,15 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -6466,11 +6820,13 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
arvif->is_started = false;
if (ab->hw_params->vdev_start_delay &&
@@ -6490,7 +6846,10 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -6532,10 +6891,15 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
*/
static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath12k *ar = hw->priv;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
- return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ return ret;
}
static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -6553,15 +6917,10 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+static void ath12k_mac_flush(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
long time_left;
- if (drop)
- return;
-
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
@@ -6576,6 +6935,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
time_left);
}
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ if (drop)
+ return;
+
+ ath12k_mac_flush(ar);
+}
+
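The refactor above splits the mac80211 flush callback from the core drain logic so other paths can reuse ath12k_mac_flush(). The drain itself is a bounded wait for the pending-TX counter to reach zero; a minimal standalone model of that semantic (simplified, not the real wait_event_timeout() machinery):

#include <stdio.h>

/* model of wait_event_timeout(): poll a condition under a deadline,
 * returning the time left on success or 0 on timeout
 */
static int wait_for_drain(int *num_tx_pending, int ticks)
{
	while (ticks-- > 0) {
		if (*num_tx_pending == 0)
			return ticks + 1;
		(*num_tx_pending)--;	/* stand-in for TX completions arriving */
	}
	return 0;
}

int main(void)
{
	int pending = 3;
	int time_left = wait_for_drain(&pending, 10);

	if (time_left == 0)
		printf("failed to flush transmit queue, %d pending\n", pending);
	else
		printf("drained, time left %d\n", time_left);
	return 0;
}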
static int
ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
enum nl80211_band band,
@@ -6778,7 +7149,7 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
- ieee80211_queue_work(ar->hw, &arsta->update_wk);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk);
}
static void ath12k_mac_disable_peer_fixed_rate(void *data,
@@ -6826,8 +7197,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
- if (sgi == NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
+ if (sgi == NL80211_TXRATE_FORCE_LGI) {
+ ret = -EINVAL;
+ goto out;
+ }
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of used basic rates along with them.
@@ -6843,7 +7216,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
arvif->vdev_id, ret);
- return ret;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
ath12k_mac_disable_peer_fixed_rate,
@@ -6888,7 +7261,8 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
*/
ath12k_warn(ar->ab,
"Setting more than one MCS Value in bitrate mask not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
@@ -6915,6 +7289,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
+out:
return ret;
}
@@ -6922,14 +7297,18 @@ static void
ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif;
int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH12K_STATE_RESTARTED) {
@@ -7013,7 +7392,8 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
int ret = 0;
@@ -7021,6 +7401,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
+ ar = ath12k_ah_to_ar(ah);
+
ar_survey = &ar->survey[idx];
mutex_lock(&ar->conf_mutex);
@@ -7052,6 +7434,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
exit:
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -7089,6 +7472,125 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
+static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_wmi_scan_req_arg arg;
+ struct ath12k *ar;
+ u32 scan_time_msec;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_f_passive = 1;
+ arg.burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
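The remain-on-channel handler above reuses the scan engine, so a request is only admitted when the scan state machine is idle; any in-flight scan state yields -EBUSY. A standalone sketch of that admission rule (illustrative enum, not the driver's real state type):

#include <stdio.h>
#include <errno.h>

enum scan_state { SCAN_IDLE, SCAN_STARTING, SCAN_RUNNING, SCAN_ABORTING };

static int try_start_roc(enum scan_state *state)
{
	switch (*state) {
	case SCAN_IDLE:
		*state = SCAN_STARTING;	/* claim the scan engine for RoC */
		return 0;
	case SCAN_STARTING:
	case SCAN_RUNNING:
	case SCAN_ABORTING:
		return -EBUSY;		/* scan engine already in use */
	}
	return -EINVAL;
}

int main(void)
{
	enum scan_state state = SCAN_IDLE;

	printf("first request: %d\n", try_start_roc(&state));	/* 0 */
	printf("second request: %d\n", try_start_roc(&state));	/* -EBUSY */
	return 0;
}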
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -7123,6 +7625,8 @@ static const struct ieee80211_ops ath12k_ops = {
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
+ .remain_on_channel = ath12k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -7158,9 +7662,9 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
}
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
- u32 supported_bands)
+ u32 supported_bands,
+ struct ieee80211_supported_band *bands[])
{
- struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
void *channels;
@@ -7186,7 +7690,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
- hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
@@ -7198,7 +7702,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
@@ -7213,7 +7717,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ bands[NL80211_BAND_6GHZ] = band;
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -7235,7 +7739,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
@@ -7251,28 +7755,59 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes = U16_MAX;
+
+ interface_modes &= ar->ab->hw_params->interface_modes;
+
+ return interface_modes == U16_MAX ? 0 : interface_modes;
+}
+
+static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
+ enum nl80211_iftype type)
+{
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes, mode;
+ bool is_enable = true;
+
+ mode = BIT(type);
+
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (!(interface_modes & mode))
+ is_enable = false;
+
+ return is_enable;
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
- bool ap, mesh;
+ bool ap, mesh, p2p;
- ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+ ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
- if (ap || mesh) {
+ if ((ap || mesh) && !p2p) {
n_limits = 2;
max_interfaces = 16;
+ } else if (p2p) {
+ n_limits = 3;
+ if (ap || mesh)
+ max_interfaces = 16;
+ else
+ max_interfaces = 3;
} else {
n_limits = 1;
max_interfaces = 1;
@@ -7287,14 +7822,22 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (ap) {
+ if (ap || mesh || p2p)
limits[1].max = max_interfaces;
+
+ if (ap)
limits[1].types |= BIT(NL80211_IFTYPE_AP);
- }
if (mesh)
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = max_interfaces;
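With P2P enabled the combination grows a third limit so that at most one dedicated P2P device interface coexists with the station and the AP/mesh/P2P-GO/client slots. A standalone sketch of the resulting layout for the (ap || mesh) && p2p case (simplified types standing in for mac80211's):

#include <stdio.h>

struct iface_limit { int max; unsigned int types; };

#define BIT(x) (1u << (x))
enum { IFTYPE_STATION, IFTYPE_AP, IFTYPE_MESH_POINT,
       IFTYPE_P2P_CLIENT, IFTYPE_P2P_GO, IFTYPE_P2P_DEVICE };

int main(void)
{
	/* n_limits == 3, max_interfaces == 16, mirroring the hunk above */
	struct iface_limit limits[3] = {
		{ .max = 1,  .types = BIT(IFTYPE_STATION) },
		{ .max = 16, .types = BIT(IFTYPE_AP) | BIT(IFTYPE_MESH_POINT) |
				      BIT(IFTYPE_P2P_CLIENT) | BIT(IFTYPE_P2P_GO) },
		{ .max = 1,  .types = BIT(IFTYPE_P2P_DEVICE) },	/* one P2P device */
	};

	for (int i = 0; i < 3; i++)
		printf("limit %d: max %d types 0x%x\n",
		       i, limits[i].max, limits[i].types);
	return 0;
}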
@@ -7349,21 +7892,27 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
},
};
-static void __ath12k_mac_unregister(struct ath12k *ar)
+static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
{
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
-
- cancel_work_sync(&ar->regd_update_work);
-
- ieee80211_unregister_hw(hw);
-
idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+}
+
+static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
+{
+ struct ieee80211_hw *hw = ah->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(hw);
+
+ ath12k_mac_cleanup_unregister(ar);
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
@@ -7371,28 +7920,42 @@ static void __ath12k_mac_unregister(struct ath12k *ar)
SET_IEEE80211_DEV(hw, NULL);
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+static int ath12k_mac_setup_register(struct ath12k *ar,
+ u32 *ht_cap,
+ struct ieee80211_supported_band *bands[])
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ int ret;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
- continue;
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
- __ath12k_mac_unregister(ar);
- }
+ ath12k_pdev_caps_update(ar);
+
+ ret = ath12k_mac_setup_channels_rates(ar,
+ cap->supported_bands,
+ bands);
+ if (ret)
+ return ret;
+
+ ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap);
+ ath12k_mac_setup_sband_iftype_data(ar, cap);
+
+ ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
+ ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
+
+ return 0;
}
-static int __ath12k_mac_register(struct ath12k *ar)
+static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ struct ath12k_pdev_cap *cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
@@ -7407,30 +7970,34 @@ static int __ath12k_mac_register(struct ath12k *ar)
int ret;
u32 ht_cap = 0;
- ath12k_pdev_caps_update(ar);
+ pdev = ar->pdev;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
-
- SET_IEEE80211_DEV(hw, ab->dev);
+ if (ab->pdevs_macaddr_valid)
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ret = ath12k_mac_setup_channels_rates(ar,
- cap->supported_bands);
+ ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
if (ret)
- goto err;
+ goto out;
- ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
- ath12k_mac_setup_sband_iftype_data(ar, cap);
+ wiphy->max_ap_assoc_sta = ar->max_num_stations;
- ret = ath12k_mac_setup_iface_combinations(ar);
- if (ret) {
- ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
- goto err_free_channels;
- }
+ cap = &pdev->cap;
wiphy->available_antennas_rx = cap->rx_chain_mask;
wiphy->available_antennas_tx = cap->tx_chain_mask;
- wiphy->interface_modes = ab->hw_params->interface_modes;
+ SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ SET_IEEE80211_DEV(hw, ab->dev);
+
+ ret = ath12k_mac_setup_iface_combinations(ah);
+ if (ret) {
+ ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_cleanup_unregister;
+ }
+
+ wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
if (wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
@@ -7483,15 +8050,10 @@ static int __ath12k_mac_register(struct ath12k *ar)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
- ar->max_num_stations = TARGET_NUM_STATIONS;
- ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
-
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
-
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
@@ -7524,7 +8086,7 @@ static int __ath12k_mac_register(struct ath12k *ar)
ret = ieee80211_register_hw(hw);
if (ret) {
- ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
@@ -7552,142 +8114,213 @@ err_free_if_combs:
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
-err_free_channels:
- kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err_cleanup_unregister:
+ ath12k_mac_cleanup_unregister(ar);
-err:
+out:
SET_IEEE80211_DEV(hw, NULL);
+
return ret;
}
+static void ath12k_mac_setup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev = ar->pdev;
+ u8 pdev_idx = ar->pdev_idx;
+
+ ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
+
+ ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
+ /* FIXME: wmi[0] is already initialized during attach;
+ * should we do this again?

+ */
+ ath12k_wmi_pdev_attach(ab, pdev_idx);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+}
+
int ath12k_mac_register(struct ath12k_base *ab)
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
int ret;
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (ab->pdevs_macaddr_valid) {
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- } else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ar->mac_addr[4] += i;
- }
-
- ret = __ath12k_mac_register(ar);
- if (ret)
- goto err_cleanup;
-
- init_waitqueue_head(&ar->txmgmt_empty_waitq);
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
- }
-
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = 320000;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+
+ ret = ath12k_mac_hw_register(ah);
+ if (ret)
+ goto err;
+ }
+
return 0;
-err_cleanup:
+err:
for (i = i - 1; i >= 0; i--) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- __ath12k_mac_unregister(ar);
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
}
return ret;
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ int i;
+
+ for (i = ab->num_hw - 1; i >= 0; i--) {
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
+ }
+}
+
+static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
+{
+ ieee80211_free_hw(ah->hw);
+}
+
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+ struct ath12k_pdev_map *pdev_map,
+ u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
struct ath12k_pdev *pdev;
- int ret;
+ struct ath12k_hw *ah;
int i;
+ u8 pdev_idx;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map),
+ &ath12k_ops);
+ if (!hw)
+ return NULL;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
- if (!hw) {
- ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
- ret = -ENOMEM;
- goto err_free_mac;
- }
+ ah = ath12k_hw_to_ah(hw);
+ ah->hw = hw;
+ ah->num_radio = num_pdev_map;
+
+ for (i = 0; i < num_pdev_map; i++) {
+ ab = pdev_map[i].ab;
+ pdev_idx = pdev_map[i].pdev_idx;
+ pdev = &ab->pdevs[pdev_idx];
- ar = hw->priv;
- ar->hw = hw;
+ ar = ath12k_ah_to_ar(ah);
+ ar->ah = ah;
ar->ab = ab;
+ ar->hw_link_id = i;
ar->pdev = pdev;
- ar->pdev_idx = i;
- ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
-
- ar->wmi = &ab->wmi_ab.wmi[i];
- /* FIXME: wmi[0] is already initialized during attach,
- * Should we do this again?
- */
- ath12k_wmi_pdev_attach(ab, i);
-
- ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
- ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
- ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
- ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
-
+ ar->pdev_idx = pdev_idx;
pdev->ar = ar;
- spin_lock_init(&ar->data_lock);
- INIT_LIST_HEAD(&ar->arvifs);
- INIT_LIST_HEAD(&ar->ppdu_stats_info);
- mutex_init(&ar->conf_mutex);
- init_completion(&ar->vdev_setup_done);
- init_completion(&ar->vdev_delete_done);
- init_completion(&ar->peer_assoc_done);
- init_completion(&ar->peer_delete_done);
- init_completion(&ar->install_key_done);
- init_completion(&ar->bss_survey_done);
- init_completion(&ar->scan.started);
- init_completion(&ar->scan.completed);
-
- INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
- INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
-
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- }
- return 0;
-
-err_free_mac:
- ath12k_mac_destroy(ab);
+ ath12k_mac_setup(ar);
+ }
- return ret;
+ return ah;
}
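ath12k_hw is now the ieee80211_hw private data and carries its radios as a trailing flexible array, so struct_size(ah, radio, num_pdev_map) sizes the whole container in one allocation. A plain-C model of that sizing (illustrative types, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct radio { int pdev_idx; };

struct hw_group {
	int num_radio;
	struct radio radio[];	/* flexible array member, like ah->radio */
};

int main(void)
{
	int num = 2;
	/* struct_size(ah, radio, num) == sizeof(*ah) + num * sizeof(ah->radio[0]) */
	struct hw_group *ah = malloc(sizeof(*ah) + num * sizeof(ah->radio[0]));

	if (!ah)
		return 1;
	ah->num_radio = num;
	for (int i = 0; i < num; i++)
		ah->radio[i].pdev_idx = i;
	printf("allocated %zu bytes for %d radios\n",
	       sizeof(*ah) + num * sizeof(ah->radio[0]), ah->num_radio);
	free(ah);
	return 0;
}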
void ath12k_mac_destroy(struct ath12k_base *ab)
{
- struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
+ if (!pdev->ar)
continue;
- ieee80211_free_hw(ar->hw);
pdev->ar = NULL;
}
+
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+}
+
+int ath12k_mac_allocate(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ struct ath12k_pdev_map pdev_map[MAX_RADIOS];
+ int ret, i, j;
+ u8 radio_per_hw;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ ab->num_hw = ab->num_radios;
+ radio_per_hw = 1;
+
+ for (i = 0; i < ab->num_hw; i++) {
+ for (j = 0; j < radio_per_hw; j++) {
+ pdev_map[j].ab = ab;
+ pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ }
+
+ ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ if (!ah) {
+ ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
+ i);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ab->ah[i] = ah;
+ }
+
+ ath12k_dp_pdev_pre_alloc(ab);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+
+ return ret;
}
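Allocation now walks hw instances rather than radios, with pdev_map describing which pdevs each hw owns. Today radio_per_hw is fixed at 1, but the index math already supports grouping; a standalone sketch with a hypothetical radio_per_hw of 2:

#include <stdio.h>

int main(void)
{
	int num_radios = 4, radio_per_hw = 2;	/* hypothetical grouped case */
	int num_hw = num_radios / radio_per_hw;

	for (int i = 0; i < num_hw; i++)
		for (int j = 0; j < radio_per_hw; j++)
			printf("hw %d <- pdev_idx %d\n", i, (i * radio_per_hw) + j);
	return 0;
}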
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 7c63bb628adc..3f5e1be0dff9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -12,6 +12,8 @@
struct ath12k;
struct ath12k_base;
+struct ath12k_hw;
+struct ath12k_pdev_map;
struct ath12k_generic_iter {
struct ath12k *ar;
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index d5441ddb374b..adb8c3ec1950 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
#include "core.h"
#include "debug.h"
@@ -13,6 +14,8 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define OTP_INVALID_BOARD_ID 0xFFFF
+#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@@ -358,23 +361,60 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
+ unsigned int board_id;
int ret;
+ bool dualmac = false;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
- ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
- ab_pci->amss_path,
- sizeof(ab_pci->amss_path));
-
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
- mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
mhi_ctrl->rddm_size = ab->hw_params->rddm_size;
+ if (ab->hw_params->otp_board_id_register) {
+ board_id =
+ ath12k_pci_read32(ab, ab->hw_params->otp_board_id_register);
+ board_id = u32_get_bits(board_id, OTP_BOARD_ID_MASK);
+
+ if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to read board id\n");
+ } else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
+ dualmac = true;
+ ab->slo_capable = false;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "dualmac fw selected for board id: %x\n", board_id);
+ }
+ }
+
+ if (dualmac) {
+ if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_dualmac_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len;
+ } else {
+ ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n");
+ ret = -ENOENT;
+ goto free_controller;
+ }
+ } else {
+ if (ab->fw.amss_data && ab->fw.amss_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_len;
+ } else {
+ /* use the old separate mhi.bin MHI firmware file */
+ ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ }
+ }
+
ret = ath12k_mhi_get_msi(ab_pci);
if (ret) {
ath12k_err(ab, "failed to get msi for mhi\n");
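Firmware selection above hinges on the OTP board id: the register is masked to 16 bits, 0 and 0xFFFF are treated as unreadable, and a set 0x1000 bit selects the dualmac image. A standalone sketch of that decision (the raw register value below is a made-up example):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define OTP_BOARD_ID_MASK		0xFFFFu	/* GENMASK(15, 0) */
#define OTP_INVALID_BOARD_ID		0xFFFF
#define OTP_VALID_DUALMAC_BOARD_ID_MASK	0x1000

int main(void)
{
	uint32_t raw = 0x00011042;	/* hypothetical OTP register read */
	uint32_t board_id = raw & OTP_BOARD_ID_MASK;
	bool dualmac = false;

	if (!board_id || board_id == OTP_INVALID_BOARD_ID)
		printf("failed to read board id\n");
	else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK)
		dualmac = true;		/* pick the dualmac MHI image */

	printf("board_id 0x%x dualmac %d\n", (unsigned)board_id, dualmac);
	return 0;
}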
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
new file mode 100644
index 000000000000..d334df720032
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath12k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ bool oppps = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)) &&
+ !(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM) *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
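The length computed above is the full vendor-specific P2P IE: EID, length byte, the 4-byte WFA OUI-plus-type, the attribute header, the index/CT-window pair, and one packed descriptor per NoA entry. A standalone sketch of the arithmetic (the 13-byte descriptor mirrors mac80211's packed ieee80211_p2p_noa_desc, stated here as an assumption):

#include <stdio.h>
#include <stdint.h>

struct noa_desc {
	uint8_t count;
	uint32_t duration, interval, start_time;
} __attribute__((packed));		/* 1 + 3 * 4 = 13 bytes */

static size_t noa_ie_len(unsigned int num_desc, int opp_ps)
{
	size_t len = 0;

	if (!num_desc && !opp_ps)
		return 0;		/* nothing to advertise */

	len += 1 + 1 + 4;		/* EID + len + OUI(3) + OUI type */
	len += 1 + 2;			/* attribute ID + attribute length */
	len += 1 + 1;			/* index + oppps/ctwindow */
	len += num_desc * sizeof(struct noa_desc);
	return len;
}

int main(void)
{
	printf("0 desc, oppps: %zu bytes\n", noa_ie_len(0, 1));	/* 11 */
	printf("2 desc: %zu bytes\n", noa_ie_len(2, 0));	/* 37 */
	return 0;
}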
+static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath12k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath12k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath12k_p2p_noa_ie_fill(ie, len, noa);
+ ath12k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath12k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
new file mode 100644
index 000000000000..5768139a7844
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_P2P_H
+#define ATH12K_P2P_H
+
+#include "wmi.h"
+
+struct ath12k_wmi_p2p_noa_info;
+
+struct ath12k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+};
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index f0d2e2d8719c..14954bc05144 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -39,6 +39,10 @@
#define QCN9274_DEVICE_ID 0x1109
#define WCN7850_DEVICE_ID 0x1107
+#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164
+#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
+#define BUS_NUMBER_MASK GENMASK(3, 0)
+
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@@ -201,18 +205,17 @@ static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
- /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
- * use 0th window
- */
- else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
- !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
- window_start = 0;
else
window_start = WINDOW_START;
return window_start;
}
+static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
+{
+ return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
+}
+
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
u32 val, delay;
@@ -682,12 +685,22 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct pci_bus *bus = ab_pci->pdev->bus;
+
cfg->tgt_ce = ab->hw_params->target_ce_config;
cfg->tgt_ce_len = ab->hw_params->target_ce_count;
cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
+ ab_pci->qmi_instance =
+ u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
+ u32_encode_bits(bus->number, BUS_NUMBER_MASK);
+ ab->qmi.service_ins_id += ab_pci->qmi_instance;
+ }
}
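The qmi_instance computed above packs the PCIe domain number into bits 7..4 and the bus number into bits 3..0, so every device advertises a distinct QMI service instance. A standalone sketch with local stand-ins for GENMASK() and u32_encode_bits() (__builtin_ctz assumes GCC/Clang):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define DOMAIN_NUMBER_MASK	GENMASK(7, 4)
#define BUS_NUMBER_MASK		GENMASK(3, 0)

/* minimal u32_encode_bits(): shift the value into the mask's field */
static uint32_t encode_bits(uint32_t v, uint32_t mask)
{
	return (v << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t domain = 1, bus = 3;	/* hypothetical pci_domain_nr()/bus->number */
	uint32_t qmi_instance = encode_bits(domain, DOMAIN_NUMBER_MASK) |
				encode_bits(bus, BUS_NUMBER_MASK);

	printf("qmi_instance 0x%02x\n", (unsigned)qmi_instance);	/* 0x13 */
	return 0;
}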
static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
@@ -901,6 +914,26 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
+static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 reg;
+
+ /* On platforms with two or more identical MHI devices, the QMI
+ * services run with identical qrtr-node-ids. Because of those
+ * identical IDs, qrtr-lookup cannot register more than one QMI
+ * service with the same node ID.
+ *
+ * Generate a unique instance ID from the PCIe domain and bus numbers
+ * and write it to the given register; the value is available to
+ * firmware when the QMI service is spawned.
+ */
+ reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
+ ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
+ reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
+}
+
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
@@ -1138,15 +1171,17 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ val = ioread32(ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@@ -1183,15 +1218,17 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ iowrite32(value, ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
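Both the read and write paths now share one rule: after the window is selected, an offset inside the MHI register region is rebased to PCI_MHIREGLEN_REG before masking, replacing the old window-0 special case. A standalone sketch of the offset math (register addresses below are placeholders, not the chip's real map):

#include <stdio.h>
#include <stdint.h>

#define WINDOW_RANGE_MASK	0x7FFFFu	/* hypothetical window span */
#define PCI_MHIREGLEN_REG	0x1E0E100u	/* placeholder base */
#define PCI_MHI_REGION_END	0x1E0EFFCu	/* placeholder end */

static int offset_within_mhi_region(uint32_t offset)
{
	return offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END;
}

static uint32_t bar_offset(uint32_t offset, uint32_t window_start)
{
	if (offset_within_mhi_region(offset))
		return (offset - PCI_MHIREGLEN_REG) & WINDOW_RANGE_MASK;
	return window_start + (offset & WINDOW_RANGE_MASK);
}

int main(void)
{
	printf("mhi reg -> 0x%x\n",
	       (unsigned)bar_offset(PCI_MHIREGLEN_REG + 8, 0x80000));	/* 0x8 */
	printf("plain reg -> 0x%x\n",
	       (unsigned)bar_offset(0x123456, 0x80000));	/* 0xa3456 */
	return 0;
}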
@@ -1219,6 +1256,9 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+ ath12k_pci_update_qrtr_node_id(ab);
+
ret = ath12k_mhi_start(ab_pci);
if (ret) {
ath12k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1310,11 +1350,21 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
goto err_free_core;
}
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
switch (pci_dev->device) {
case QCN9274_DEVICE_ID:
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = true;
ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
+ ab->hal_rx_ops = &hal_rx_qcn9274_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -1333,9 +1383,11 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
}
break;
case WCN7850_DEVICE_ID:
+ ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = false;
ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
+ ab->hal_rx_ops = &hal_rx_wcn7850_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index b2edf32ada20..ca93693ba4e9 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@@ -53,6 +53,9 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338
+#define OTP_BOARD_ID_MASK GENMASK(15, 0)
+
#define PCI_BAR_WINDOW0_BASE 0x1E00000
#define PCI_BAR_WINDOW0_END 0x1E7FFFC
#define PCI_SOC_RANGE_MASK 0x3FFF
@@ -111,6 +114,7 @@ struct ath12k_pci {
u16 link_ctl;
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
+ u32 qmi_instance;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 77a132f6bbd1..92845ffff44a 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -17,7 +17,7 @@
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
-static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -61,7 +61,7 @@ static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -511,7 +511,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -528,7 +528,68 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -753,7 +814,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -789,7 +850,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -821,7 +882,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -863,7 +924,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -890,7 +951,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -930,7 +991,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -957,7 +1018,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -975,7 +1036,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -983,7 +1044,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1009,7 +1070,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1026,7 +1087,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1042,7 +1103,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1068,7 +1129,7 @@ static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1094,7 +1155,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1348,7 +1409,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1483,7 +1544,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1501,7 +1562,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1525,7 +1586,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1542,7 +1603,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1595,7 +1656,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1630,7 +1691,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@@ -1654,7 +1715,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1671,7 +1732,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1706,7 +1767,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1724,7 +1785,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1862,7 +1923,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1879,22 +1940,78 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
+ struct wlfw_host_mlo_chip_info_s_v01 *info;
+ u8 hw_link_id = 0;
+ int i;
+
+ if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "skip QMI MLO cap due to invalid num_radio %d\n",
+ ab->qmi.num_radios);
+ return;
+ }
+
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
@@ -1905,28 +2022,31 @@ static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *re
/* Max peer number generally won't change for the same device
* but needs to be synced with host driver.
*/
- req->max_mlo_peer = 32;
+ req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
req->mlo_num_chips = 1;
+
+ info = &req->mlo_chip_info[0];
+ info->chip_id = 0;
+ info->num_local_links = ab->qmi.num_radios;
+
+ for (i = 0; i < info->num_local_links; i++) {
+ info->hw_link_id[i] = hw_link_id;
+ info->valid_mlo_link_id[i] = 1;
+
+ hw_link_id++;
+ }
+
req->mlo_chip_info_valid = 1;
- req->mlo_chip_info[0].chip_id = 0;
- req->mlo_chip_info[0].num_local_links = 2;
- req->mlo_chip_info[0].hw_link_id[0] = 0;
- req->mlo_chip_info[0].hw_link_id[1] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
}
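The MLO capability now derives its link list from the probed radio count instead of hardcoding two links: one chip entry whose local links are numbered sequentially and marked valid. A standalone sketch of the fill loop (illustrative struct, not the QMI wire format):

#include <stdio.h>

#define MAX_LINKS 8

struct mlo_chip_info {
	int chip_id;
	int num_local_links;
	int hw_link_id[MAX_LINKS];
	int valid_mlo_link_id[MAX_LINKS];
};

int main(void)
{
	struct mlo_chip_info info = { .chip_id = 0 };
	int num_radios = 2, hw_link_id = 0;	/* e.g. from the PHY capability resp */

	info.num_local_links = num_radios;
	for (int i = 0; i < info.num_local_links; i++) {
		info.hw_link_id[i] = hw_link_id++;	/* sequential link numbering */
		info.valid_mlo_link_id[i] = 1;
	}

	for (int i = 0; i < info.num_local_links; i++)
		printf("link %d -> hw_link_id %d valid %d\n",
		       i, info.hw_link_id[i], info.valid_mlo_link_id[i]);
	return 0;
}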
static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
- struct qmi_wlanfw_host_cap_req_msg_v01 req;
- struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.num_clients_valid = 1;
req.num_clients = 1;
req.mem_cfg_mode = ab->qmi.target_mem_mode;
@@ -1963,10 +2083,10 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
*/
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
-
- ath12k_host_cap_parse_mlo(&req);
}
+ ath12k_host_cap_parse_mlo(ab, &req);
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1977,6 +2097,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
goto out;
}
@@ -1996,6 +2117,62 @@ out:
return ret;
}
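A recurring fix in these QMI hunks is calling qmi_txn_cancel() when qmi_send_request() fails: qmi_txn_init() registers the transaction, and only a wait or an explicit cancel releases it. A standalone model of the leak being closed (toy slot table, not the real QMI core):

#include <stdio.h>

#define MAX_TXN 4
static int txn_slots[MAX_TXN];	/* 1 = transaction outstanding */

static int txn_init(void)
{
	for (int i = 0; i < MAX_TXN; i++)
		if (!txn_slots[i]) {
			txn_slots[i] = 1;	/* registered, awaiting a response */
			return i;
		}
	return -1;
}

static void txn_cancel(int id)
{
	txn_slots[id] = 0;	/* what qmi_txn_cancel() does on send failure */
}

static int send_request(void)
{
	return -1;		/* model a transport error on send */
}

int main(void)
{
	int id = txn_init();

	if (id < 0)
		return 1;
	if (send_request() < 0)
		txn_cancel(id);	/* without this, the slot stays occupied forever */

	int busy = 0;
	for (int i = 0; i < MAX_TXN; i++)
		busy += txn_slots[i];
	printf("outstanding transactions: %d\n", busy);	/* 0 */
	return 0;
}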
+static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_phy_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ int ret;
+
+ if (!ab->slo_capable)
+ goto out;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_PHY_CAP_REQ_V01,
+ QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_phy_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send phy capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!resp.num_phy_valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ab->qmi.num_radios = resp.num_phy;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ resp.num_phy_valid, resp.num_phy,
+ resp.board_id_valid, resp.board_id);
+
+ return;
+
+out:
+ /* If the PHY capability is not advertised, rely on the default number of links */
+ ab->qmi.num_radios = ab->hw_params->def_num_link;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "no valid response from PHY capability, choose default num_phy %d\n",
+ ab->qmi.num_radios);
+}
+
static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_ind_register_req_msg_v01 *req;
@@ -2040,6 +2217,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
ret);
goto out;
@@ -2068,8 +2246,8 @@ resp_out:
static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
- struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0, i;
bool delayed;
@@ -2077,8 +2255,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
 /* Some targets by default request a block of big contiguous
 * DMA memory, which is hard to allocate from the kernel. So the host
 * returns failure to firmware and firmware then requests multiple blocks of
@@ -2088,7 +2264,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
- memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
@@ -2114,6 +2289,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
ret);
goto out;
@@ -2208,17 +2384,14 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
- struct qmi_wlanfw_cap_req_msg_v01 req;
- struct qmi_wlanfw_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
int ret = 0;
int r;
int i;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2229,6 +2402,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
ret);
goto out;
@@ -2310,8 +2484,8 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
const u8 *temp = data;
int ret;
u32 remaining = len;
@@ -2319,7 +2493,6 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
while (remaining) {
req->valid = 1;
@@ -2423,8 +2596,7 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
break;
case ATH12K_QMI_BDF_TYPE_REGDB:
- ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
- ATH12K_REGDB_FILE_NAME);
+ ret = ath12k_core_fetch_regdb(ab, &bd);
if (ret) {
ath12k_warn(ab, "qmi failed to load regdb bin:\n");
goto out;
@@ -2497,37 +2669,56 @@ out:
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
+ const void *m3_data;
char path[100];
+ size_t m3_len;
int ret;
- if (m3_mem->vaddr || m3_mem->size)
+ if (m3_mem->vaddr)
+ /* m3 firmware buffer is already available in the DMA buffer */
return 0;
- fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
- if (IS_ERR(fw)) {
- ret = PTR_ERR(fw);
- ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
- path, sizeof(path));
- ath12k_err(ab, "failed to load %s: %d\n", path, ret);
- return ret;
+ if (ab->fw.m3_data && ab->fw.m3_len > 0) {
+ /* firmware-N.bin had an m3 firmware file, so use that */
+ m3_data = ab->fw.m3_data;
+ m3_len = ab->fw.m3_len;
+ } else {
+ /* No m3 file in firmware-N.bin, so try to request the old
+ * separate m3.bin.
+ */
+ fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_data = fw->data;
+ m3_len = fw->size;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
- fw->size, &m3_mem->paddr,
+ m3_len, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
- release_firmware(fw);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- memcpy(m3_mem->vaddr, fw->data, fw->size);
- m3_mem->size = fw->size;
+ memcpy(m3_mem->vaddr, m3_data, m3_len);
+ m3_mem->size = m3_len;
+
+ ret = 0;
+
+out:
release_firmware(fw);
- return 0;
+ return ret;
}
static void ath12k_qmi_m3_free(struct ath12k_base *ab)
@@ -2546,14 +2737,11 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab)
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- struct qmi_wlanfw_m3_info_req_msg_v01 req;
- struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = ath12k_qmi_m3_load(ab);
if (ret) {
ath12k_err(ab, "failed to load m3 firmware: %d", ret);
@@ -2573,6 +2761,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
ret);
goto out;
@@ -2597,14 +2786,11 @@ out:
static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
u32 mode)
{
- struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
- struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.mode = mode;
req.hw_debug_valid = 1;
req.hw_debug = 0;
@@ -2619,6 +2805,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
mode, ret);
goto out;
@@ -2649,10 +2836,10 @@ out:
static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
- struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp = {};
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
@@ -2662,8 +2849,6 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
req->host_version_valid = 1;
strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
sizeof(req->host_version));
@@ -2710,6 +2895,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
ret);
goto out;
@@ -2733,6 +2919,49 @@ out:
return ret;
}
+static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ req.enable_fwlog_valid = true;
+ req.enable_fwlog = 1;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send QMI wlan ini request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
@@ -2749,6 +2978,12 @@ int ath12k_qmi_firmware_start(struct ath12k_base *ab,
{
int ret;
+ ret = ath12k_qmi_wlanfw_wlan_ini_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret);
+ return ret;
+ }
+
ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
@@ -2792,6 +3027,8 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
struct ath12k_base *ab = qmi->ab;
int ret;
+ ath12k_qmi_phy_cap_send(ab);
+
ret = ath12k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index e25bbaa125e8..6ee33c9851c6 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
@@ -15,7 +15,6 @@
#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
-#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
@@ -141,6 +140,7 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+ u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
unsigned int service_ins_id;
@@ -251,6 +251,22 @@ struct qmi_wlanfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_PHY_CAP_REQ_V01 0x0057
+#define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_PHY_CAP_RESP_V01 0x0057
+
+struct qmi_wlanfw_phy_cap_req_msg_v01 {
+};
+
+struct qmi_wlanfw_phy_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 num_phy_valid;
+ u8 num_phy;
+ u8 board_id_valid;
+ u32 board_id;
+};
+
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
@@ -559,6 +575,21 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
+#define ATH12K_QMI_WLANFW_WLAN_INI_RESP_V01 0x002F
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_RESP_MSG_V01_MAX_LEN 7
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enable_fwlog is being passed */
+ u8 enable_fwlog_valid;
+ u8 enable_fwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
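The new phy cap request is deliberately empty: QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN is 0 and the struct has no members, so the message encodes to nothing but still drives the request/response handshake. Its element-info table is not part of this hunk; as a hedged sketch, for a memberless message it would reduce to just the end-of-table marker:

static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};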
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f924bc13ccff..f308e9a6ed55 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
@@ -48,7 +48,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
int ret;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
@@ -95,7 +96,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
struct ieee80211_channel *channel;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
@@ -103,7 +104,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -129,7 +130,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
ch = arg->channel;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -199,7 +200,7 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 55f20c446ca9..a0db6702a189 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -147,6 +147,61 @@ struct rx_mpdu_start_qcn9274 {
__le32 res1;
} __packed;
+#define QCN9274_MPDU_START_SELECT_MPDU_START_TAG BIT(0)
+#define QCN9274_MPDU_START_SELECT_INFO0_REO_QUEUE_DESC_LO BIT(1)
+#define QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 BIT(2)
+#define QCN9274_MPDU_START_SELECT_PN_95_32 BIT(3)
+#define QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 BIT(4)
+#define QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID BIT(5)
+#define QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 BIT(6)
+#define QCN9274_MPDU_START_SELECT_INFO5_INFO6 BIT(7)
+#define QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 BIT(8)
+#define QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 BIT(9)
+#define QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL BIT(10)
+#define QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL BIT(11)
+#define QCN9274_MPDU_START_SELECT_HT_CTRL_INFO7 BIT(12)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR1_47_0_ML_ADDR2_15_0 BIT(13)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR2_47_16_INFO8 BIT(14)
+#define QCN9274_MPDU_START_SELECT_RES_0_RES_1 BIT(15)
+
+#define QCN9274_MPDU_START_WMASK (QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 | \
+ QCN9274_MPDU_START_SELECT_PN_95_32 | \
+ QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 | \
+ QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID | \
+ QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 | \
+ QCN9274_MPDU_START_SELECT_INFO5_INFO6 | \
+ QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 | \
+ QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 | \
+ QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL | \
+ QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL)
+
+/* The rx_mpdu_start_qcn9274_compact structure below is tied to the mask
+ * value QCN9274_MPDU_START_WMASK. If the mask value changes, the structure
+ * must change as well.
+ */
+
+struct rx_mpdu_start_qcn9274_compact {
+ __le32 info1;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info3;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* reo_destination_indication
@@ -608,6 +663,8 @@ enum rx_msdu_start_reception_type {
RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
};
+#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22)
+
#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
@@ -786,6 +843,52 @@ struct rx_msdu_end_qcn9274 {
__le32 info14;
} __packed;
+#define QCN9274_MSDU_END_SELECT_MSDU_END_TAG BIT(0)
+#define QCN9274_MSDU_END_SELECT_INFO0_PHY_PPDUID_IP_HDR_CSUM_INFO1 BIT(1)
+#define QCN9274_MSDU_END_SELECT_INFO2_CUMULATIVE_CSUM_RULE_IND_0 BIT(2)
+#define QCN9274_MSDU_END_SELECT_IPV6_OP_CRC_INFO3_TYPE13 BIT(3)
+#define QCN9274_MSDU_END_SELECT_RULE_IND_1_TCP_SEQ_NUM BIT(4)
+#define QCN9274_MSDU_END_SELECT_TCP_ACK_NUM_INFO4_WINDOW_SIZE BIT(5)
+#define QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID BIT(6)
+#define QCN9274_MSDU_END_SELECT_INFO6_FSE_METADATA BIT(7)
+#define QCN9274_MSDU_END_SELECT_CCE_MDATA_TCP_UDP_CSUM_INFO7_IP_LEN BIT(8)
+#define QCN9274_MSDU_END_SELECT_INFO8_INFO9 BIT(9)
+#define QCN9274_MSDU_END_SELECT_INFO10_INFO11 BIT(10)
+#define QCN9274_MSDU_END_SELECT_VLAN_CTAG_STAG_CI_PEER_MDATA BIT(11)
+#define QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ BIT(12)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA BIT(13)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_31_0_TOEPLITZ_HASH_2_4 BIT(14)
+#define QCN9274_MSDU_END_SELECT_RES0_SA_47_0 BIT(15)
+#define QCN9274_MSDU_END_SELECT_INFO13_INFO14 BIT(16)
+
+#define QCN9274_MSDU_END_WMASK (QCN9274_MSDU_END_SELECT_MSDU_END_TAG | \
+ QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID | \
+ QCN9274_MSDU_END_SELECT_INFO10_INFO11 | \
+ QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ | \
+ QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA | \
+ QCN9274_MSDU_END_SELECT_INFO13_INFO14)
+
+/* The rx_msdu_end_qcn9274_compact structure below is tied to the mask value
+ * QCN9274_MSDU_END_WMASK. If the mask value changes, the structure must
+ * change as well.
+ */
+
+struct rx_msdu_end_qcn9274_compact {
+ __le64 msdu_end_tag;
+ __le16 sa_sw_peer_id;
+ __le16 info5;
+ __le16 sa_idx;
+ __le16 da_idx_or_sw_peer_id;
+ __le32 info10;
+ __le32 info11;
+ __le32 info12;
+ __le32 flow_id_toeplitz;
+ __le32 ppdu_start_timestamp_63_32;
+ __le32 phy_meta_data;
+ __le32 info13;
+ __le32 info14;
+} __packed;
+
/* These macro definitions are only used for WCN7850 */
 #define RX_MSDU_END_WCN7850_INFO2_KEY_ID GENMASK(7, 0)
@@ -1450,16 +1553,18 @@ struct rx_msdu_end_wcn7850 {
*
*/
-/* TODO: Move to compact TLV approach
- * By default these tlv's are not aligned to 128b boundary
- * Need to remove unused qwords and make them compact/aligned
- */
struct hal_rx_desc_qcn9274 {
struct rx_msdu_end_qcn9274 msdu_end;
struct rx_mpdu_start_qcn9274 mpdu_start;
u8 msdu_payload[];
} __packed;
+struct hal_rx_desc_qcn9274_compact {
+ struct rx_msdu_end_qcn9274_compact msdu_end;
+ struct rx_mpdu_start_qcn9274_compact mpdu_start;
+ u8 msdu_payload[];
+} __packed;
+
#define RX_BE_PADDING0_BYTES 8
#define RX_BE_PADDING1_BYTES 8
@@ -1484,6 +1589,7 @@ struct hal_rx_desc_wcn7850 {
struct hal_rx_desc {
union {
struct hal_rx_desc_qcn9274 qcn9274;
+ struct hal_rx_desc_qcn9274_compact qcn9274_compact;
struct hal_rx_desc_wcn7850 wcn7850;
} u;
} __packed;
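RX_MSDU_END_64_TLV_SRC_LINK_ID above carves a 3-bit source link id out of a 32-bit word of the 64-byte msdu_end TLV. A hedged usage sketch with le32_get_bits() from <linux/bitfield.h> (desc_info is a hypothetical __le32 read from the descriptor; only the mask comes from this patch):

static u8 example_src_link_id(__le32 desc_info)
{
	/* bits 24:22 of the TLV word select the source hardware link */
	return le32_get_bits(desc_info, RX_MSDU_END_64_TLV_SRC_LINK_ID);
}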
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
index f72096684b74..240737e1542d 100644
--- a/drivers/net/wireless/ath/ath12k/trace.h
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -140,6 +140,33 @@ TRACE_EVENT(ath12k_htt_rxdesc,
)
);
+TRACE_EVENT(ath12k_wmi_diag,
+ TP_PROTO(struct ath12k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 11cc3005c0f9..9d69a1769926 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -19,6 +19,7 @@
#include "mac.h"
#include "hw.h"
#include "peer.h"
+#include "p2p.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
@@ -162,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_enable_event) },
+ [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_disable_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -179,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config->num_peers = TARGET_NUM_PEERS(DBS);
- config->num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config->num_peers = TARGET_NUM_PEERS(SINGLE);
- config->num_tids = TARGET_NUM_TIDS(SINGLE);
- }
+ config->num_peers = ab->num_radios *
+ ath12k_core_get_max_peers_per_radio(ab);
+ config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@@ -228,6 +228,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -359,8 +362,8 @@ static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
}
static const void **
-ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -369,7 +372,7 @@ ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -493,13 +496,13 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
- pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
- fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@@ -727,6 +730,20 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk
return 0;
}
+static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath12k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params->single_pdev_only &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
struct sk_buff *skb;
@@ -752,6 +769,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@@ -770,7 +788,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
- cmd->chanfreq = 0;
+ cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
@@ -826,6 +844,9 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+ if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
+ cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
+
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
@@ -1024,6 +1045,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
+ cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
if (!restart) {
if (arg->ssid) {
@@ -1051,7 +1073,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@@ -1710,6 +1732,48 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, sizeof(u32));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
+
+ ptr += sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn)
@@ -2130,7 +2194,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -3265,6 +3329,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
+
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
@@ -4214,7 +4281,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
- if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
+ if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
@@ -4271,7 +4338,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
- if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
+ if (ab->pdevs[i].pdev_id ==
+ ath12k_wmi_caps_ext_get_pdev_id(caps))
break;
}
@@ -4374,7 +4442,7 @@ static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4452,7 +4520,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4738,7 +4806,7 @@ static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4770,7 +4838,7 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4790,15 +4858,15 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
return 0;
}
-static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4826,7 +4894,7 @@ static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4948,7 +5016,7 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4970,7 +5038,7 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5006,6 +5074,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar)
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
+
complete(&ar->scan.started);
break;
}
@@ -5076,6 +5148,8 @@ static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -5087,7 +5161,11 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
- ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+
break;
}
}
@@ -5141,7 +5219,7 @@ static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5174,7 +5252,7 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5201,7 +5279,7 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5226,13 +5304,14 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
static int freq_to_idx(struct ath12k *ar, int freq)
{
struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
if (!ar->mac.sbands[band].channels)
continue;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
if (!sband)
continue;
@@ -5245,14 +5324,14 @@ exit:
return idx;
}
-static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5291,7 +5370,7 @@ ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5331,7 +5410,7 @@ ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5362,7 +5441,7 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5384,13 +5463,13 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
}
static int
-ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, const struct wmi_pdev_temperature_event *ev)
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ const struct wmi_pdev_temperature_event *ev)
{
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5725,8 +5804,7 @@ static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *s
{
u32 vdev_id, tx_status;
- if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath12k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -5864,7 +5942,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx_ni(ar->hw, skb);
+ ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
exit:
rcu_read_unlock();
@@ -6037,7 +6115,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
@@ -6110,7 +6188,7 @@ static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract chan info event");
return;
}
@@ -6395,7 +6473,7 @@ static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6446,7 +6524,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -6460,7 +6538,7 @@ ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6494,7 +6572,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
struct ath12k *ar;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6531,7 +6609,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar));
exit:
rcu_read_unlock();
@@ -6546,7 +6624,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct ath12k *ar;
struct wmi_pdev_temperature_event ev = {0};
- if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
return;
}
@@ -6573,7 +6651,7 @@ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6603,7 +6681,7 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6628,6 +6706,56 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+ struct ath12k *ar;
+ int ret, vdev_id;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+ ret = 0;
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+ return ret;
+}
+
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -6635,7 +6763,7 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6662,6 +6790,70 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
kfree(tb);
}
+static void
+ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ trace_ath12k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_enable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_disable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6757,11 +6949,18 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_RFKILL_STATE_CHANGE_EVENTID:
ath12k_rfkill_state_change_event(ab, skb);
break;
+ case WMI_TWT_ENABLE_EVENTID:
+ ath12k_wmi_twt_enable_event(ab, skb);
+ break;
+ case WMI_TWT_DISABLE_EVENTID:
+ ath12k_wmi_twt_disable_event(ab, skb);
+ break;
+ case WMI_P2P_NOA_EVENTID:
+ ath12k_wmi_p2p_noa_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_TWT_ENABLE_EVENTID:
- case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath12k_dbg(ab, ATH12K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
@@ -6772,6 +6971,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_VDEV_DELETE_RESP_EVENTID:
ath12k_vdev_delete_resp_event(ab, skb);
break;
+ case WMI_DIAG_EVENTID:
+ ath12k_wmi_diag_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 06e5b9b4049b..103462feb935 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -168,10 +168,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_PARAMS_MAX_SSID 16
-#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -2163,6 +2159,10 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_11BE = 289,
+ WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361,
+
+ WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
+
WMI_MAX_EXT2_SERVICE,
};
@@ -2350,6 +2350,7 @@ struct ath12k_wmi_resource_config_arg {
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
bool is_reg_cc_ext_event_supported;
+ u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2402,6 +2403,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2542,9 +2544,17 @@ struct ath12k_wmi_hw_mode_cap_params {
#define WMI_MAX_HECAP_PHY_SIZE (3)
+/* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in
+ * ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
+ *
+ * hw_link_id is present in higher 16 bits of pdev_and_hw_link_ids.
+ */
+#define WMI_CAPS_PARAMS_PDEV_ID GENMASK(15, 0)
+#define WMI_CAPS_PARAMS_HW_LINK_ID GENMASK(31, 16)
+
struct ath12k_wmi_mac_phy_caps_params {
__le32 hw_mode_id;
- __le32 pdev_id;
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 supported_flags;
__le32 supported_bands;
@@ -2636,13 +2646,7 @@ struct wmi_service_ready_ext2_event {
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
- union {
- struct {
- __le16 pdev_id;
- __le16 hw_link_id;
- } __packed ath12k_wmi_pdev_to_link_map;
- __le32 pdev_id;
- };
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 wireless_modes_ext;
__le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE];
@@ -2716,6 +2720,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_mac_addr_params vdev_macaddr;
__le32 num_cfg_txrx_streams;
__le32 pdev_id;
+ __le32 mbssid_flags;
+ __le32 mbssid_tx_vdev_id;
+ __le32 vdev_stats_id_valid;
__le32 vdev_stats_id;
} __packed;
@@ -2764,6 +2771,10 @@ struct ath12k_wmi_ssid_params {
#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+enum wmi_vdev_mbssid_flags {
+ WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+};
+
struct wmi_vdev_start_request_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2782,7 +2793,7 @@ struct wmi_vdev_start_request_cmd {
__le32 cac_duration_ms;
__le32 regdomain;
__le32 min_data_rate;
- __le32 mbssid_flags;
+ __le32 mbssid_flags; /* uses enum wmi_vdev_mbssid_flags */
__le32 mbssid_tx_vdev_id;
__le32 eht_ops;
__le32 punct_bitmap;
@@ -3146,7 +3157,7 @@ struct ath12k_wmi_element_info_arg {
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
/* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason.
@@ -3270,24 +3281,19 @@ struct ath12k_wmi_scan_req_arg {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 dwell_time_active;
u32 dwell_time_active_2g;
u32 dwell_time_passive;
@@ -3300,36 +3306,31 @@ struct ath12k_wmi_scan_req_arg {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -3489,6 +3490,37 @@ struct wmi_get_pdev_temperature_cmd {
__le32 pdev_id;
} __packed;
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ __le32 vdev_id;
+} __packed;
+
+struct ath12k_wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in microseconds */
+ __le32 interval; /* Absent period interval in microseconds */
+ __le32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath12k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ __le32 noa_attr;
+ struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@@ -3503,6 +3535,12 @@ struct wmi_bcn_tmpl_cmd {
__le32 esp_ie_offset;
} __packed;
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 ie_buf_len;
+} __packed;
+
struct wmi_vdev_install_key_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -4797,6 +4835,16 @@ struct wmi_rfkill_state_change_event {
__le32 radio_state;
} __packed;
+struct wmi_twt_enable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_twt_disable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4806,6 +4854,8 @@ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn);
@@ -4917,4 +4967,30 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+static inline u32
+ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_caps_ext_get_hw_link_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_pdev_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
#endif
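The inline helpers above undo a packing that firmware now applies to both caps structs: pdev_id in bits 15:0 and hw_link_id in bits 31:16 of a single __le32. A hedged round-trip sketch using the <linux/bitfield.h> encode/decode helpers (the literal IDs are made up; the driver itself only ever decodes):

struct ath12k_wmi_caps_ext_params caps = {
	/* firmware-side view: pdev 2 on hardware link 1 in one word */
	.pdev_and_hw_link_ids = le32_encode_bits(2, WMI_CAPS_PARAMS_PDEV_ID) |
				le32_encode_bits(1, WMI_CAPS_PARAMS_HW_LINK_ID),
};

u32 pdev_id = ath12k_wmi_caps_ext_get_pdev_id(&caps);		/* 2 */
u32 hw_link_id = ath12k_wmi_caps_ext_get_hw_link_id(&caps);	/* 1 */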
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index c630343ca4f9..eea4bda77608 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -779,6 +779,10 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
const struct ieee80211_ops ath5k_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath5k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath5k_start,
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e37db4af33de..61b2e3f15f0e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
wiphy_lock(vif->ar->wiphy);
- cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
wiphy_unlock(vif->ar->wiphy);
}
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 9bfaadfa6c00..1a6697b6e3b4 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -144,7 +144,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return ret;
}
-static int ath_ahb_remove(struct platform_device *pdev)
+static void ath_ahb_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -155,13 +155,11 @@ static int ath_ahb_remove(struct platform_device *pdev)
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
}
-
- return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
- .remove = ath_ahb_remove,
+ .remove_new = ath_ahb_remove,
.driver = {
.name = "ath9k",
},
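
ath9k's AHB glue is converted to the void-returning platform remove callback; since device removal cannot meaningfully fail, the returned int was dead weight. A sketch of the 6.9-era API, names hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* resource setup elided */
}

static void example_remove(struct platform_device *pdev)
{
	/* teardown cannot fail, so there is no error code to return */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,	/* void-returning remove */
	.driver = { .name = "example" },
};
module_platform_driver(example_driver);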
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 988222cea9df..acc84e6711b0 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
} else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
+ antcomb->rssi_lna2) {
/* set to A-B */
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 57e2b4c89125..ad72a30b67c3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -851,8 +851,6 @@
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
#define AR_PHY_POWER_TX_RATE_MAX 0x993c
#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
#define PHY_AGC_CLR 0x10000000
@@ -1041,13 +1039,6 @@
#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
-/*
- * AGC 3 Register Map
- */
-#define AR_AGC3_BASE 0xce00
-
-#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
-
/* GLB Registers */
#define AR_GLB_BASE 0x20000
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ee72faac2f1d..b399a7926ef5 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,10 +365,10 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 237f4ec2cffd..6c33e898b300 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
- bool initialized;
};
struct ath9k_htc_tx_ctl {
@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
unsigned long ps_usecount;
bool ps_enabled;
bool ps_idle;
+ bool initialized;
#ifdef CONFIG_MAC80211_LEDS
enum led_brightness brightness;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 533471e69400..547634f82183 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,10 +514,10 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
priv->csa_vif = NULL;
return true;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0aa5bdeb44a1..3633f9eb2c55 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
htc_handle->drv_priv = priv;
+ /* Allow ath9k_wmi_event_tasklet() to operate. */
+ smp_wmb();
+ priv->initialized = true;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a9b5212051a..b389e19381c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1868,6 +1868,10 @@ static void ath9k_htc_channel_switch_beacon(struct ieee80211_hw *hw,
}
struct ieee80211_ops ath9k_htc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_htc_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath9k_htc_start,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index efcaeccb055a..ce9c04e418b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
- smp_wmb();
- priv->tx.initialized = true;
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48ff0ffbfef..a2943aaecb20 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2786,6 +2786,10 @@ static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
struct ieee80211_ops ath9k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_tx,
.start = ath9k_start,
.stop = ath9k_stop,
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
index 955147ab48a2..f50994910eae 100644
--- a/drivers/net/wireless/ath/ath9k/reg_aic.h
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -17,10 +17,6 @@
#ifndef REG_AIC_H
#define REG_AIC_H
-#define AR_SM_BASE 0xa200
-#define AR_SM1_BASE 0xb200
-#define AR_AGC_BASE 0x9e00
-
#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 1476b42b52a9..805ad31edba2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
}
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ /* Check if ath9k_htc_probe_device() completed. */
+ if (!data_race(priv->initialized)) {
+ kfree_skb(skb);
+ continue;
+ }
+
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
- /* Check if ath9k_tx_init() completed. */
- if (!data_race(priv->tx.initialized))
- break;
-
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
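
The hunks above move the 'initialized' flag from the TX state to the whole device and check it before any event is handled, closing a window where the tasklet could run before probe finished. A minimal sketch of the publish/consume ordering this relies on (names hypothetical):

#include <linux/compiler.h>
#include <linux/types.h>

struct example_priv {
	int state;
	bool initialized;
};

static void example_publish(struct example_priv *priv)
{
	priv->state = 42;	/* finish all initialisation first */
	smp_wmb();		/* order the init before the flag store */
	priv->initialized = true;
}

static bool example_consume(struct example_priv *priv)
{
	if (!data_race(priv->initialized))
		return false;	/* too early: drop the work instead */
	/* a reader that went on to touch ->state would pair this with
	 * smp_rmb(); the driver above simply bails out when unset */
	return true;
}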
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f15684379b03..d519b676a109 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -369,12 +369,11 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
- int ret;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
+ while (ath_tid_dequeue(tid, &skb) == 0) {
fi = get_frame_info(skb);
bf = fi->bf;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 524327d24964..7e7797bf44b7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1712,6 +1712,10 @@ static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops carl9170_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = carl9170_op_start,
.stop = carl9170_op_stop,
.tx = carl9170_op_tx,
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6bb9aa2bfe65..e902ca80eba7 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -189,7 +189,7 @@ static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
- struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct _carl9170_tx_superframe *super;
unsigned int chunks;
int cookie = -1;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 4e6b4df8562f..bfbd3c7a70b3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1347,6 +1347,10 @@ static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif
}
static const struct ieee80211_ops wcn36xx_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wcn36xx_start,
.stop = wcn36xx_stop,
.add_interface = wcn36xx_add_interface,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 447b51cff8f9..0b55a272bfd6 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2178,6 +2178,10 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
static const struct ieee80211_ops at76_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = at76_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = at76_add_interface,
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 67b4bac048e5..c0d8fc0b22fb 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_wake_queue(dev->wl->hw, 0);
+}
+
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_stop_queue(dev->wl->hw, 0);
+}
+
/* Message printing */
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
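
These wrappers exist because, as the main.c hunk below shows, the driver registers only a single mac80211 queue when QoS is disabled; waking or stopping a priority-numbered queue would then address a queue that does not exist. The collapse they implement, in one line:

/* map an internal ring priority to the mac80211 queue index */
static inline int example_mac80211_queue(bool qos_enabled, int prio)
{
	return qos_enabled ? prio : 0;	/* only queue 0 without QoS */
}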
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 760d1a28edc6..6ac7dcebfff9 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ b43_stop_queue(dev, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+ b43_wake_queue(dev, ring->queue_prio);
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 92ca0b2ca286..badb2f494035 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
start_ieee80211:
wl->hw->queues = B43_QOS_QUEUE_NUM;
- if (!modparam_qos || dev->fw.opensource)
+ if (!modparam_qos || dev->fw.opensource ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
wl->hw->queues = 1;
err = ieee80211_register_hw(wl->hw);
@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
wl->tx_queue_stopped[queue_num] = true;
- ieee80211_stop_queue(wl->hw, queue_num);
+ b43_stop_queue(dev, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
+ u16 skb_queue_mapping;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
- } else {
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
- }
+ else
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -5170,6 +5172,10 @@ static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index d050971d150a..26a226126bc4 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -322,8 +322,8 @@ static void b43_phy_ht_bphy_reset(struct b43_wldev *dev, bool reset)
B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
else
b43_phy_mask(dev, B43_PHY_B_BBCFG,
- (u16)~(B43_PHY_B_BBCFG_RSTCCA |
- B43_PHY_B_BBCFG_RSTRX));
+ 0xffff & ~(B43_PHY_B_BBCFG_RSTCCA |
+ B43_PHY_B_BBCFG_RSTRX));
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp);
}
@@ -551,7 +551,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
phy_ht->tx_pwr_idx[i] =
b43_phy_read(dev, status_regs[i]);
}
- b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, ~en_bits);
+ b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0xffff & ~en_bits);
} else {
b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
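
Both spellings produce the same register value; the rewrite presumably avoids the truncating cast, which static checkers flag as a value-changing conversion, by keeping the expression an int whose value already fits in 16 bits. A sketch with a hypothetical bit pattern:

#include <linux/types.h>

static inline u16 example_mask16(u16 bits)
{
	/* with bits == 0x6000, ~bits promotes to the 32-bit int
	 * 0xffff9fff; masking with 0xffff yields 0x9fff explicitly,
	 * the same value a bare (u16) cast would truncate to */
	return 0xffff & ~bits;
}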
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 2c0c019a815d..4bb005b93f2c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -6246,7 +6246,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
/* Take BPHY out of the reset */
b43_phy_mask(dev, B43_PHY_B_BBCFG,
- (u16)~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX));
+ ~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX) & 0xffff);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
}
@@ -6377,7 +6377,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
} else if (channel_type == NL80211_CHAN_HT40MINUS) {
b43_phy_mask(dev, B43_NPHY_RXCTL, ~B43_NPHY_RXCTL_BSELU20);
if (phy->rev >= 7)
- b43_phy_mask(dev, 0x310, (u16)~0x8000);
+ b43_phy_mask(dev, 0x310, 0x7fff);
}
if (phy->rev >= 19) {
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 0cf70fdb60a6..e41f2f5b4c26 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */
err = -EBUSY;
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
goto out;
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) {
/* The queue is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
list_add(&pack->list, &q->packets_list);
if (q->stopped) {
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
+ b43_wake_queue(dev, q->queue_prio);
q->stopped = false;
}
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 760136638a95..18eb610f600a 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3531,6 +3531,10 @@ static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43legacy_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43legacy_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43legacy_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
index ac3a36fa3640..f471c962104a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
@@ -7,21 +7,33 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <feature.h>
#include "vops.h"
-static int brcmf_bca_attach(struct brcmf_pub *drvr)
+#define BRCMF_BCA_E_LAST 212
+
+static void brcmf_bca_feat_attach(struct brcmf_if *ifp)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+ /* SAE support not confirmed so disabling for now */
+ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE);
}
-static void brcmf_bca_detach(struct brcmf_pub *drvr)
+static int brcmf_bca_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_BCA_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_BCA_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_bca_ops = {
- .attach = brcmf_bca_attach,
- .detach = brcmf_bca_detach,
+ .feat_attach = brcmf_bca_feat_attach,
+ .alloc_fweh_info = brcmf_bca_alloc_fweh_info,
};
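
The allocation above sizes the trailing evt_handler[] flexible array with struct_size(), which multiplies and adds with overflow checking, and (as the fweh.h hunk further down shows) the array is annotated __counted_by() so bounds-checked accesses know its length. A condensed sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

typedef int (*example_handler_t)(void *arg);

struct example_info {
	unsigned int n;
	example_handler_t handler[] __counted_by(n);
};

static struct example_info *example_alloc(unsigned int n)
{
	struct example_info *info;

	/* struct_size() == sizeof(*info) + n * sizeof(info->handler[0]),
	 * saturating instead of wrapping on overflow */
	info = kzalloc(struct_size(info, handler, n), GFP_KERNEL);
	if (!info)
		return NULL;
	info->n = n;	/* set the counter before the array is used */
	return info;
}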
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 28d6a30cc010..b99aa66dc5a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,6 +32,7 @@
#include "vendor.h"
#include "bus.h"
#include "common.h"
+#include "fwvid.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
@@ -1179,8 +1180,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
scan_request = cfg->scan_request;
cfg->scan_request = NULL;
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
+ timer_delete_sync(&cfg->escan_timeout);
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
@@ -1687,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
return reason;
}
-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
int err;
+ if (key_len > sizeof(pmk.key)) {
+ bphy_err(drvr, "key must be less than %zu bytes\n",
+ sizeof(pmk.key));
+ return -EINVAL;
+ }
+
memset(&pmk, 0, sizeof(pmk));
- /* pass pmk directly */
- pmk.key_len = cpu_to_le16(pmk_len);
- pmk.flags = cpu_to_le16(0);
- memcpy(pmk.key, pmk_data, pmk_len);
+ /* pass key material directly */
+ pmk.key_len = cpu_to_le16(key_len);
+ pmk.flags = cpu_to_le16(flags);
+ memcpy(pmk.key, key, key_len);
- /* store psk in firmware */
+ /* store key material in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ key_len);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
- u16 pwd_len)
+static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_wsec_sae_pwd_le sae_pwd;
- int err;
-
- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- bphy_err(drvr, "sae_password must be less than %d\n",
- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- return -EINVAL;
- }
-
- sae_pwd.key_len = cpu_to_le16(pwd_len);
- memcpy(sae_pwd.key, pwd_data, pwd_len);
-
- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- sizeof(sae_pwd));
- if (err < 0)
- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- pwd_len);
-
- return err;
+ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
}
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
@@ -2503,8 +2490,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
}
- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
- sme->crypto.sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
if (!err && sme->crypto.psk)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
@@ -3081,7 +3067,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
- u32 rate;
+ u32 rate = 0;
u32 rssi;
/* Get the current tx rate */
@@ -4322,6 +4308,9 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
int ret;
pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL);
+ if (!pmk_op)
+ return -ENOMEM;
+
pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3);
if (!pmksa) {
@@ -5115,6 +5104,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
bool mbss;
int is_11d;
bool supports_11d;
+ bool closednet;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -5254,8 +5244,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if (crypto->sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
- err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
- crypto->sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, crypto);
if (err < 0)
goto exit;
}
@@ -5285,12 +5274,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
- err = brcmf_fil_iovar_int_set(ifp, "closednet",
- settings->hidden_ssid);
+ closednet =
+ (settings->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
+ err = brcmf_fil_iovar_int_set(ifp, "closednet", closednet);
if (err) {
bphy_err(drvr, "%s closednet error (%d)\n",
- settings->hidden_ssid ?
- "enabled" : "disabled",
+ (closednet ? "enabled" : "disabled"),
err);
goto exit;
}
@@ -5362,10 +5351,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
msleep(400);
if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ struct cfg80211_crypto_settings crypto = {};
+
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
brcmf_set_pmk(ifp, NULL, 0);
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
- brcmf_set_sae_password(ifp, NULL, 0);
+ brcmf_fwvid_set_sae_password(ifp, &crypto);
profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
}
@@ -7271,7 +7262,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
u32 nmode = 0;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- u32 rxchain;
+ u32 rxchain = 0;
u32 nchain;
int err;
s32 i;
@@ -8437,6 +8428,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
wl_deinit_priv(cfg);
+ cancel_work_sync(&cfg->escan_timeout_work);
brcmf_free_wiphy(cfg->wiphy);
kfree(cfg);
}
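
The escan change above drops the timer_pending() pre-check: timer_delete_sync() already copes with a timer that was never armed or has expired, and checking first is racy anyway, since the timer can fire between the check and the delete. A minimal sketch:

#include <linux/timer.h>

static void example_stop_timer(struct timer_list *t)
{
	/* safe whether or not the timer is queued; also waits for a
	 * handler running on another CPU to finish */
	timer_delete_sync(t);
}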
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 0e1fa3f0dea2..dc3a6a537507 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+
#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index b6d458e022fa..b24faae35873 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -266,7 +266,7 @@ static int brcmf_c_process_cal_blob(struct brcmf_if *ifp)
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_fweh_info *fweh = drvr->fweh;
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_bus *bus;
struct brcmf_rev_info_le revinfo;
@@ -413,15 +413,21 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_c_set_joinpref_default(ifp);
/* Setup event_msgs, enable E_IF */
- err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Get event_msgs error (%d)\n", err);
goto done;
}
- setbit(eventmask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ /*
+ * BRCMF_E_IF can safely be used to set the appropriate bit
+ * in the event_mask as the firmware event code is guaranteed
+ * to match the value of BRCMF_E_IF because it is old cruft
+ * that all vendors have.
+ */
+ setbit(fweh->event_mask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Set event_msgs error (%d)\n", err);
goto done;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f599d5f896e8..bf91b1e1368f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- u32 monitor;
+ u32 monitor = 0;
int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -1348,13 +1348,17 @@ int brcmf_attach(struct device *dev)
goto fail;
}
+ /* attach firmware event handler */
+ ret = brcmf_fweh_attach(drvr);
+ if (ret != 0) {
+ bphy_err(drvr, "brcmf_fweh_attach failed\n");
+ goto fail;
+ }
+
/* Attach to events important for core code */
brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
brcmf_psm_watchdog_notify);
- /* attach firmware event handler */
- brcmf_fweh_attach(drvr);
-
ret = brcmf_bus_started(drvr, drvr->ops);
if (ret != 0) {
bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index e4f911dd414b..ea76b8d33401 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -122,7 +122,7 @@ struct brcmf_pub {
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- struct brcmf_fweh_info fweh;
+ struct brcmf_fweh_info *fweh;
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index b75652ba9359..9a4837881486 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -7,21 +7,53 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <fwil.h>
#include "vops.h"
-static int brcmf_cyw_attach(struct brcmf_pub *drvr)
+#define BRCMF_CYW_E_LAST 197
+
+static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ u16 pwd_len = crypto->sae_pwd_len;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
}
-static void brcmf_cyw_detach(struct brcmf_pub *drvr)
+static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_CYW_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_CYW_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
- .attach = brcmf_cyw_attach,
- .detach = brcmf_cyw_detach,
+ .set_sae_password = brcmf_cyw_set_sae_pwd,
+ .alloc_fweh_info = brcmf_cyw_alloc_fweh_info,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 86ff174936a9..c3a602197662 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -83,6 +83,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 6d10c9efbe93..f23310a77a5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -13,6 +13,7 @@
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
+#include "fwvid.h"
#include "feature.h"
#include "common.h"
@@ -183,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
enum brcmf_feat_id id, char *name)
{
- u32 data;
+ u32 data = 0;
int err;
/* we need to know firmware error */
@@ -339,6 +340,11 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+ brcmf_feat_wlc_version_overrides(drvr);
+ brcmf_feat_firmware_overrides(drvr);
+
+ brcmf_fwvid_feat_attach(ifp);
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
@@ -346,9 +352,6 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_wlc_version_overrides(drvr);
- brcmf_feat_firmware_overrides(drvr);
-
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 68960ae98987..f0b6a7607f16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -14,7 +14,8 @@
#include "fweh.h"
#include "fwil.h"
#include "proto.h"
-
+#include "bus.h"
+#include "fwvid.h"
/**
* struct brcmf_fweh_queue_item - event item on event queue.
*
@@ -28,7 +29,7 @@
*/
struct brcmf_fweh_queue_item {
struct list_head q;
- enum brcmf_fweh_event_code code;
+ u32 code;
u8 ifidx;
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
@@ -94,7 +95,7 @@ static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
struct brcmf_if *ifp,
- enum brcmf_fweh_event_code code,
+ u32 fwcode,
struct brcmf_event_msg *emsg,
void *data)
{
@@ -102,13 +103,13 @@ static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
int err = -EINVAL;
if (ifp) {
- fweh = &ifp->drvr->fweh;
+ fweh = ifp->drvr->fweh;
/* handle the event if valid interface and handler */
- if (fweh->evt_handler[code])
- err = fweh->evt_handler[code](ifp, emsg, data);
+ if (fweh->evt_handler[fwcode])
+ err = fweh->evt_handler[fwcode](ifp, emsg, data);
else
- bphy_err(drvr, "unhandled event %d ignored\n", code);
+ bphy_err(drvr, "unhandled fwevt %d ignored\n", fwcode);
} else {
bphy_err(drvr, "no interface object\n");
}
@@ -142,7 +143,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
(ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
- (drvr->fweh.p2pdev_setup_ongoing))));
+ (drvr->fweh->p2pdev_setup_ongoing))));
if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
brcmf_dbg(EVENT, "event can be ignored\n");
return;
@@ -163,7 +164,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
if (!is_p2pdev)
brcmf_proto_add_if(drvr, ifp);
- if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+ if (!drvr->fweh->evt_handler[BRCMF_E_IF])
if (brcmf_net_attach(ifp, false) < 0)
return;
}
@@ -183,6 +184,45 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
}
}
+static void brcmf_fweh_map_event_code(struct brcmf_fweh_info *fweh,
+ enum brcmf_fweh_event_code code,
+ u32 *fw_code)
+{
+ int i;
+
+ if (WARN_ON(!fw_code))
+ return;
+
+ *fw_code = code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].code == code) {
+ *fw_code = fweh->event_map->items[i].fwevt_code;
+ break;
+ }
+ }
+ }
+}
+
+static void brcmf_fweh_map_fwevt_code(struct brcmf_fweh_info *fweh, u32 fw_code,
+ enum brcmf_fweh_event_code *code)
+{
+ int i;
+
+ if (WARN_ON(!code))
+ return;
+
+ *code = fw_code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].fwevt_code == fw_code) {
+ *code = fweh->event_map->items[i].code;
+ break;
+ }
+ }
+ }
+}
+
/**
* brcmf_fweh_dequeue_event() - get event from the queue.
*
@@ -221,15 +261,19 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
struct brcmf_event_msg emsg;
fweh = container_of(work, struct brcmf_fweh_info, event_work);
- drvr = container_of(fweh, struct brcmf_pub, fweh);
+ drvr = fweh->drvr;
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
- brcmf_fweh_event_name(event->code), event->code,
+ enum brcmf_fweh_event_code code;
+
+ brcmf_fweh_map_fwevt_code(fweh, event->code, &code);
+ brcmf_dbg(EVENT, "event %s (%u:%u) ifidx %u bsscfg %u addr %pM\n",
+ brcmf_fweh_event_name(code), code, event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
- bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ bphy_err(drvr, "invalid bsscfg index: %u\n",
+ event->emsg.bsscfgidx);
goto event_free;
}
@@ -237,7 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
emsg_be = &event->emsg;
emsg.version = be16_to_cpu(emsg_be->version);
emsg.flags = be16_to_cpu(emsg_be->flags);
- emsg.event_code = event->code;
+ emsg.event_code = code;
emsg.status = be32_to_cpu(emsg_be->status);
emsg.reason = be32_to_cpu(emsg_be->reason);
emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
@@ -283,7 +327,7 @@ event_free:
*/
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
{
- ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
+ ifp->drvr->fweh->p2pdev_setup_ongoing = ongoing;
}
/**
@@ -291,12 +335,27 @@ void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
*
* @drvr: driver information object.
*/
-void brcmf_fweh_attach(struct brcmf_pub *drvr)
+int brcmf_fweh_attach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh;
+ int err;
+
+ err = brcmf_fwvid_alloc_fweh_info(drvr);
+ if (err < 0)
+ return err;
+
+ fweh = drvr->fweh;
+ fweh->drvr = drvr;
+
+ fweh->event_mask_len = DIV_ROUND_UP(fweh->num_event_codes, 8);
+ fweh->event_mask = kzalloc(fweh->event_mask_len, GFP_KERNEL);
+ if (!fweh->event_mask)
+ return -ENOMEM;
+
INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
spin_lock_init(&fweh->evt_q_lock);
INIT_LIST_HEAD(&fweh->event_q);
+ return 0;
}
/**
@@ -306,14 +365,19 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
*/
void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+
+ if (!fweh)
+ return;
/* cancel the worker if initialized */
if (fweh->event_work.func) {
cancel_work_sync(&fweh->event_work);
WARN_ON(!list_empty(&fweh->event_q));
- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
}
+ drvr->fweh = NULL;
+ kfree(fweh->event_mask);
+ kfree(fweh);
}
/**
@@ -326,11 +390,17 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_handler_t handler)
{
- if (drvr->fweh.evt_handler[code]) {
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+ u32 evt_handler_idx;
+
+ brcmf_fweh_map_event_code(fweh, code, &evt_handler_idx);
+
+ if (fweh->evt_handler[evt_handler_idx]) {
bphy_err(drvr, "event code %d already registered\n", code);
return -ENOSPC;
}
- drvr->fweh.evt_handler[code] = handler;
+
+ fweh->evt_handler[evt_handler_idx] = handler;
brcmf_dbg(TRACE, "event handler registered for %s\n",
brcmf_fweh_event_name(code));
return 0;
@@ -345,9 +415,12 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
void brcmf_fweh_unregister(struct brcmf_pub *drvr,
enum brcmf_fweh_event_code code)
{
+ u32 evt_handler_idx;
+
brcmf_dbg(TRACE, "event handler cleared for %s\n",
brcmf_fweh_event_name(code));
- drvr->fweh.evt_handler[code] = NULL;
+ brcmf_fweh_map_event_code(drvr->fweh, code, &evt_handler_idx);
+ drvr->fweh->evt_handler[evt_handler_idx] = NULL;
}
/**
@@ -357,27 +430,28 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
*/
int brcmf_fweh_activate_events(struct brcmf_if *ifp)
{
- struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
+ enum brcmf_fweh_event_code code;
int i, err;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- memset(eventmask, 0, sizeof(eventmask));
- for (i = 0; i < BRCMF_E_LAST; i++) {
- if (ifp->drvr->fweh.evt_handler[i]) {
+ memset(fweh->event_mask, 0, fweh->event_mask_len);
+ for (i = 0; i < fweh->num_event_codes; i++) {
+ if (fweh->evt_handler[i]) {
+ brcmf_fweh_map_fwevt_code(fweh, i, &code);
brcmf_dbg(EVENT, "enable event %s\n",
- brcmf_fweh_event_name(i));
- setbit(eventmask, i);
+ brcmf_fweh_event_name(code));
+ setbit(fweh->event_mask, i);
}
}
/* want to handle IF event as well */
brcmf_dbg(EVENT, "enable event IF\n");
- setbit(eventmask, BRCMF_E_IF);
+ setbit(fweh->event_mask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
- eventmask, BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err)
- bphy_err(drvr, "Set event_msgs error (%d)\n", err);
+ bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
return err;
}
@@ -397,21 +471,21 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
u32 packet_len, gfp_t gfp)
{
- enum brcmf_fweh_event_code code;
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ u32 fwevt_idx;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
struct brcmf_fweh_queue_item *event;
void *data;
u32 datalen;
/* get event info */
- code = get_unaligned_be32(&event_packet->msg.event_type);
+ fwevt_idx = get_unaligned_be32(&event_packet->msg.event_type);
datalen = get_unaligned_be32(&event_packet->msg.datalen);
data = &event_packet[1];
- if (code >= BRCMF_E_LAST)
+ if (fwevt_idx >= fweh->num_event_codes)
return;
- if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+ if (fwevt_idx != BRCMF_E_IF && !fweh->evt_handler[fwevt_idx])
return;
if (datalen > BRCMF_DCMD_MAXLEN ||
@@ -422,8 +496,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (!event)
return;
+ event->code = fwevt_idx;
event->datalen = datalen;
- event->code = code;
event->ifidx = event_packet->msg.ifidx;
/* use memcpy to get aligned event message */
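
The two mapping helpers added above translate between the driver's abstract event codes and the vendor firmware's numbering, falling back to the identity mapping when no table is installed. The core of the lookup, condensed:

static u32 example_to_fwevt_code(const struct brcmf_fweh_event_map *map,
				 u32 code)
{
	u32 i;

	if (map) {
		for (i = 0; i < map->n_items; i++)
			if (map->items[i].code == code)
				return map->items[i].fwevt_code;
	}
	return code;	/* identity: most codes match the firmware's */
}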
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 48414e8b9389..9ca1b2aadcb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -17,6 +17,10 @@ struct brcmf_pub;
struct brcmf_if;
struct brcmf_cfg80211_info;
+#define BRCMF_ABSTRACT_EVENT_BIT BIT(31)
+#define BRCMF_ABSTRACT_ENUM_DEF(_id, _val) \
+ BRCMF_ENUM_DEF(_id, (BRCMF_ABSTRACT_EVENT_BIT | (_val)))
+
/* list of firmware events */
#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
BRCMF_ENUM_DEF(SET_SSID, 0) \
@@ -98,16 +102,9 @@ struct brcmf_cfg80211_info;
/* firmware event codes sent by the dongle */
enum brcmf_fweh_event_code {
BRCMF_FWEH_EVENT_ENUM_DEFLIST
- /* this determines event mask length which must match
- * minimum length check in device firmware so it is
- * hard-coded here.
- */
- BRCMF_E_LAST = 139
};
#undef BRCMF_ENUM_DEF
-#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
-
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
@@ -288,27 +285,66 @@ typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
void *data);
/**
+ * struct brcmf_fweh_event_map_item - fweh event and firmware event pair.
+ *
+ * @code: fweh event code as used by higher layers.
+ * @fwevt_code: firmware event code as used by firmware.
+ *
+ * This mapping is needed when a functionally identical event has a
+ * different numerical definition between vendors. When such a mapping
+ * is needed, the higher-layer event code should not collide with the
+ * firmware event code.
+ */
+struct brcmf_fweh_event_map_item {
+ enum brcmf_fweh_event_code code;
+ u32 fwevt_code;
+};
+
+/**
+ * struct brcmf_fweh_event_map - mapping between firmware event and fweh event.
+ *
+ * @n_items: number of mapping items.
+ * @items: array of fweh event and firmware event pairs.
+ */
+struct brcmf_fweh_event_map {
+ u32 n_items;
+ const struct brcmf_fweh_event_map_item items[] __counted_by(n_items);
+};
+
+/**
* struct brcmf_fweh_info - firmware event handling information.
*
* @p2pdev_setup_ongoing: P2P device creation in progress.
* @event_work: event worker.
* @evt_q_lock: lock for event queue protection.
* @event_q: event queue.
- * @evt_handler: registered event handlers.
+ * @event_mask_len: length of @event_mask used to enable firmware events.
+ * @event_mask: byte array used in 'event_msgs' iovar command.
+ * @event_map: mapping between fweh event and firmware event which
+ * may be provided by vendor-specific module for events that need
+ * mapping.
+ * @num_event_codes: number of event codes supported by the firmware, which
+ * uses it for a minimum length check on @event_mask. This value is
+ * provided by the vendor-specific module and determines @event_mask_len
+ * and consequently the allocation size for @event_mask.
+ * @evt_handler: event handler registry indexed by firmware event code.
*/
struct brcmf_fweh_info {
+ struct brcmf_pub *drvr;
bool p2pdev_setup_ongoing;
struct work_struct event_work;
spinlock_t evt_q_lock;
struct list_head event_q;
- int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
- const struct brcmf_event_msg *evtmsg,
- void *data);
+ uint event_mask_len;
+ u8 *event_mask;
+ struct brcmf_fweh_event_map *event_map;
+ uint num_event_codes;
+ brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes);
};
const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code);
-void brcmf_fweh_attach(struct brcmf_pub *drvr);
+int brcmf_fweh_attach(struct brcmf_pub *drvr);
void brcmf_fweh_detach(struct brcmf_pub *drvr);
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
int (*handler)(struct brcmf_if *ifp,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 72fe8bce6eaf..6385a7db7f7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
s32
brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
@@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
-
-
-s32
-brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(data);
-
- mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
- mutex_unlock(&ifp->drvr->proto_block);
-
- return err;
-}
-
-s32
-brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(*data);
-
- mutex_lock(&ifp->drvr->proto_block);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
- mutex_unlock(&ifp->drvr->proto_block);
- *data = le32_to_cpu(data_le);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
-
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
static u32
brcmf_create_iovar(const char *name, const char *data, u32 datalen,
@@ -239,6 +211,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set);
s32
brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
@@ -270,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
-}
-
-s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
static u32
brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
@@ -364,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
s32
brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
@@ -394,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
- sizeof(data_le));
-}
-
-s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
- sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
@@ -465,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
@@ -494,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
- sizeof(data_le));
-}
-
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
-
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
-{
- return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
-}
-
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
-{
- __le16 data_le = cpu_to_le16(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le16_to_cpu(data_le);
- return err;
-}
-
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get);
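
The BRCMF_EXPORT_SYMBOL_GPL() markings make the remaining data-path helpers callable from the vendor-specific modules; assuming the macro wraps a namespaced export (its definition is not part of this hunk), the pattern looks like:

#include <linux/module.h>

int example_helper(void)
{
	return 0;
}
/* visible only to modules that import the namespace */
EXPORT_SYMBOL_NS_GPL(example_helper, BRCMFMAC);

/* in the consuming vendor module: */
MODULE_IMPORT_NS(BRCMFMAC);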
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index bc693157c4b1..a315a7fac6a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -81,29 +81,122 @@
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
-s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
-s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+static inline
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
- u32 len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+ err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le));
+
+ return err;
+}
+static inline
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(*data);
+
+ err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+
+ return err;
+}
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
+ const void *data, u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+static inline
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+static inline
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+ sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
+static inline
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+static inline
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u8 *data)
+{
+ return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
+}
+static inline
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u16 *data)
+{
+ __le16 data_le = cpu_to_le16(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le16_to_cpu(data_le);
+ return err;
+}
#endif /* _fwil_h_ */
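
With the u32 convenience wrappers now static inline in the header, vendor modules get them without additional exported symbols; each wrapper converts between host order and the firmware's little-endian representation. A usage sketch with a hypothetical iovar name:

static int example_enable_feature(struct brcmf_if *ifp)
{
	u32 val = 0;
	s32 err;

	err = brcmf_fil_iovar_int_get(ifp, "example_feat", &val);
	if (err)
		return err;

	return brcmf_fil_iovar_int_set(ifp, "example_feat", val | 1);
}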
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 9d248ba1c0b2..e74a23e11830 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le {
struct brcmf_wsec_pmk_le {
__le16 key_len;
__le16 flags;
- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
index 86eafdb40541..41eafcda77f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
@@ -90,7 +90,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod,
return -ERANGE;
if (WARN_ON(!vmod) || WARN_ON(!vops) ||
- WARN_ON(!vops->attach) || WARN_ON(!vops->detach))
+ WARN_ON(!vops->alloc_fweh_info))
return -EINVAL;
if (WARN_ON(fwvid_list[fwvid].vmod))
@@ -150,7 +150,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid)
}
#endif
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
+int brcmf_fwvid_attach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
int ret;
@@ -175,7 +175,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
return ret;
}
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
+void brcmf_fwvid_detach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
@@ -187,9 +187,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
mutex_lock(&fwvid_list_lock);
- drvr->vops = NULL;
- list_del(&drvr->bus_if->list);
-
+ if (drvr->vops) {
+ drvr->vops = NULL;
+ list_del(&drvr->bus_if->list);
+ }
mutex_unlock(&fwvid_list_lock);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index 43df58bb70ad..e6ac9fc341bc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -6,12 +6,15 @@
#define FWVID_H_
#include "firmware.h"
+#include "cfg80211.h"
struct brcmf_pub;
+struct brcmf_if;
struct brcmf_fwvid_ops {
- int (*attach)(struct brcmf_pub *drvr);
- void (*detach)(struct brcmf_pub *drvr);
+ void (*feat_attach)(struct brcmf_if *ifp);
+ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
+ int (*alloc_fweh_info)(struct brcmf_pub *drvr);
};
/* exported functions */
@@ -20,28 +23,37 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod,
int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod);
/* core driver functions */
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr);
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr);
+int brcmf_fwvid_attach(struct brcmf_pub *drvr);
+void brcmf_fwvid_detach(struct brcmf_pub *drvr);
const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr);
-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr)
+static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp)
{
- int ret;
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
- ret = brcmf_fwvid_attach_ops(drvr);
- if (ret)
- return ret;
+ if (!vops->feat_attach)
+ return;
- return drvr->vops->attach(drvr);
+ vops->feat_attach(ifp);
}
-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr)
+static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
+{
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->set_sae_password)
+ return -EOPNOTSUPP;
+
+ return vops->set_sae_password(ifp, crypto);
+}
+
+static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr)
{
if (!drvr->vops)
- return;
+ return -EIO;
- drvr->vops->detach(drvr);
- brcmf_fwvid_detach_ops(drvr);
+ return drvr->vops->alloc_fweh_info(drvr);
}
#endif /* FWVID_H_ */
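The reworked fwvid.h treats every vendor callback as optional: each inline dispatcher NULL-checks the ops table (and the individual member) before calling through it, and either returns quietly or reports -EOPNOTSUPP so callers can tell an unimplemented hook from a real failure. A self-contained sketch of that pattern, using entirely hypothetical widget_* names:

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	struct widget;

	/* Hypothetical vendor ops table: every member may be NULL. */
	struct widget_ops {
		void (*feat_attach)(struct widget *w);
		int (*set_password)(struct widget *w, const char *pwd);
	};

	struct widget {
		const struct widget_ops *ops;
	};

	/* Dispatchers mirror the brcmf_fwvid_* shape: check, then call. */
	static void widget_feat_attach(struct widget *w)
	{
		if (!w->ops || !w->ops->feat_attach)
			return; /* silently optional */
		w->ops->feat_attach(w);
	}

	static int widget_set_password(struct widget *w, const char *pwd)
	{
		if (!w->ops || !w->ops->set_password)
			return -EOPNOTSUPP; /* distinguishable from failure */
		return w->ops->set_password(w, pwd);
	}

	static int demo_set_password(struct widget *w, const char *pwd)
	{
		(void)w;
		(void)pwd;
		return 0;
	}

	int main(void)
	{
		struct widget bare = { .ops = NULL };
		const struct widget_ops ops = { .set_password = demo_set_password };
		struct widget full = { .ops = &ops };

		widget_feat_attach(&bare); /* safe no-op */
		printf("%d %d\n", widget_set_password(&bare, "x"),
		       widget_set_password(&full, "x")); /* -95 0 on Linux */
		return 0;
	}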
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
index 5573a47766ad..05d7c2a4fba5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
@@ -7,21 +7,34 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <cfg80211.h>
#include "vops.h"
-static int brcmf_wcc_attach(struct brcmf_pub *drvr)
+#define BRCMF_WCC_E_LAST 213
+
+static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_debug("%s: executing\n", __func__);
- return 0;
+ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len,
+ BRCMF_WSEC_PASSPHRASE);
}
-static void brcmf_wcc_detach(struct brcmf_pub *drvr)
+static int brcmf_wcc_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_debug("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_WCC_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_WCC_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_wcc_ops = {
- .attach = brcmf_wcc_attach,
- .detach = brcmf_wcc_detach,
+ .set_sae_password = brcmf_wcc_set_sae_pwd,
+ .alloc_fweh_info = brcmf_wcc_alloc_fweh_info,
};
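brcmf_wcc_alloc_fweh_info() sizes its allocation with struct_size(), the kernel helper for a struct that ends in a flexible array member. A userspace approximation of what it computes (header plus n trailing elements; the real helper additionally saturates on overflow rather than wrapping) might look like:

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-in for struct brcmf_fweh_info: a fixed header
	 * plus a flexible array of per-event handler slots. */
	struct fweh_info {
		unsigned int num_event_codes;
		void (*evt_handler[])(void); /* flexible array member */
	};

	int main(void)
	{
		unsigned int n = 213; /* cf. BRCMF_WCC_E_LAST in the patch */
		struct fweh_info *fweh;

		/* struct_size(fweh, evt_handler, n) is roughly this,
		 * except the kernel version saturates on overflow: */
		size_t sz = sizeof(*fweh) + n * sizeof(fweh->evt_handler[0]);

		fweh = calloc(1, sz); /* kzalloc(..., GFP_KERNEL) analogue */
		if (!fweh)
			return 1;

		fweh->num_event_codes = n;
		printf("allocated %zu bytes for %u handlers\n", sz, n);
		free(fweh);
		return 0;
	}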
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
index 89c8829528c2..9540a05247c2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <net/mac80211.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 543e93ec49d2..92860dc0a92e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -959,6 +959,10 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops brcms_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = brcms_ops_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = brcms_ops_start,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index ccc621b8ed9f..a27d6f0b8819 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+static void wlc_phy_timercb_phycal(void *ptr)
{
+ struct brcms_phy *pi = ptr;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
@@ -551,8 +552,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
if (!pi->phycal_timer)
goto err;
- if (!wlc_phy_attach_nphy(pi))
- goto err;
+ wlc_phy_attach_nphy(pi);
} else if (ISLCNPHY(pi)) {
if (!wlc_phy_attach_lcnphy(pi))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index 8668fa5558a2..70a9ec050717 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -941,7 +941,7 @@ void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+void wlc_phy_attach_nphy(struct brcms_phy *pi);
bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 7717eb85a1db..aae2cf95fe95 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -3299,7 +3299,7 @@ wlc_lcnphy_run_samples(struct brcms_phy *pi,
if (iqcalmode) {
- and_phy_reg(pi, 0x453, (u16) ~(0x1 << 15));
+ and_phy_reg(pi, 0x453, 0xffff & ~(0x1 << 15));
or_phy_reg(pi, 0x453, (0x1 << 15));
} else {
write_phy_reg(pi, 0x63f, 1);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 8580a2754789..d69879e1bd87 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14546,7 +14546,7 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
wlc_phy_txpwr_apply_nphy(pi);
}
-static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
+static void wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
@@ -14595,11 +14595,9 @@ static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
pi->phycal_tempdelta = 0;
wlc_phy_txpwr_srom_read_ppr_nphy(pi);
-
- return true;
}
-bool wlc_phy_attach_nphy(struct brcms_phy *pi)
+void wlc_phy_attach_nphy(struct brcms_phy *pi)
{
uint i;
@@ -14645,10 +14643,7 @@ bool wlc_phy_attach_nphy(struct brcms_phy *pi)
pi->pi_fptr.chanset = wlc_phy_chanspec_set_nphy;
pi->pi_fptr.txpwrrecalc = wlc_phy_txpower_recalc_target_nphy;
- if (!wlc_phy_txpwr_srom_read_nphy(pi))
- return false;
-
- return true;
+ wlc_phy_txpwr_srom_read_nphy(pi);
}
static s32 get_rf_pwr_offset(struct brcms_phy *pi, s16 pga_gn, s16 pad_gn)
@@ -17587,7 +17582,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
or_phy_reg(pi, 0x122, (0x1 << 0));
if (NREV_GE(pi->pubpi.phy_rev, 3))
- and_phy_reg(pi, 0x1e7, (u16) (~(0x1 << 15)));
+ and_phy_reg(pi, 0x1e7, 0x7fff);
else
or_phy_reg(pi, 0x1e7, (0x1 << 15));
@@ -18086,7 +18081,7 @@ wlc_phy_rfctrlintc_override_nphy(struct brcms_phy *pi, u8 field, u16 value,
(0x1 << 10));
and_phy_reg(pi, 0x2ff, (u16)
- ~(0x3 << 14));
+ 0xffff & ~(0x3 << 14));
or_phy_reg(pi, 0x2ff, (0x1 << 13));
or_phy_reg(pi, 0x2ff, (0x1 << 0));
} else {
@@ -21053,7 +21048,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
- (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
+ 0xffff & ~(BBCFG_RESETCCA | BBCFG_RESETRX));
bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
@@ -21287,7 +21282,8 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out),
+ 0xffff & ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -23197,7 +23193,7 @@ void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
or_phy_reg(pi, 0xc3, NPHY_sampleCmd_STOP);
else if (playback_status & 0x2)
and_phy_reg(pi, 0xc2,
- (u16) ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
+ 0xffff & ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
and_phy_reg(pi, 0xc3, (u16) ~(0x1 << 2));
@@ -28202,8 +28198,9 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
if (NREV_GE(pi->pubpi.phy_rev, 3))
and_phy_reg(pi, 0x1e7,
- (u16) (~((0x1 << 15) |
- (0x1 << 14) | (0x1 << 13))));
+ 0xffff & ~((0x1 << 15) |
+ (0x1 << 14) |
+ (0x1 << 13)));
else
and_phy_reg(pi, 0x1e7,
(u16) (~((0x1 << 14) | (0x1 << 13))));
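The mask rewrites in this file, e.g. (u16)~(0x1 << 15) becoming 0xffff & ~(0x1 << 15), keep the same 16-bit result but avoid a narrowing cast: ~ acts on a promoted int, so the intermediate value carries set bits above bit 15, and tools such as sparse warn that the cast drops them. A small standalone demonstration of the promotion:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* ~ promotes its operand to int, so set bits live above
		 * bit 15 until something strips them: */
		unsigned int raw = ~(0x1 << 15); /* 0xffff7fff w/ 32-bit int */

		uint16_t by_cast = (uint16_t)~(0x1 << 15); /* narrowing cast */
		uint16_t by_mask = 0xffff & ~(0x1 << 15);  /* explicit mask */

		printf("raw=%#x cast=%#x mask=%#x\n", raw, by_cast, by_mask);
		/* prints raw=0xffff7fff cast=0x7fff mask=0x7fff: the same
		 * value, but the mask form never carries bits a u16 cannot
		 * hold, so nothing is silently truncated. */
		return 0;
	}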
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index a0de5db0cd64..b72381791536 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
- arg, name);
+ brcms_init_timer(physhim->wl, fn, arg, name);
}
void wlapi_free_timer(struct wlapi_timer *t)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
index dd8774717ade..27d0934e600e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name);
void wlapi_free_timer(struct wlapi_timer *t);
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
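The timer-callback change is an indirect-call type-safety fix: the old code cast void (*)(struct brcms_phy *) to void (*)(void *) at registration time, and calling a function through a pointer of a different type is undefined behaviour that kernel CFI schemes trap at runtime. Moving the conversion into the callee keeps the registered and called types identical. A hedged userspace sketch of the fixed shape:

	#include <stdio.h>

	struct phy { int id; };

	/* After the fix: the callback itself takes void * and converts,
	 * so the core can invoke it through the exact registered type. */
	static void phycal_cb(void *ptr)
	{
		struct phy *pi = ptr; /* conversion happens inside */

		printf("calibrating phy %d\n", pi->id);
	}

	/* Minimal timer core: stores and calls a void (*)(void *). */
	struct timer {
		void (*fn)(void *);
		void *arg;
	};

	static void timer_fire(struct timer *t)
	{
		t->fn(t->arg); /* types match: no UB, CFI-friendly */
	}

	int main(void)
	{
		struct phy pi = { .id = 7 };
		/* Before the fix this was effectively
		 *   t.fn = (void (*)(void *))cb_taking_phy_ptr;
		 * which compiles, but fires an indirect call through the
		 * wrong function type. */
		struct timer t = { .fn = phycal_cb, .arg = &pi };

		timer_fire(&t);
		return 0;
	}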
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 9eaf5ec133f9..075b705a8d7b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3432,6 +3432,10 @@ static const struct attribute_group il3945_attribute_group = {
};
static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il3945_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il3945_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 70e420df1643..4beb7be6d51d 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6301,6 +6301,10 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
}
static const struct ieee80211_ops il4965_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il4965_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il4965_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 17570d62c896..9d33a66a49b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3438,9 +3438,7 @@ il_init_geos(struct il_priv *il)
if (!channels)
return -ENOMEM;
- rates =
- kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
- GFP_KERNEL);
+ rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);
if (!rates) {
kfree(channels);
return -ENOMEM;
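The kcalloc() conversion here is the usual hardening idiom: kzalloc(n * size) wraps silently if the multiplication overflows, whereas kcalloc(n, size, ...) validates the product and returns NULL instead. A userspace sketch of the check (toy_kcalloc is a hypothetical stand-in for the kernel helper):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Refuse counts whose product would not fit in size_t, then
	 * hand back zeroed memory, mirroring kcalloc()'s contract. */
	static void *toy_kcalloc(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)
			return NULL; /* would overflow: fail, don't wrap */
		return calloc(n, size);
	}

	struct rate { int hw_value; };

	int main(void)
	{
		/* cf. kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL) */
		struct rate *rates = toy_kcalloc(12, sizeof(*rates));

		if (!rates)
			return 1;
		printf("%d\n", rates[0].hw_value); /* zeroed: prints 0 */
		free(rates);
		return 0;
	}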
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 20971304fdef..4b04865fc2c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -46,6 +46,15 @@ config IWLWIFI
if IWLWIFI
+config IWLWIFI_KUNIT_TESTS
+ tristate
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option for iwlwifi kunit tests.
+
+ If unsure, say N.
+
config IWLWIFI_LEDS
bool
depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
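The new hidden tristate follows the common KUnit convention: it defaults to KUNIT_ALL_TESTS and gates the tests/ subdirectory wired into the Makefile below. For orientation only, a minimal KUnit suite of the kind such an option typically builds, with entirely hypothetical test names, looks like this (builds as a module under CONFIG_KUNIT):

	// SPDX-License-Identifier: GPL-2.0
	#include <kunit/test.h>
	#include <linux/module.h>

	/* Hypothetical example only; the real iwlwifi tests live under
	 * drivers/net/wireless/intel/iwlwifi/tests/. */
	static void example_addition_test(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 4, 2 + 2);
	}

	static struct kunit_case example_test_cases[] = {
		KUNIT_CASE(example_addition_test),
		{}
	};

	static struct kunit_suite example_test_suite = {
		.name = "iwlwifi-example",
		.test_cases = example_test_cases,
	};

	kunit_test_suite(example_test_suite);

	MODULE_LICENSE("GPL");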
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index b983982aee45..8bb94a4c12cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -18,6 +18,7 @@ iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
+iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
@@ -33,4 +34,6 @@ obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 134635c70ce8..25952d0bea99 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_AX210_UCODE_API_MAX 86
+#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 59
@@ -299,3 +299,9 @@ MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 82da957adcf6..072b0a5827d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 86
+#define IWL_BZ_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
@@ -129,10 +129,6 @@ static const struct iwl_base_params iwl_bz_base_params = {
IWL_DEVICE_BZ_COMMON, \
.ht_params = &iwl_22000_ht_params
-#define IWL_DEVICE_GL_A \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_gl_a_ht_params
-
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
* A-MPDU, with additional overhead to account for processing time.
@@ -153,6 +149,7 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
.fw_name_mac = "bz",
@@ -179,3 +176,5 @@ MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 80eb9b499538..9b79279fd76c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 86
+#define IWL_SC_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
@@ -33,6 +33,10 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
+#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
+#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
+#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
+#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
@@ -48,6 +52,14 @@
IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -124,6 +136,9 @@ static const struct iwl_base_params iwl_sc_base_params = {
#define IWL_DEVICE_SC \
IWL_DEVICE_BZ_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
/*
@@ -149,10 +164,21 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
- .uhb_supported = true,
IWL_DEVICE_SC,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_SC_EHT,
+};
+
+const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
+
+const struct iwl_cfg iwl_cfg_sc2 = {
+ .fw_name_mac = "sc2",
+ IWL_DEVICE_SC,
+};
+
+const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
+
+const struct iwl_cfg iwl_cfg_sc2f = {
+ .fw_name_mac = "sc2f",
+ IWL_DEVICE_SC,
};
MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
@@ -162,3 +188,7 @@ MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
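The firmware-name macros above stack string-literal concatenation and __stringify() so that each new MAC/RF combination needs only a _FW_PRE and a _MODULE_FIRMWARE define. Expanded by hand in a standalone program (reusing the patch's own macro names):

	#include <stdio.h>

	/* Same trick as the kernel's __stringify(): two levels so that
	 * macro arguments are expanded before being quoted. */
	#define __stringify_1(x)	#x
	#define __stringify(x)		__stringify_1(x)

	#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
	#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
		IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"

	#define IWL_SC_UCODE_API_MAX 90

	int main(void)
	{
		/* Adjacent string literals concatenate at compile time: */
		puts(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
		/* prints: iwlwifi-sc2-a0-fm-c0-90.ucode */
		return 0;
	}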
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 5f3d5b15f727..52b008ce53bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1570,6 +1570,10 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
}
const struct ieee80211_ops iwlagn_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = iwlagn_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = iwlagn_mac_start,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index dcc4810cb324..4caf2e25a297 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -4,7 +4,6 @@
* Copyright (C) 2019-2023 Intel Corporation
*/
#include <linux/uuid.h>
-#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -13,68 +12,21 @@
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
-IWL_EXPORT_SYMBOL(iwl_guid);
-const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
- 0x81, 0x4F, 0x75, 0xE4,
- 0xDD, 0x26, 0xB5, 0xFD);
-IWL_EXPORT_SYMBOL(iwl_rfi_guid);
-
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "GOOGLE-ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- },
- },
- { .ident = "RAZER",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
- },
- },
- {}
+static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
+ [DSM_FUNC_QUERY] = sizeof(u32),
+ [DSM_FUNC_DISABLE_SRD] = sizeof(u8),
+ [DSM_FUNC_ENABLE_INDONESIA_5G2] = sizeof(u8),
+ [DSM_FUNC_ENABLE_6E] = sizeof(u32),
+ [DSM_FUNC_REGULATORY_CONFIG] = sizeof(u32),
+ /* Not supported in driver */
+ [5] = (size_t)0,
+ [DSM_FUNC_11AX_ENABLEMENT] = sizeof(u32),
+ [DSM_FUNC_ENABLE_UNII4_CHAN] = sizeof(u32),
+ [DSM_FUNC_ACTIVATE_CHANNEL] = sizeof(u32),
+ [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
+ [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
+ [DSM_FUNC_RFI_CONFIG] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -200,46 +152,41 @@ out:
}
/*
- * Evaluate a DSM with no arguments and a u8 return value,
+ * This function receives a DSM function number, calculates its expected size
+ * according to the Intel BIOS spec, and returns the value in a 32-bit field.
+ * If the expected size is smaller than 32 bits, the value is padded to 32 bits.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ size_t expected_size;
+ u64 tmp;
int ret;
- u64 val;
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u8));
+ BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (ret < 0)
- return ret;
-
- /* cast val (u64) to be u8 */
- *value = (u8)val;
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+ if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size)))
+ return -EINVAL;
-/*
- * Evaluate a DSM with no arguments and a u32 return value,
- */
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- int ret;
- u64 val;
+ expected_size = acpi_dsm_size[func];
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u32));
+ /* Currently all ACPI DSMs are either 8-bit or 32-bit */
+ if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
+ return -EOPNOTSUPP;
- if (ret < 0)
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
+ &iwl_guid, &tmp, expected_size);
+ if (ret)
return ret;
- /* cast val (u64) to be u32 */
- *value = (u32)val;
+ if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
+ (expected_size == sizeof(u32) && tmp != (u32)tmp))
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM value overflows the expected size, truncating\n");
+ *value = (u32)tmp;
+
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
static union acpi_object *
iwl_acpi_get_wifi_pkg_range(struct device *dev,
@@ -307,9 +254,8 @@ iwl_acpi_get_wifi_pkg(struct device *dev,
tbl_rev);
}
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -331,22 +277,9 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
- u16 override_iec =
- (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
- u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
- ACPI_WTAS_ENABLE_IEC_POS;
- u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
-
- enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- if (fw_ver <= 3) {
- cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
- cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
- } else {
- cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
- cmd->v4.override_tas_iec = (u8)override_iec;
- cmd->v4.enable_tas_iec = (u8)enabled_iec;
- }
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ tas_selection);
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -365,22 +298,16 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
- APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
- IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
- block_list_size);
- ret = -EINVAL;
- goto out_free;
- }
for (i = 0; i < block_list_size; i++) {
u32 country;
@@ -394,7 +321,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->v4.block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -403,19 +330,19 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
-int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -439,46 +366,42 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev)
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit)
{
union acpi_object *data, *wifi_pkg;
- u64 dflt_pwr_limit;
- int tbl_rev;
+ int tbl_rev, ret = -EINVAL;
- data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
- if (IS_ERR(data)) {
- dflt_pwr_limit = 0;
+ *dflt_pwr_limit = 0;
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data))
goto out;
- }
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
- wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
- dflt_pwr_limit = 0;
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER)
goto out_free;
- }
- dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ *dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ ret = 0;
out_free:
kfree(data);
out:
- return dflt_pwr_limit;
+ return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -499,11 +422,11 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-static int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled, u8 num_chains, u8 num_sub_bands)
+static int iwl_acpi_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled, u8 num_chains,
+ u8 num_sub_bands)
{
int i, j, idx = 0;
@@ -511,8 +434,8 @@ static int iwl_sar_set_profile(union acpi_object *table,
* The table from ACPI is flat, but we store it in a
* structured array.
*/
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+ for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
+ for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
@@ -535,73 +458,7 @@ static int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
- int i, j;
-
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
- struct iwl_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &fwrt->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
- profs[i]);
- /*
- * if one of the profiles is disabled, we
- * ignore all of them and return 1 to
- * differentiate disabled from other failures.
- */
- return 1;
- }
-
- IWL_DEBUG_INFO(fwrt,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < n_subbands; j++) {
- per_chain[i * n_subbands + j] =
- cpu_to_le16(prof->chains[i].subbands[j]);
- IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
- j, prof->chains[i].subbands[j]);
- }
- }
-
- return 0;
-}
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int i, ret = 0;
-
- for (i = 0; i < n_tables; i++) {
- ret = iwl_sar_fill_table(fwrt,
- &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
- n_subbands, prof_a, prof_b);
- if (ret)
- break;
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
-
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
int ret, tbl_rev;
@@ -680,16 +537,15 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
- flags & IWL_SAR_ENABLE_MSK,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
+ num_chains, num_sub_bands);
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
@@ -767,7 +623,7 @@ read_table:
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ if (n_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -776,13 +632,15 @@ read_table:
pos = 3;
for (i = 0; i < n_profiles; i++) {
+ union acpi_object *table = &wifi_pkg->package.elements[pos];
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
- ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
- &fwrt->sar_profiles[i + 1], enabled,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table,
+ &fwrt->sar_profiles[i + 1],
+ enabled, num_chains,
+ num_sub_bands);
if (ret < 0)
break;
@@ -794,9 +652,8 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
@@ -811,7 +668,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
- .min_profiles = 3,
+ .min_profiles = BIOS_GEO_MIN_PROFILE_NUM,
},
{
.revisions = BIT(2),
@@ -897,7 +754,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
- for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+ for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) {
union acpi_object *entry;
/*
@@ -921,7 +778,7 @@ read_table:
entry->integer.value;
}
- for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+ for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
@@ -949,151 +806,26 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
-
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- /*
- * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
- * earlier firmware versions. Unfortunately, we don't have a
- * TLV API flag to rely on, so rely on the major version which
- * is in the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles)
-{
- int i, j;
-
- if (!fwrt->geo_enabled)
- return -ENODATA;
-
- if (!iwl_sar_geo_support(fwrt))
- return -EOPNOTSUPP;
-
- for (i = 0; i < n_profiles; i++) {
- for (j = 0; j < n_bands; j++) {
- struct iwl_per_chain_offset *chain =
- &table[i * n_bands + j];
-
- chain->max_tx_power =
- cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
- chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
- chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
- IWL_DEBUG_RADIO(fwrt,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j,
- fwrt->geo_profiles[i].bands[j].chains[0],
- fwrt->geo_profiles[i].bands[j].chains[1],
- fwrt->geo_profiles[i].bands[j].max);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- u8 value;
- u32 val;
- __le32 config_bitmap = 0;
-
- /*
- * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'.
- * Setting config_bitmap Indonesia bit is valid only for HR/JF.
- */
- switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
- case IWL_CFG_RF_TYPE_HR1:
- case IWL_CFG_RF_TYPE_HR2:
- case IWL_CFG_RF_TYPE_JF1:
- case IWL_CFG_RF_TYPE_JF2:
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2,
- &iwl_guid, &value);
-
- if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
- break;
- default:
- break;
- }
-
- /*
- ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
- */
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_DISABLE_SRD,
- &iwl_guid, &value);
- if (!ret) {
- if (value == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
- else if (value == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
- }
-
- if (fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
- /*
- ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG'
- */
- ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0,
- DSM_FUNC_REGULATORY_CONFIG,
- &iwl_guid, &val);
- /*
- * China 2022 enable if the BIOS object does not exist or
- * if it is enabled in BIOS.
- */
- if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
- }
-
- return config_bitmap;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands = 0;
int idx = 2;
- u8 cmd_ver;
-
- fwrt->ppag_flags = 0;
- fwrt->ppag_table_valid = false;
data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
+ if (tbl_rev >= 1 && tbl_rev <= 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
+ "Reading PPAG table (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
@@ -1128,19 +860,8 @@ read_table:
goto out_free;
}
- fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) {
- ret = -EINVAL;
- goto out_free;
- }
- if (!fwrt->ppag_flags && cmd_ver <= 3) {
- ret = 0;
- goto out_free;
- }
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
+ fwrt->ppag_ver);
/*
* read, verify gain values and save them into the PPAG table.
@@ -1158,132 +879,15 @@ read_table:
}
fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
- /* from ver 4 the fw deals with out of range values */
- if (cmd_ver >= 4)
- continue;
- if ((j == 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- ret = -EINVAL;
- goto out_free;
- }
}
}
- fwrt->ppag_table_valid = true;
ret = 0;
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
-
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size)
-{
- u8 cmd_ver;
- int i, j, num_sub_bands;
- s8 *gain;
-
- /* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
- return -EOPNOTSUPP;
-
- if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(fwrt,
- "PPAG capability not supported by FW, command not sent.\n");
- return -EINVAL;
- }
-
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) {
- IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
- return -EINVAL;
- }
-
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
- IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd->v1.gain[0];
- *cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
- /* in this case FW supports revision 0 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
- }
- } else if (cmd_ver >= 2 && cmd_ver <= 4) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
- /* in this case FW supports revisions 1 or 2 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is 0, send padded table\n");
- }
- } else {
- IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
- return -EINVAL;
- }
-
- /* ppag mode */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits were read from bios: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
- if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver == 2)) {
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
- } else {
- IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
- }
-
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits going to be sent: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- fwrt->ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
-{
-
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(fwrt,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- fwrt->ppag_flags = 0;
- return false;
- }
-
- return true;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters)
@@ -1296,7 +900,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (IS_ERR(data))
return;
- /* try to read wtas table revision 1 or revision 0*/
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
@@ -1306,13 +909,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (tbl_rev != 0)
goto out_free;
- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
- return;
+ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
tmp.filter_cfg_chains[i] =
- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
+ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
@@ -1321,3 +925,38 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
+
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_GLAI_METHOD);
+ if (IS_ERR(data))
+ return;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_GLAI_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid GLAI revision: %d\n", tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value > ACPI_GLAI_MAX_STATUS)
+ goto out_free;
+
+ fwrt->uefi_tables_lock_status =
+ wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt,
+ "Loaded UEFI WIFI GUID lock status: %d from ACPI\n",
+ fwrt->uefi_tables_lock_status);
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
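The acpi.c refactor collapses the old per-width DSM getters into one table-driven entry point: acpi_dsm_size[] declares each function's expected BIOS payload width, a BUILD_BUG_ON pins the table to the enum, and a single path validates the width, warns on overflow, and widens the result to u32. A compact userspace sketch of the same shape, with the hypothetical fake_bios_eval standing in for the ACPI evaluation:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	enum dsm_func {
		DSM_QUERY,
		DSM_DISABLE_SRD,
		DSM_ENABLE_6E,
		DSM_NUM_FUNCS,
	};

	/* Expected payload width per function, per the BIOS spec. */
	static const size_t dsm_size[DSM_NUM_FUNCS] = {
		[DSM_QUERY]       = sizeof(uint32_t),
		[DSM_DISABLE_SRD] = sizeof(uint8_t),
		[DSM_ENABLE_6E]   = sizeof(uint32_t),
	};

	/* Pretend firmware evaluation returning a raw 64-bit value. */
	static uint64_t fake_bios_eval(enum dsm_func func)
	{
		return func == DSM_DISABLE_SRD ? 0x1 : 0x12345678;
	}

	static int get_dsm(enum dsm_func func, uint32_t *value)
	{
		size_t expected;
		uint64_t tmp;

		if (func >= DSM_NUM_FUNCS)
			return -EINVAL;
		expected = dsm_size[func];
		if (expected != sizeof(uint8_t) && expected != sizeof(uint32_t))
			return -EOPNOTSUPP; /* width we can't handle */

		tmp = fake_bios_eval(func);
		if ((expected == sizeof(uint8_t) && tmp != (uint8_t)tmp) ||
		    (expected == sizeof(uint32_t) && tmp != (uint32_t)tmp))
			fprintf(stderr, "value overflows expected size\n");
		*value = (uint32_t)tmp;
		return 0;
	}

	int main(void)
	{
		uint32_t v;

		if (!get_dsm(DSM_ENABLE_6E, &v))
			printf("6E enablement: %#x\n", v);
		return 0;
	}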
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e9277f6f3582..1d32b82f73db 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -7,6 +7,7 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/regulatory.h"
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
@@ -25,6 +26,7 @@
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
+#define ACPI_GLAI_METHOD "GLAI"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -56,187 +58,90 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
+#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
#define ACPI_GEO_NUM_BANDS_REV2 3
-#define ACPI_GEO_NUM_CHAINS 2
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
-
+/*
+ * One element for domain type,
+ * and one for the status
+ */
+#define ACPI_GLAI_WIFI_DATA_SIZE 2
+#define ACPI_GLAI_MAX_STATUS 2
/*
 * TAS size: 1 element for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
-#define APCI_WTAS_BLACK_LIST_MAX 16
-#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WTAS_ENABLED_MSK 0x1
-#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
-#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
-#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
-#define ACPI_WTAS_ENABLE_IEC_POS 0x2
-#define ACPI_WTAS_USA_UHB_MSK BIT(16)
-#define ACPI_WTAS_USA_UHB_POS 16
-
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX)
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
-/* PPAG gain value bounds in 1/8 dBm */
-#define ACPI_PPAG_MIN_LB -16
-#define ACPI_PPAG_MAX_LB 24
-#define ACPI_PPAG_MIN_HB -16
-#define ACPI_PPAG_MAX_HB 40
-#define ACPI_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1
-/*
- * The profile for revision 2 is a superset of revision 1, which is in
- * turn a superset of revision 0. So we can store all revisions
- * inside revision 2, which is what we represent here.
- */
-struct iwl_sar_profile_chain {
- u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-struct iwl_sar_profile {
- bool enabled;
- struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_geo_profile_band {
- u8 max;
- u8 chains[ACPI_GEO_NUM_CHAINS];
-};
-
-struct iwl_geo_profile {
- struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_ppag_chain {
- s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-enum iwl_dsm_funcs_rev_0 {
- DSM_FUNC_QUERY = 0,
- DSM_FUNC_DISABLE_SRD = 1,
- DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
- DSM_FUNC_ENABLE_6E = 3,
- DSM_FUNC_REGULATORY_CONFIG = 4,
- DSM_FUNC_11AX_ENABLEMENT = 6,
- DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8,
- DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
-};
-
-enum iwl_dsm_values_srd {
- DSM_VALUE_SRD_ACTIVE,
- DSM_VALUE_SRD_PASSIVE,
- DSM_VALUE_SRD_DISABLE,
- DSM_VALUE_SRD_MAX
-};
-
-enum iwl_dsm_values_indonesia {
- DSM_VALUE_INDONESIA_DISABLE,
- DSM_VALUE_INDONESIA_ENABLE,
- DSM_VALUE_INDONESIA_RESERVED,
- DSM_VALUE_INDONESIA_MAX
-};
-
-/* DSM RFI uses a different GUID, so need separate definitions */
-
-#define DSM_RFI_FUNC_ENABLE 3
-
-enum iwl_dsm_values_rfi {
- DSM_VALUE_RFI_ENABLE,
- DSM_VALUE_RFI_DISABLE,
- DSM_VALUE_RFI_MAX
-};
-
-enum iwl_dsm_masks_reg {
- DSM_MASK_CHINA_22_REG = BIT(2)
-};
+/* The indicator of whether UEFI WIFI GUID tables are locked is read from ACPI */
+#define UEFI_WIFI_GUID_UNLOCKED 0
+
+#define ACPI_DSM_REV 0
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
extern const guid_t iwl_guid;
-extern const guid_t iwl_rfi_guid;
-
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value);
-
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @mcc: output buffer (3 bytes) that will get the MCC
*
* This function tries to read the current MCC from ACPI if available.
*/
-int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev);
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit);
/*
* iwl_acpi_get_eckv - read external clock validation from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @extl_clk: output var (2 bytes) that will get the clk indication.
*
* This function tries to read the external clock indication
* from ACPI if available.
*/
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b);
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt);
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles);
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
-
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters);
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
+
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -245,92 +150,61 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+static inline int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
return -ENOENT;
}
-static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+static inline int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
{
+ *dflt_pwr_limit = 0;
return 0;
}
-static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+static inline int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
return -ENOENT;
}
-static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
+static inline int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
return 1;
}
-static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- return false;
-}
-
-static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+static inline int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
{
return -ENOENT;
}
-static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- return 0;
-}
-
static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
- union iwl_ppag_table_cmd *cmd, int *cmd_size)
-{
- return -ENOENT;
-}
+/* macro since the second argument doesn't always exist */
+#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
-static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
- return false;
}
-static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ return -ENOENT;
}
-
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
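
The hunks above convert the ACPI getters from taking a bare struct device to taking the fw runtime, and change iwl_acpi_get_pwr_limit() from returning the limit directly to returning an error code with an output parameter, so "no limit" and "lookup failed" become distinguishable. A minimal caller sketch of the new signatures (hypothetical, not part of the patch; error handling mirrors the -ENOENT stubs above):

static void example_read_bios_values(struct iwl_fw_runtime *fwrt)
{
	char mcc[3];
	u64 dflt_pwr_limit;
	u32 extl_clk;

	/* new API: error code + out-parameter instead of a bare u64 */
	if (iwl_acpi_get_pwr_limit(fwrt, &dflt_pwr_limit))
		dflt_pwr_limit = 0;

	if (!iwl_acpi_get_mcc(fwrt, mcc))
		IWL_DEBUG_RADIO(fwrt, "BIOS MCC: %c%c\n", mcc[0], mcc[1]);

	if (!iwl_acpi_get_eckv(fwrt, &extl_clk))
		IWL_DEBUG_RADIO(fwrt, "external clock valid: %u\n", extl_clk);
}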
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index 3e81e9369224..bc27e15488f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2023 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -170,7 +171,11 @@ enum iwl_bt_ci_compliance {
* @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
* @ttc_status: is TTC enabled - one bit per PHY
* @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
+ * The following fields are only for version 5, and are reserved in version 4:
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
@@ -182,7 +187,10 @@ struct iwl_bt_coex_profile_notif {
__le32 bt_activity_grading;
u8 ttc_status;
u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+ u8 wifi_loss_low_rssi;
+ u8 wifi_loss_mid_high_rssi;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4
+ * BT_COEX_PROFILE_NTFY_API_S_VER_5
+ */
#endif /* __iwl_fw_api_coex_h__ */
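
Because the two new u8 fields reuse the storage of the old __le16 reserved field, the notification size and layout are unchanged between v4 and v5; only the interpretation of those two bytes differs. A hedged consumer sketch (iwl_fw_lookup_notif_ver() is the driver's existing version-lookup helper; treating LEGACY_GROUP / BT_PROFILE_NOTIFICATION as the relevant IDs is an assumption here):

static void example_handle_bt_notif(struct iwl_fw_runtime *fwrt,
				    struct iwl_bt_coex_profile_notif *notif)
{
	u8 ver = iwl_fw_lookup_notif_ver(fwrt->fw, LEGACY_GROUP,
					 BT_PROFILE_NOTIFICATION, 4);

	/* the bytes are reserved (zero) in v4, meaningful from v5 */
	if (ver >= 5)
		IWL_DEBUG_RADIO(fwrt,
				"predicted WiFi loss: low %u%%, mid/high %u%%\n",
				notif->wifi_loss_low_rssi,
				notif->wifi_loss_mid_high_rssi);
}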
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index ea99d41040d2..d2a74beed3a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -324,7 +324,7 @@ struct iwl_wowlan_patterns_cmd {
u8 n_patterns;
/**
- * @n_patterns: sta_id
+ * @sta_id: sta_id
*/
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 751b596ea1a5..0f7903c5a4df 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -101,7 +101,7 @@ enum iwl_data_path_subcmd_ids {
RX_NO_DATA_NOTIF = 0xF5,
/**
- * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
+ * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 394747deb269..47c914de2992 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -319,7 +319,7 @@ struct iwl_fw_ini_conf_set_tlv {
* @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
- * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration supported
*/
enum iwl_fw_ini_config_set_type {
@@ -360,6 +360,7 @@ enum iwl_fw_ini_allocation_id {
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
* @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
+ * @IWL_FW_INI_LOCATION_NUM: number of valid locations
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
@@ -439,6 +440,7 @@ enum iwl_fw_ini_region_device_memory_subtype {
* Hard coded time points in which the driver can send hcmd or perform dump
* collection
*
+ * @IWL_FW_INI_TIME_POINT_INVALID: invalid timepoint
* @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW
* @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif
* @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence
@@ -553,7 +555,7 @@ enum iwl_fw_ini_dump_policy {
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
- * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
*/
enum iwl_fw_ini_dump_type {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index b740c65a7dca..b31ae6889bd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -394,7 +394,7 @@ struct iwl_buf_alloc_cmd {
*
* @first_word: magic word value
* @second_word: magic word value
- * @framfrags: DRAM fragmentaion detail
+ * @dram_frags: DRAM fragmentation detail
*/
struct iwl_dram_info {
__le32 first_word;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index b044990c7b87..25530a29317e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -630,6 +630,7 @@ enum iwl_location_frame_format {
* @IWL_LOCATION_BW_20MHZ: 20MHz
* @IWL_LOCATION_BW_40MHZ: 40MHz
* @IWL_LOCATION_BW_80MHZ: 80MHz
+ * @IWL_LOCATION_BW_160MHZ: 160MHz
*/
enum iwl_location_bw {
IWL_LOCATION_BW_20MHZ,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index f15e6d64c298..c6d1f5644638 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -242,9 +242,9 @@ struct iwl_mac_low_latency_cmd {
* @esr_transition_timeout: the timeout required by the AP for the
* eSR transition.
* Available only from version 2 of the command.
- * This values comes from the EMLSR transition delay in the EML
+ * This value comes from the EMLSR transition delay in the EML
* Capabilities subfield.
- * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j.
+ * @medium_sync_delay: the value as it appears in P802.11be_D2.2 Figure 9-1002j.
* @assoc_id: unique ID assigned by the AP during association
* @reserved1: alignment
* @data_policy: see &enum iwl_mac_data_policy
@@ -317,7 +317,6 @@ enum iwl_mac_config_filter_flags {
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @client: client mac data
- * @go_ibss: mac data for go or ibss
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
@@ -374,7 +373,7 @@ struct iwl_mac_config_cmd {
* iwl_link_ctx_cfg_cmd::bss_color_disable
* @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask.
* This flag can be set only if the MAC that this link relates to has
- * eht_support set to true.
+ * eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -447,6 +446,7 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
* @cck_short_preamble: 1 for enabling short preamble, 0 otherwise
@@ -462,7 +462,7 @@ enum iwl_link_ctx_flags {
* @bi: beacon interval in TU, applicable only when associated
* @dtim_interval: DTIM interval in TU.
* Relevant only for GO, otherwise this is offloaded.
- * @puncture_mask: puncture mask for EHT
+ * @puncture_mask: puncture mask for EHT (removed in VER_3)
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @flags: a combination from &enum iwl_link_ctx_flags
* @flags_mask: what of %flags have changed. Also &enum iwl_link_ctx_flags
@@ -472,10 +472,10 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved: alignment
+ * @reserved2: alignment
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
- * @reserved1: reserved for future use
+ * @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
__le32 action;
@@ -486,7 +486,10 @@ struct iwl_link_config_cmd {
__le16 reserved_for_local_link_addr;
__le32 modify_mask;
__le32 active;
- __le32 listen_lmac;
+ union {
+ __le32 listen_lmac;
+ __le32 reserved1;
+ };
__le32 cck_rates;
__le32 ofdm_rates;
__le32 cck_short_preamble;
@@ -502,7 +505,7 @@ struct iwl_link_config_cmd {
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
__le32 bi;
__le32 dtim_interval;
- __le16 puncture_mask;
+ __le16 puncture_mask; /* removed in _VER_3 */
__le16 frame_time_rts_th;
__le32 flags;
__le32 flags_mask;
@@ -512,11 +515,11 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved;
+ u8 reserved2;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved1[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */
+ __le32 reserved3[8];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
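
The union wrapping listen_lmac is the versioning idiom used throughout these command structs: the wire layout stays fixed, and the writer picks whichever member is meaningful for the command version it negotiated. A sketch under that assumption (the cmd_ver handling is illustrative, not from the patch):

static void example_fill_link_cmd(struct iwl_link_config_cmd *cmd,
				  u8 cmd_ver, bool listen_lmac)
{
	if (cmd_ver < 2)
		cmd->listen_lmac = cpu_to_le32(listen_lmac);
	else
		cmd->reserved1 = 0;	/* reserved from _VER_2 on */

	if (cmd_ver >= 3)
		cmd->puncture_mask = 0;	/* removed in _VER_3 */
}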
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55882190251c..545826973a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -431,8 +431,8 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
-#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
+#define MAX_CHANNEL_BW_INDX_API_D_VER_1 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 5
/**
* struct iwl_he_pkt_ext_v1 - QAM thresholds
@@ -455,7 +455,7 @@ enum iwl_he_pkt_ext_constellations {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v1 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_1][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
@@ -480,7 +480,7 @@ struct iwl_he_pkt_ext_v1 {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 7ec959244ffc..58034dfa7e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
+#include "fw/regulatory.h"
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
@@ -438,36 +439,30 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
-#define IWL_TAS_BLOCK_LIST_MAX 16
/**
- * struct iwl_tas_config_cmd_v2 - configures the TAS
+ * struct iwl_tas_config_cmd_common - configures the TAS.
+ * This is also the v2 structure.
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
*/
-struct iwl_tas_config_cmd_v2 {
+struct iwl_tas_config_cmd_common {
__le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
- * struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
@@ -475,19 +470,20 @@ struct iwl_tas_config_cmd_v3 {
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-union iwl_tas_config_cmd {
- struct iwl_tas_config_cmd_v2 v2;
- struct iwl_tas_config_cmd_v3 v3;
- struct iwl_tas_config_cmd_v4 v4;
+struct iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_common common;
+ union {
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+ };
};
+
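Splitting the command into a fixed common block plus a version union lets the driver fill the block list once and then size the payload per negotiated version. A sketch of that construction (hypothetical helper; struct iwl_tas_data comes from the new fw/regulatory.h later in this diff):

static int example_fill_tas_cmd(struct iwl_tas_config_cmd *cmd,
				const struct iwl_tas_data *data, int ver)
{
	cmd->common.block_list_size = data->block_list_size;
	memcpy(cmd->common.block_list_array, data->block_list_array,
	       sizeof(cmd->common.block_list_array));

	switch (ver) {
	case 3:
		cmd->v3.override_tas_iec = cpu_to_le16(data->override_tas_iec);
		cmd->v3.enable_tas_iec = cpu_to_le16(data->enable_tas_iec);
		return sizeof(cmd->common) + sizeof(cmd->v3);
	case 4:
		cmd->v4.override_tas_iec = data->override_tas_iec;
		cmd->v4.enable_tas_iec = data->enable_tas_iec;
		cmd->v4.usa_tas_uhb_allowed = data->usa_tas_uhb_allowed;
		return sizeof(cmd->common) + sizeof(cmd->v4);
	default:
		return -EINVAL;
	}
}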
/**
* enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 306ed88de463..08a2c416ce60 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -142,6 +142,8 @@ struct iwl_phy_context_cmd_v1 {
* @lmac_id: the lmac id the phy context belongs to
* @ci: channel info
* @rxchain_info: ???
+ * @sbb_bandwidth: 0 - disabled, 1 - 40MHz ... 4 - 320MHz
+ * @sbb_ctrl_channel_loc: location of the control channel
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
@@ -152,9 +154,20 @@ struct iwl_phy_context_cmd {
/* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
- __le32 rxchain_info; /* reserved in _VER_4 */
+ union {
+ __le32 rxchain_info; /* reserved in _VER_4 */
+ struct { /* used for _VER_5/_VER_6 */
+ u8 sbb_bandwidth;
+ u8 sbb_ctrl_channel_loc;
+ __le16 puncture_mask; /* added in VER_6 */
+ };
+ };
__le32 dsp_cfg_flags;
__le32 reserved;
-} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
+ * PHY_CONTEXT_CMD_API_VER_4,
+ * PHY_CONTEXT_CMD_API_VER_5,
+ * PHY_CONTEXT_CMD_API_VER_6
+ */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
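
As with the link command, the anonymous union keeps the packed layout identical across versions: _VER_3/_VER_4 writers keep addressing rxchain_info, while _VER_5/_VER_6 writers address the new sub-fields at the same offset. An illustrative writer (not from the patch):

static void example_fill_phy_cmd(struct iwl_phy_context_cmd *cmd,
				 int cmd_ver, u8 bw_idx, u8 ctrl_loc,
				 u16 puncture_mask)
{
	if (cmd_ver < 5) {
		cmd->rxchain_info = 0;	/* reserved in _VER_4 */
		return;
	}

	cmd->sbb_bandwidth = bw_idx;
	cmd->sbb_ctrl_channel_loc = ctrl_loc;
	if (cmd_ver >= 6)
		cmd->puncture_mask = cpu_to_le16(puncture_mask);
}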
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 040d83fa5424..0bf38243f88a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -506,13 +506,40 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
/**
+ * enum iwl_ppag_flags - PPAG enable masks
+ * @IWL_PPAG_ETSI_MASK: enable PPAG in ETSI
+ * @IWL_PPAG_CHINA_MASK: enable PPAG in China
+ * @IWL_PPAG_ETSI_LPI_UHB_MASK: enable LPI in ETSI for UHB
+ * @IWL_PPAG_ETSI_VLP_UHB_MASK: enable VLP in ETSI for UHB
+ * @IWL_PPAG_ETSI_SP_UHB_MASK: enable SP in ETSI for UHB
+ * @IWL_PPAG_USA_LPI_UHB_MASK: enable LPI in USA for UHB
+ * @IWL_PPAG_USA_VLP_UHB_MASK: enable VLP in USA for UHB
+ * @IWL_PPAG_USA_SP_UHB_MASK: enable SP in USA for UHB
+ * @IWL_PPAG_CANADA_LPI_UHB_MASK: enable LPI in CANADA for UHB
+ * @IWL_PPAG_CANADA_VLP_UHB_MASK: enable VLP in CANADA for UHB
+ * @IWL_PPAG_CANADA_SP_UHB_MASK: enable SP in CANADA for UHB
+ */
+enum iwl_ppag_flags {
+ IWL_PPAG_ETSI_MASK = BIT(0),
+ IWL_PPAG_CHINA_MASK = BIT(1),
+ IWL_PPAG_ETSI_LPI_UHB_MASK = BIT(2),
+ IWL_PPAG_ETSI_VLP_UHB_MASK = BIT(3),
+ IWL_PPAG_ETSI_SP_UHB_MASK = BIT(4),
+ IWL_PPAG_USA_LPI_UHB_MASK = BIT(5),
+ IWL_PPAG_USA_VLP_UHB_MASK = BIT(6),
+ IWL_PPAG_USA_SP_UHB_MASK = BIT(7),
+ IWL_PPAG_CANADA_LPI_UHB_MASK = BIT(8),
+ IWL_PPAG_CANADA_VLP_UHB_MASK = BIT(9),
+ IWL_PPAG_CANADA_SP_UHB_MASK = BIT(10),
+};
+
+/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- *
- * @flags: bit 0 - indicates enablement of PPAG for ETSI
- * bit 1 - indicates enablement of PPAG for CHINA BIOS
- * bit 1 can be used only in v3 (identical to v2)
+ * versions 3, 4 and 5 share the same structure as v2,
+ * but use a different format for the flags bitmap
+ * @flags: values from &enum iwl_ppag_flags
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
@@ -529,6 +556,11 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+ IWL_PPAG_ETSI_LPI_UHB_MASK | \
+ IWL_PPAG_USA_LPI_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
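
Worked out, the two version masks defined above resolve to:

	IWL_PPAG_CMD_V4_MASK = BIT(0) | BIT(1)         = 0x003
	IWL_PPAG_CMD_V5_MASK = 0x003 | BIT(2) | BIT(5) = 0x027

so a v5 command forwards the ETSI, China and the two LPI-UHB enable bits, and drops the VLP/SP and Canada bits. iwl_fill_ppag_table() in the new fw/regulatory.c below applies exactly this filter:

	if (cmd_ver == 5)
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
	else if (cmd_ver < 5)
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);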
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d62fed543276..d7f8a276b683 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,6 +109,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_AMSDU_SPP: SPP (signaling and payload protected) A-MSDU
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_FLG_KEYID_POS: key index bit position
* @STA_KEY_NOT_VALID: key is invalid
@@ -129,6 +130,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_EN_MSK = (7 << 0),
STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_AMSDU_SPP = BIT(7),
STA_KEY_FLG_KEYID_POS = 8,
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 842360b1e995..d9e4c75403b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -76,6 +76,8 @@ enum iwl_tx_flags {
* to a secured STA
* @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
* selection, retry limits and BT kill
+ * @IWL_TX_FLAGS_RTS: firmware used an RTS
+ * @IWL_TX_FLAGS_CTS: firmware used CTS-to-self
*/
enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
@@ -884,6 +886,7 @@ struct iwl_tx_path_flush_cmd {
/**
* struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the tid to flush
* @queue_num: virtual queue id
* @read_before_flush: read pointer before flush
* @read_after_flush: read pointer after flush
@@ -897,6 +900,7 @@ struct iwl_flush_queue_info {
/**
* struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: the station for which the queue was flushed
* @num_flushed_queues: number of queues in queues array
* @queues: all flushed queues
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 80fda056e46a..db6d7013df66 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1727,10 +1727,12 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
/**
* mask_apply_and_normalize - applies mask on val and normalize the result
*
- * The normalization is based on the first set bit in the mask
- *
* @val: value
* @mask: mask to apply and to normalize with
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * Returns: the extracted value
*/
static u32 mask_apply_and_normalize(u32 val, u32 mask)
{
@@ -2199,15 +2201,16 @@ struct iwl_dump_ini_mem_ops {
};
/**
- * iwl_dump_ini_mem
- *
- * Creates a dump tlv and copy a memory region into it.
- * Returns the size of the current dump tlv or 0 if failed
+ * iwl_dump_ini_mem - dump memory region
*
* @fwrt: fw runtime struct
* @list: list to add the dump tlv to
* @reg_data: memory region
* @ops: memory dump operations
+ *
+ * Creates a dump tlv and copies a memory region into it.
+ *
+ * Returns: the size of the current dump tlv, or 0 on failure
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_dump_ini_region_data *reg_data,
@@ -2426,9 +2429,12 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_debug_info_tlv *debug_info =
(void *)node->tlv.data;
+ BUILD_BUG_ON(sizeof(cfg_name->cfg_name) !=
+ sizeof(debug_info->debug_cfg_name));
+
cfg_name->image_type = debug_info->image_type;
cfg_name->cfg_name_len =
- cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ cpu_to_le32(sizeof(cfg_name->cfg_name));
memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
sizeof(cfg_name->cfg_name));
cfg_name++;
@@ -2872,7 +2878,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq, &wk_data->wk,
+ usecs_to_jiffies(delay));
return 0;
}
@@ -3174,7 +3181,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq,
+ &fwrt->dump.wks[idx].wk,
+ usecs_to_jiffies(delay));
return 0;
}
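
schedule_delayed_work() is shorthand for queueing on the per-CPU system_wq; the two changes above move dump collection to system_unbound_wq so the potentially long-running work item is not pinned to the submitting CPU. The transformation in isolation, as a sketch:

	/* before: runs on the local CPU's worker pool */
	schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));

	/* after: unbound pool, the scheduler may place it on any CPU */
	queue_delayed_work(system_unbound_wq, &wk_data->wk,
			   usecs_to_jiffies(delay));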
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index eb38c686b5cb..98d56e778d99 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -306,8 +306,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
}
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
-
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
struct iwl_lmac_alive *lmac,
struct iwl_umac_alive *umac)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 06d6f7f66430..5c76e3b94968 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2014, 2018-2024 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,7 +16,7 @@
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
- * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents
* @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
* &struct iwl_fw_error_dump_txcmd packets
* @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
@@ -24,21 +24,24 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
* @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
* &struct iwl_fw_error_dump_rb
- * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO data
* @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
* for that reason is not in use in any other place in the Linux Wi-Fi
* stack.
* @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
* which we get from the fw after ALIVE. The content is structured as
* &struct iwl_fw_error_dump_smem_cfg.
+ * @IWL_FW_ERROR_DUMP_D3_DEBUG_DATA: D3 debug data
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -59,8 +62,6 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MEM_CFG = 16,
IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
-
- IWL_FW_ERROR_DUMP_MAX,
};
/**
@@ -442,7 +443,7 @@ struct iwl_fw_ini_err_table_dump {
 * struct iwl_fw_error_dump_rb - content of a Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
- * @reserved:
+ * @reserved: reserved
* @data: the content of the Receive Buffer
*/
struct iwl_fw_error_dump_rb {
@@ -488,7 +489,7 @@ struct iwl_fw_ini_special_device_memory {
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
- * @reserved:
+ * @reserved: reserved
* @data: the content of the page block
*/
struct iwl_fw_error_dump_paging {
@@ -511,6 +512,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
/**
* enum iwl_fw_dbg_trigger - triggers available
*
+ * @FW_DBG_TRIGGER_INVALID: invalid trigger value
* @FW_DBG_TRIGGER_USER: trigger log collection by user
* This should not be defined as a trigger to the driver, but a value the
* driver should set to indicate that the trigger was initiated by the
@@ -530,14 +532,15 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
- * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
- * threshold.
- * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
+ * goes above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
 * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if the alive flow times out
* @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
* in the driver.
+ * @FW_DBG_TRIGGER_MAX: one past the last trigger; used for sizing arrays etc.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index bfc39bd5bbc6..f69d29e531c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2008-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -216,6 +216,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* ADD_MODIFY_STA_KEY_API_S_VER_2.
* @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
@@ -239,14 +240,21 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
* STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API
+ * is supported by FW, this indicates the RTT confidence value
* @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar
* version tables.
* @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
- * SCAN_CONFIG_DB_CMD_API_S.
+ * SCAN_CONFIG_DB_CMD_API_S.
+ * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell
+ * number of APs in the 5 GHz band
+ * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification
* @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx
* logic.
* @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug
* internal buffer
+ * @IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD: Firmware doesn't need the host to
+ * configure the smart fifo
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -287,6 +295,7 @@ enum iwl_ucode_tlv_api {
/* API Set 2 */
IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66,
IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = (__force iwl_ucode_tlv_api_t)68,
NUM_IWL_UCODE_TLV_API
/*
@@ -383,6 +392,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
+ * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
+ * protected) A-MSDU.
+ * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -468,6 +480,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106,
@@ -480,7 +493,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114,
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
-
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
@@ -566,6 +579,7 @@ enum iwl_fw_dbg_reg_operator {
* struct iwl_fw_dbg_reg_op - an operation on a register
*
* @op: &enum iwl_fw_dbg_reg_operator
+ * @reserved: reserved
* @addr: offset of the register
* @val: value
*/
@@ -612,6 +626,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
+ * @reserved: reserved
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -722,6 +737,8 @@ enum iwl_fw_dbg_trigger_vif_type {
* @trig_dis_ms: the time, in milliseconds, after an occurrence of this
* trigger in which another occurrence should be ignored.
* @flags: &enum iwl_fw_dbg_trigger_flags
+ * @reserved: reserved (for alignment)
+ * @data: trigger data
*/
struct iwl_fw_dbg_trigger_tlv {
__le32 id;
@@ -762,7 +779,7 @@ struct iwl_fw_dbg_trigger_missed_bcon {
/**
* struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
- * cmds: the list of commands to trigger the collection on
+ * @cmds: the list of commands to trigger the collection on
*/
struct iwl_fw_dbg_trigger_cmd {
struct cmd {
@@ -772,7 +789,7 @@ struct iwl_fw_dbg_trigger_cmd {
} __packed;
/**
- * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
* @stop_offset: the offset of the value to be monitored
* @stop_threshold: the threshold above which to collect
* @start_offset: the offset of the value to be monitored
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 650e4bde9c17..1195e708caa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -12,6 +12,8 @@
#include "fw/api/alive.h"
#include "fw/uefi.h"
+#define IWL_PNVM_REDUCED_CAP_BIT BIT(25)
+
struct iwl_pnvm_section {
__le32 offset;
const u8 data[];
@@ -173,6 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
+ u32 rf_type;
len -= sizeof(*tlv);
tlv = (const void *)data;
@@ -201,6 +204,16 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
+ trans->reduced_cap_sku = false;
+ rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
+ if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type == IWL_CFG_RF_TYPE_FM)
+ trans->reduced_cap_sku = true;
+
+ IWL_DEBUG_FW(trans,
+ "Reduced SKU device %d\n",
+ trans->reduced_cap_sku);
+
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
@@ -239,7 +252,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
}
new_len = pnvm->size;
- *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
if (!*data)
@@ -255,21 +268,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
struct pnvm_sku_package *package;
u8 *image = NULL;
- /* First attempt to get the PNVM from BIOS */
- package = iwl_uefi_get_pnvm(trans_p, len);
- if (!IS_ERR_OR_NULL(package)) {
- if (*len >= sizeof(*package)) {
- /* we need only the data */
- *len -= sizeof(*package);
- image = kmemdup(package->data, *len, GFP_KERNEL);
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (trans_p->sku_id[2]) {
+ package = iwl_uefi_get_pnvm(trans_p, len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (*len >= sizeof(*package)) {
+ /* we need only the data */
+ *len -= sizeof(*package);
+ image = kvmemdup(package->data,
+ *len, GFP_KERNEL);
+ }
+ /*
+			 * free package regardless of whether kvmemdup
+			 * succeeded
+ */
+ kfree(package);
+ if (image)
+ return image;
}
- /* free package regardless of whether kmemdup succeeded */
- kfree(package);
- if (image)
- return image;
}
- /* If it's not available, try from the filesystem */
+ /* If it's not available, or for Intel SKU, try from the filesystem */
if (iwl_pnvm_get_from_fs(trans_p, &image, len))
return NULL;
return image;
@@ -314,7 +333,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
set:
iwl_trans_set_pnvm(trans, capa);
free:
- kfree(data);
+ kvfree(data);
kfree(pnvm_data);
}
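
kvmemdup() may satisfy large PNVM images from vmalloc when physically contiguous pages are scarce, which is why the free path above switches from kfree() to kvfree(): kvfree() handles both kmalloc- and vmalloc-backed memory, while plain kfree() on a vmalloc'ed buffer would be a bug. A minimal pairing sketch:

static void example_pnvm_buffer(const u8 *src, size_t len)
{
	u8 *buf = kvmemdup(src, len, GFP_KERNEL);

	if (!buf)
		return;
	/* ... hand buf to the transport ... */
	kvfree(buf);	/* not kfree(): buf may be vmalloc-backed */
}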
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
new file mode 100644
index 000000000000..36d506463e0e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/dmi.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "regulatory.h"
+#include "fw/runtime.h"
+#include "fw/uefi.h"
+
+#define GET_BIOS_TABLE(__name, ...) \
+do { \
+ int ret = -ENOENT; \
+ if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \
+ ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \
+ if (ret < 0) \
+ ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \
+ return ret; \
+} while (0)
+
+#define IWL_BIOS_TABLE_LOADER(__name) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \
+{GET_BIOS_TABLE(__name, fwrt); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \
+ data_type * data) \
+{GET_BIOS_TABLE(__name, fwrt, data); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+IWL_BIOS_TABLE_LOADER(wrds_table);
+IWL_BIOS_TABLE_LOADER(ewrd_table);
+IWL_BIOS_TABLE_LOADER(wgds_table);
+IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
+IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
+IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
+IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
+
+
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "GOOGLE-ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ { .ident = "RAZER",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id dmi_tas_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "LENOVO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "MSI",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
+{
+ int i, j;
+
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < n_profiles; i++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
+
+ chain->max_tx_power =
+ cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+ chain->chain_a =
+ fwrt->geo_profiles[i].bands[j].chains[0];
+ chain->chain_b =
+ fwrt->geo_profiles[i].bands[j].chains[1];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j,
+ fwrt->geo_profiles[i].bands[j].chains[0],
+ fwrt->geo_profiles[i].bands[j].chains[1],
+ fwrt->geo_profiles[i].bands[j].max);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
+ int i, j;
+
+ for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
+ if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
+ }
+
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < n_subbands; j++) {
+ per_chain[i * n_subbands + j] =
+ cpu_to_le16(prof->chains[i].subbands[j]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->chains[i].subbands[j]);
+ }
+ }
+
+ return 0;
+}
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
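
Layout note (editorial sketch, derived from the indexing above): per_chain is a flat array of n_tables blocks, each holding BIOS_SAR_NUM_CHAINS (2) chains of n_subbands entries, i.e.

	index = table * (2 * n_subbands) + chain * n_subbands + subband

so with n_subbands = 11, table 0 / chain B / subband 3 lands at 0 * 22 + 1 * 11 + 3 = 14.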
+
+static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
+ int subband)
+{
+ s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
+
+ if ((subband == 0 &&
+ (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
+ (subband != 0 &&
+ (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
+ return false;
+ }
+ return true;
+}
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+ bool send_ppag_always;
+
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD), 1);
+ /*
+ * Starting from ver 4, driver needs to send the PPAG CMD regardless
+ * if PPAG is enabled/disabled or valid/invalid.
+ */
+ send_ppag_always = cmd_ver > 3;
+
+ /* Don't send PPAG if it is disabled */
+ if (!send_ppag_always && !fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+
+ IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver >= 1) {
+ /* in this case FW supports revision 0 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d, send truncated table\n",
+ fwrt->ppag_ver);
+ }
+ } else if (cmd_ver >= 2 && cmd_ver <= 5) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ /* in this case FW supports revisions 1,2 or 3 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is 0, send padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ /* ppag mode */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits were read from bios: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ if (cmd_ver == 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
+ else if (cmd_ver < 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
+
+ if ((cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
+ (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
+ }
+
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ if (!send_ppag_always &&
+ !iwl_ppag_value_valid(fwrt, i, j))
+ return -EINVAL;
+
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
+
+bool iwl_is_tas_approved(void)
+{
+ return dmi_check_system(dmi_tas_approved_list);
+}
+IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection)
+{
+ u8 override_iec = u32_get_bits(tas_selection,
+ IWL_WTAS_OVERRIDE_IEC_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
+ int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+
+ tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_data->override_tas_iec = override_iec;
+ tas_data->enable_tas_iec = enabled_iec;
+
+ return enabled;
+}
+
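A worked decode of the bitfields above, for an illustrative BIOS value tas_selection = 0x00010003:

	IWL_WTAS_ENABLED_MSK      (0x1)     -> enabled      = 1
	IWL_WTAS_OVERRIDE_IEC_MSK (0x2)     -> override_iec = 1
	IWL_WTAS_ENABLE_IEC_MSK   (0x4)     -> enabled_iec  = 0
	IWL_WTAS_USA_UHB_MSK      (BIT(16)) -> usa_tas_uhb  = 1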
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+ __le32 config_bitmap = 0;
+
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_HR1:
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &val);
+
+ if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ break;
+ default:
+ break;
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
+ &val);
+ /*
+		 * Enable China 2022 regulatory support if the BIOS object
+		 * does not exist or if it is enabled in the BIOS.
+ */
+ if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
+ }
+
+ return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ GET_BIOS_TABLE(dsm, fwrt, func, value);
+}
+IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
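
The loader macros near the top of this file hide the function's return statement inside GET_BIOS_TABLE, which is why the generated bodies look empty. Expanded by hand, IWL_BIOS_TABLE_LOADER(wrds_table) produces roughly:

int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
	do {
		int ret = -ENOENT;

		/* prefer UEFI tables when the WIFI GUID is locked */
		if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)
			ret = iwl_uefi_get_wrds_table(fwrt);
		/* fall back to ACPI on any UEFI failure */
		if (ret < 0)
			ret = iwl_acpi_get_wrds_table(fwrt);
		return ret;
	} while (0);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_wrds_table);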
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
new file mode 100644
index 000000000000..28e774766847
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef __fw_regulatory_h__
+#define __fw_regulatory_h__
+
+#include "fw/img.h"
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
+#define BIOS_SAR_MAX_PROFILE_NUM 4
+/*
+ * Each SAR profile has (up to, depends on the table revision) 4 chains:
+ * chain A, chain B, chain A when in CDB, chain B when in CDB
+ */
+#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4
+#define BIOS_SAR_NUM_CHAINS 2
+#define BIOS_SAR_MAX_SUB_BANDS_NUM 11
+
+#define BIOS_GEO_NUM_CHAINS 2
+#define BIOS_GEO_MAX_NUM_BANDS 3
+#define BIOS_GEO_MAX_PROFILE_NUM 8
+#define BIOS_GEO_MIN_PROFILE_NUM 3
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+
+/* PPAG gain value bounds in 1/8 dBm */
+#define IWL_PPAG_MIN_LB -16
+#define IWL_PPAG_MAX_LB 24
+#define IWL_PPAG_MIN_HB -16
+#define IWL_PPAG_MAX_HB 40
+
+#define IWL_PPAG_ETSI_CHINA_MASK 3
+#define IWL_PPAG_REV3_MASK 0x7FF
+
+#define IWL_WTAS_BLACK_LIST_MAX 16
+#define IWL_WTAS_ENABLED_MSK 0x1
+#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
+#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_USA_UHB_MSK BIT(16)
+
+/*
+ * The profile for revision 2 is a superset of revision 1, which is in
+ * turn a superset of revision 0. So we can store all revisions
+ * inside revision 2, which is what we represent here.
+ */
+
+/*
+ * struct iwl_sar_profile_chain - per-chain values of a SAR profile
+ * @subbands: the SAR value for each subband
+ */
+struct iwl_sar_profile_chain {
+ u8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+/*
+ * struct iwl_sar_profile - SAR profile from SAR tables
+ * @enabled: whether the profile is enabled or not
+ * @chains: per-chain SAR values
+ */
+struct iwl_sar_profile {
+ bool enabled;
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+
+/*
+ * struct iwl_geo_profile_band - per-band geo SAR offsets
+ * @max: the max tx power allowed for the band
+ * @chains: SAR offsets values for each chain
+ */
+struct iwl_geo_profile_band {
+ u8 max;
+ u8 chains[BIOS_GEO_NUM_CHAINS];
+};
+
+/*
+ * struct iwl_geo_profile - geo profile
+ * @bands: per-band table of the SAR offsets
+ */
+struct iwl_geo_profile {
+ struct iwl_geo_profile_band bands[BIOS_GEO_MAX_NUM_BANDS];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+struct iwl_tas_data {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+};
+
+/* For DSM revision 0 and 4 */
+enum iwl_dsm_funcs {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
+ DSM_FUNC_REGULATORY_CONFIG = 4,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
+ DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
+ DSM_FUNC_RFI_CONFIG = 11,
+ DSM_FUNC_NUM_FUNCS = 12,
+};
+
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
+ DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
+};
+
+#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
+ DSM_VALUE_RFI_DDR_DISABLE)
+
+enum iwl_dsm_masks_reg {
+ DSM_MASK_CHINA_22_REG = BIT(2)
+};
+
+struct iwl_fw_runtime;
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b);
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
+bool iwl_is_tas_approved(void);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection);
+
+int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+
+int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+
+int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+
+static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
+ const u8 ppag_ver)
+{
+ return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
+}
+#endif /* __fw_regulatory_h__ */
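
A worked example for the iwl_bios_get_ppag_flags() inline just above (the ppag_modes value is illustrative): a BIOS reporting ppag_modes = 0x127 keeps only the ETSI/China bits on pre-rev-3 tables, and all eleven defined bits from rev 3 on:

	iwl_bios_get_ppag_flags(0x127, 2) == 0x127 & 0x003 == 0x003
	iwl_bios_get_ppag_flags(0x127, 3) == 0x127 & 0x7FF == 0x127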
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 357727774db9..b2bc4fd37abf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -14,6 +14,7 @@
#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
+#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
@@ -100,6 +101,11 @@ struct iwl_txf_iter_data {
* @dump: debug dump data
* @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
+ * @uefi_tables_lock_status: status of the WIFI GUID UEFI variables lock:
+ * 0: unlocked, 1 and 2: locked.
+ * The UEFI variables should only be read when locked.

+ * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
+ * @geo_profiles: geographic profiles as read from WGDS BIOS table
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -158,24 +164,22 @@ struct iwl_fw_runtime {
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-#ifdef CONFIG_ACPI
- struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM];
u8 sar_chain_a_profile;
u8 sar_chain_b_profile;
- struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
+ u8 reduced_power_flags;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u32 ppag_ver;
- bool ppag_table_valid;
+ u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- u8 reduced_power_flags;
- bool uats_enabled;
struct iwl_uats_table_cmd uats_table;
-#endif
+ u8 uefi_tables_lock_status;
+ bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 2964c5fb11e9..e81fc0129b9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -76,6 +76,42 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
+static
+void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ void *var;
+ unsigned long var_size;
+
+ var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+ &var_size);
+
+ if (IS_ERR(var)) {
+ IWL_DEBUG_RADIO(trans,
+ "%s UEFI variable not found 0x%lx\n", var_name,
+ PTR_ERR(var));
+ return var;
+ }
+
+ if (var_size < expected_size) {
+ IWL_DEBUG_RADIO(trans,
+ "Invalid %s UEFI variable len (%lu)\n",
+ var_name, var_size);
+ kfree(var);
+ return ERR_PTR(-EINVAL);
+ }
+
+ IWL_DEBUG_RADIO(trans, "%s from UEFI with size %lu\n", var_name,
+ var_size);
+
+ if (size)
+ *size = var_size;
+ return var;
+}
+
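The helper above centralizes the fetch / size-check / debug-log pattern that
each call site below previously open-coded. A hedged sketch of a caller (the
UEFI variable name and payload struct are hypothetical):

	/* Illustrative only: fetching a hypothetical UEFI variable. */
	struct uefi_cnv_example {
		u8 revision;
		u32 payload;
	} __packed;

	static int example_get_var(struct iwl_trans *trans)
	{
		struct uefi_cnv_example *data;

		data = iwl_uefi_get_verified_variable(trans,
						      L"UefiCnvWlanExample",
						      "EXAMPLE", sizeof(*data),
						      NULL);
		if (IS_ERR(data))
			return PTR_ERR(data);

		/* ... consume data->payload ... */
		kfree(data);
		return 0;
	}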
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -230,26 +266,13 @@ u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
unsigned long package_size;
u8 *data;
- package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME,
- &IWL_EFI_VAR_GUID, &package_size);
-
- if (IS_ERR(package)) {
- IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
- PTR_ERR(package), package_size);
+ package = iwl_uefi_get_verified_variable(trans,
+ IWL_UEFI_REDUCED_POWER_NAME,
+ "Reduced Power",
+ sizeof(*package),
+ &package_size);
+ if (IS_ERR(package))
return ERR_CAST(package);
- }
-
- if (package_size < sizeof(*package)) {
- IWL_DEBUG_FW(trans,
- "Invalid Reduced Power UEFI variable len (%lu)\n",
- package_size);
- kfree(package);
- return ERR_PTR(-EINVAL);
- }
-
- IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
- package_size);
IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);
@@ -283,32 +306,15 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
void iwl_uefi_get_step_table(struct iwl_trans *trans)
{
struct uefi_cnv_common_step_data *data;
- unsigned long package_size;
int ret;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
-
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "STEP UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid STEP table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return;
- }
-
- IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_step_parse(data, trans);
if (ret < 0)
@@ -318,7 +324,6 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
-#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
{
@@ -355,31 +360,15 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_sgom_data *data;
- unsigned long package_size;
int ret;
if (!fwrt->geo_enabled)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
- return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid SGOM table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_SGOM_NAME,
+ "SGOM", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_sgom_parse(data, fwrt);
if (ret < 0)
@@ -404,28 +393,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_uats_data *data;
- unsigned long package_size;
int ret;
- data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "UATS UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
+ "UATS", sizeof(*data), NULL);
+ if (IS_ERR(data))
return -EINVAL;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid UATS table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return -EINVAL;
- }
-
- IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_uats_parse(data, fwrt);
if (ret < 0) {
@@ -438,4 +411,298 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
return 0;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
-#endif /* CONFIG_ACPI */
+
+static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
+ struct uefi_sar_profile *uefi_sar_prof,
+ u8 prof_index, bool enabled)
+{
+ memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof,
+ sizeof(struct uefi_sar_profile));
+
+ fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK;
+}
+
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wrds *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME,
+ "WRDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode);
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ewrd *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME,
+ "EWRD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_EWRD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < data->num_profiles; i++)
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1,
+ data->mode);
+
+out:
+ kfree(data);
+ return ret;
+}
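The off-by-one between official SAR profile numbers and array slots recurs in
both readers above; spelled out as an illustrative helper (not part of the
patch):

	/*
	 * Illustrative only: the profile-number-to-array-index mapping
	 * used by the WRDS/EWRD readers above.
	 *
	 *	WRDS -> official profile 1    -> sar_profiles[0]
	 *	EWRD -> official profiles 2-4 -> sar_profiles[1..3]
	 */
	static inline int example_sar_profile_index(int official_profile)
	{
		/* there is no profile 0, so everything shifts down by one */
		return official_profile - 1;
	}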
+
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wgds *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME,
+ "WGDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WGDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM ||
+ data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Invalid number of profiles in WGDS: %d\n",
+ data->num_profiles);
+ goto out;
+ }
+
+ fwrt->geo_rev = data->revision;
+ for (i = 0; i < data->num_profiles; i++)
+ memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i],
+ sizeof(struct iwl_geo_profile));
+
+ fwrt->geo_num_profiles = data->num_profiles;
+ fwrt->geo_enabled = true;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ppag *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME,
+ "PPAG", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision < IWL_UEFI_MIN_PPAG_REV ||
+ data->revision > IWL_UEFI_MAX_PPAG_REV) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ fwrt->ppag_ver = data->revision;
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
+ fwrt->ppag_ver);
+
+ BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
+ memcpy(&fwrt->ppag_chains, &data->ppag_chains,
+ sizeof(data->ppag_chains));
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
+{
+ struct uefi_cnv_var_wtas *uefi_tas;
+ int ret = 0, enabled, i;
+
+ uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
+ "WTAS", sizeof(*uefi_tas), NULL);
+ if (IS_ERR(uefi_tas))
+ return -EINVAL;
+
+ if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
+ uefi_tas->revision);
+ goto out;
+ }
+
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ uefi_tas->tas_selection);
+ if (!enabled) {
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
+ uefi_tas->revision);
+ if (uefi_tas->black_list_size > IWL_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %d\n",
+ uefi_tas->black_list_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
+
+ for (i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] =
+ cpu_to_le32(uefi_tas->black_list[i]);
+ IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
+ uefi_tas->black_list[i]);
+ }
+out:
+ kfree(uefi_tas);
+ return ret;
+}
+
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ struct uefi_cnv_var_splc *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME,
+ "SPLC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_SPLC_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI SPLC revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *dflt_pwr_limit = data->default_pwr_limit;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ struct uefi_cnv_var_wrdd *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDD_NAME,
+ "WRDD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->mcc != UEFI_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
+ goto out;
+ }
+
+ mcc[0] = (data->mcc >> 8) & 0xff;
+ mcc[1] = data->mcc & 0xff;
+ mcc[2] = '\0';
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ struct uefi_cnv_var_eckv *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_ECKV_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *extl_clk = data->ext_clock_valid;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ struct uefi_cnv_var_general_cfg *data;
+ int ret = -EINVAL;
+
+ /* not a supported function index (the enum has no function 5) */
+ if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
+ return -EOPNOTSUPP;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME,
+ "DSM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
+ goto out;
+ }
+
+ *value = data->functions[func];
+ ret = 0;
+out:
+ kfree(data);
+ return ret;
+}
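A hedged sketch of querying a single DSM function through the new UEFI path
(the choice of function is illustrative):

	/* Illustrative only: reading the 802.11ax enablement DSM value. */
	static void example_query_dsm(struct iwl_fw_runtime *fwrt)
	{
		u32 value;

		if (!iwl_uefi_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value))
			IWL_DEBUG_RADIO(fwrt, "11ax enablement: 0x%x\n",
					value);
	}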
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index bf61a8df1225..303cc299d1bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -5,15 +5,38 @@
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
+#include "fw/regulatory.h"
+
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS"
+#define IWL_UEFI_WRDS_NAME L"UefiCnvWlanWRDS"
+#define IWL_UEFI_EWRD_NAME L"UefiCnvWlanEWRD"
+#define IWL_UEFI_WGDS_NAME L"UefiCnvWlanWGDS"
+#define IWL_UEFI_PPAG_NAME L"UefiCnvWlanPPAG"
+#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
+#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
+#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+
#define IWL_SGOM_MAP_SIZE 339
#define IWL_UATS_MAP_SIZE 339
+#define IWL_UEFI_WRDS_REVISION 2
+#define IWL_UEFI_EWRD_REVISION 2
+#define IWL_UEFI_WGDS_REVISION 3
+#define IWL_UEFI_MIN_PPAG_REV 1
+#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_SPLC_REVISION 0
+#define IWL_UEFI_WRDD_REVISION 0
+#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_DSM_REVISION 4
+
struct pnvm_sku_package {
u8 rev;
u32 total_size;
@@ -42,6 +65,120 @@ struct uefi_cnv_common_step_data {
} __packed;
/*
+ * struct uefi_sar_profile - a SAR profile as defined in UEFI
+ *
+ * @chains: a per-chain table of SAR values
+ */
+struct uefi_sar_profile {
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI
+ *
+ * @revision: the revision of the table
+ * @mode: whether WRDS is enabled or disabled
+ * @sar_profile: sar profile #1
+ */
+struct uefi_cnv_var_wrds {
+ u8 revision;
+ u32 mode;
+ struct uefi_sar_profile sar_profile;
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mode: whether EWRD is enabled or disabled
+ * @num_profiles: how many additional profiles we have in this table (0-3)
+ * @sar_profiles: the additional SAR profiles (#2-#4)
+ */
+struct uefi_cnv_var_ewrd {
+ u8 revision;
+ u32 mode;
+ u32 num_profiles;
+ struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI
+ * @revision: the revision of the table
+ * @num_profiles: the number of geo profiles in the table.
+ * The first 3 are mandatory, and there can be up to 8 in total.
+ * @geo_profiles: a per-profile table of the offsets to add to SAR values.
+ */
+struct uefi_cnv_var_wgds {
+ u8 revision;
+ u8 num_profiles;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI
+ * @revision: the revision of the table
+ * @ppag_modes: values from &enum iwl_ppag_flags
+ * @ppag_chains: the PPAG values per chain and band
+ */
+struct uefi_cnv_var_ppag {
+ u8 revision;
+ u32 ppag_modes;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+} __packed;
+
+/* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI
+ * @revision: the revision of the table
+ * @tas_selection: the different options of TAS enablement
+ * @black_list_size: the number of defined entries in the black list
+ * @black_list: a list of countries that are not allowed to use the TAS feature
+ */
+struct uefi_cnv_var_wtas {
+ u8 revision;
+ u32 tas_selection;
+ u8 black_list_size;
+ u16 black_list[IWL_WTAS_BLACK_LIST_MAX];
+} __packed;
+
+/* struct uefi_cnv_var_splc - SPLC table as defined in UEFI
+ * @revision: the revision of the table
+ * @default_pwr_limit: The default maximum power per device
+ */
+struct uefi_cnv_var_splc {
+ u8 revision;
+ u32 default_pwr_limit;
+} __packed;
+
+#define UEFI_MCC_CHINA 0x434e
+
+/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mcc: country identifier as defined by the ISO 3166-1 alpha-2 code
+ */
+struct uefi_cnv_var_wrdd {
+ u8 revision;
+ u32 mcc;
+} __packed;
+
+/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
+ * @revision: the revision of the table
+ * @ext_clock_valid: indicates whether the external 32 kHz clock is valid
+ */
+struct uefi_cnv_var_eckv {
+ u8 revision;
+ u32 ext_clock_valid;
+} __packed;
+
+#define UEFI_MAX_DSM_FUNCS 32
+
+/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
+ * @revision: the revision of the table
+ * @functions: payload of the different DSM functions
+ */
+struct uefi_cnv_var_general_cfg {
+ u8 revision;
+ u32 functions[UEFI_MAX_DSM_FUNCS];
+} __packed;
+
+/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
* disable the feature in old kernels.
@@ -55,6 +192,21 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -85,13 +237,56 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
{
return 0;
}
-#endif /* CONFIG_EFI */
-#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
-void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt);
-#else
+static inline int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ *dflt_pwr_limit = 0;
+ return 0;
+}
+
+static inline int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
@@ -103,6 +298,5 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
{
return 0;
}
-
-#endif
+#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index ae6f1cd4d660..6aa4f7f9c708 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -12,6 +12,7 @@
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
#include "iwl-csr.h"
+#include "iwl-drv.h"
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -418,6 +419,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_MAC_TYPE_SC 0x48
+#define IWL_CFG_MAC_TYPE_SC2 0x49
+#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -442,6 +445,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_160 0x1
#define IWL_CFG_160 0x0
+#define IWL_CFG_NO_320 0x1
+#define IWL_CFG_320 0x0
+
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -471,6 +477,15 @@ struct iwl_dev_info {
const char *name;
};
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_dev_info iwl_dev_info_table[];
+extern const unsigned int iwl_dev_info_table_size;
+const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+ u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+#endif
+
/*
* This list declares the config structures for all devices.
*/
@@ -526,7 +541,10 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
+extern const char iwl_sc2_name[];
+extern const char iwl_sc2f_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -632,6 +650,8 @@ extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
+extern const struct iwl_cfg iwl_cfg_sc2;
+extern const struct iwl_cfg iwl_cfg_sc2f;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 72075720969c..561d0c261123 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -64,21 +64,22 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
-static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
- struct list_head *list)
+/* add a new TLV node, returning it so it can be modified */
+static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
+ struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
- node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
if (!node)
- return -ENOMEM;
+ return NULL;
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
- return 0;
+ return &node->tlv;
}
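The allocation now goes through struct_size() for the trailing flexible
array, which saturates rather than wraps on integer overflow. A minimal
sketch of the pattern in isolation (types are illustrative):

	/* Illustrative only: overflow-safe allocation of a struct with a
	 * trailing flexible array, as in the kzalloc() above
	 * (struct_size() comes from <linux/overflow.h>).
	 */
	struct example_node {
		struct list_head list;
		u32 hdr;
		u8 data[];	/* flexible array member */
	};

	static struct example_node *example_alloc(u32 len)
	{
		struct example_node *node;

		/* struct_size() saturates to SIZE_MAX on overflow, so
		 * kzalloc() fails instead of under-allocating
		 */
		node = kzalloc(struct_size(node, data, len), GFP_KERNEL);
		return node;
	}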
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
@@ -103,10 +104,18 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
+ /* we use this as a string, ensure input was NUL terminated */
+ if (strnlen(debug_info->debug_cfg_name,
+ sizeof(debug_info->debug_cfg_name)) ==
+ sizeof(debug_info->debug_cfg_name))
+ return -EINVAL;
+
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
- return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
@@ -175,7 +184,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
@@ -246,11 +257,9 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
- struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
- struct iwl_ucode_tlv *dup = NULL;
- int ret;
+ struct iwl_ucode_tlv *new_tlv;
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
@@ -267,20 +276,18 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
+
+ new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ if (!new_tlv)
+ return -ENOMEM;
+
if (!le32_to_cpu(trig->occurrences)) {
- dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
- GFP_KERNEL);
- if (!dup)
- return -ENOMEM;
- dup_trig = (void *)dup->data;
- dup_trig->occurrences = cpu_to_le32(-1);
- tlv = dup;
- }
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data;
- ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
- kfree(dup);
+ new_trig->occurrences = cpu_to_le32(-1);
+ }
- return ret;
+ return 0;
}
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
@@ -304,7 +311,9 @@ static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list))
+ return -ENOMEM;
+ return 0;
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
@@ -1148,7 +1157,9 @@ iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
if (!match) {
IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
- return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ if (!iwl_dbg_tlv_add(trig_tlv, trig_list))
+ return -ENOMEM;
+ return 0;
}
return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
@@ -1234,7 +1245,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
}
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
tp, dump_data.trig->reset_fw);
IWL_DEBUG_FW(fwrt,
@@ -1244,22 +1255,22 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
IWL_DEBUG_FW(fwrt,
"WRT: stop only and no reload firmware\n");
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index abf8001bdac1..4696d73c8971 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -187,6 +187,7 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
+ rf_step = 'b';
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
@@ -1424,35 +1425,25 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
- int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
/* also protects start/stop from racing against each other */
lockdep_assert_held(&iwlwifi_opmode_table_mtx);
- for (retry = 0; retry <= max_retry; retry++) {
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg,
- &drv->fw, dbgfs_dir);
-
- if (op_mode)
- return op_mode;
-
- if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
- break;
-
- IWL_ERR(drv, "retry init count %d\n", retry);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+ if (op_mode)
+ return op_mode;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- debugfs_remove_recursive(drv->dbgfs_op_mode);
- drv->dbgfs_op_mode = NULL;
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
#endif
- }
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 3d1a27ba35c6..1549ff429549 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -6,6 +6,7 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
#include <linux/export.h>
+#include <kunit/visibility.h>
/* for all modules */
#define DRV_NAME "iwlwifi"
@@ -89,8 +90,13 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
-/* max retry for init flow */
-#define IWL_MAX_INIT_RETRY 2
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT
+#else
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT static
+#endif
#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 5aab64c63a13..2b290fab1ef2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -270,7 +270,7 @@ enum iwl_eeprom_enhanced_txpwr_flags {
};
/**
- * struct iwl_eeprom_enhanced_txpwr
+ * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limits
* @flags: entry flags
* @channel: channel number
* @chain_a_max: chain a max power in 1/2 dBm
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e0400ba2ab74..6ba374efaacb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -570,18 +570,19 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * @closed_rb_num: [0:11] Indicates the index of the RB which was closed
+ * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed
+ * @finished_rb_num: [0:11] Indicates the index of the current RB
* in which the last frame was written to
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * @finished_fr_num: [0:11] Indicates the index of the RX Frame
* which was transferred
+ * @__spare: reserved
*/
struct iwl_rb_status {
__le16 closed_rb_num;
__le16 closed_fr_num;
__le16 finished_rb_num;
- __le16 finished_fr_nam;
+ __le16 finished_fr_num;
__le32 __spare;
} __packed;
@@ -651,15 +652,15 @@ struct iwl_tfd_tb {
*
* This structure contains dma address and length of transmission address
*
- * @tb_len length of the tx buffer
- * @addr 64 bits dma address
+ * @tb_len: length of the tx buffer
+ * @addr: 64 bits dma address
*/
struct iwl_tfh_tb {
__le16 tb_len;
__le64 addr;
} __packed;
-/**
+/*
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
@@ -698,10 +699,11 @@ struct iwl_tfd {
/**
* struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
- * @ num_tbs 0-4 number of active tbs
- * 5 -15 reserved
- * @ tbs[25] transmit frame buffer descriptors
- * @ __pad padding
+ * @num_tbs:
+ * 0-4 number of active tbs
+ * 5-15 reserved
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
*/
struct iwl_tfh_tfd {
__le16 num_tbs;
@@ -718,10 +720,12 @@ struct iwl_tfh_tfd {
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * @tfd_offset:
+ * For devices up to 22000:
+ * 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * For 22000:
+ * 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 2f6774ec37b2..baa39a18087a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,6 +156,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_80MHZ: 80 MHz channel okay
* @NVM_CHANNEL_160MHZ: 160 MHz channel okay
* @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client supports connection to a UHB VLP AP
+ * @NVM_CHANNEL_AFC: client supports connection to a UHB AFC AP
*/
enum iwl_nvm_channel_flags {
NVM_CHANNEL_VALID = BIT(0),
@@ -170,6 +172,8 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_80MHZ = BIT(10),
NVM_CHANNEL_160MHZ = BIT(11),
NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
/**
@@ -309,7 +313,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
/* Note: already can print up to 101 characters, 110 is the limit! */
IWL_DEBUG_DEV(dev, level,
- "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
chan, flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
@@ -322,7 +326,9 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
CHECK_AND_PRINT_I(40MHZ),
CHECK_AND_PRINT_I(80MHZ),
CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH));
+ CHECK_AND_PRINT_I(DC_HIGH),
+ CHECK_AND_PRINT_I(VLP),
+ CHECK_AND_PRINT_I(AFC));
#undef CHECK_AND_PRINT_I
}
@@ -366,6 +372,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
(flags & IEEE80211_CHAN_NO_IR))
flags |= IEEE80211_CHAN_IR_CONCURRENT;
+ /* Set the AP type for the UHB case. */
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
+
return flags;
}
@@ -695,10 +707,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
.phy_cap_info[5] =
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US) |
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP,
.phy_cap_info[6] =
IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
@@ -732,6 +745,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -744,7 +760,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
@@ -799,7 +814,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
.phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US),
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
@@ -827,6 +843,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -890,8 +909,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
bool no_320;
- no_320 = !trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB;
+ no_320 = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
+ trans->reduced_cap_sku;
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -1056,6 +1076,26 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
}
+
+ if (trans->step_urm) {
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs11_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
+ }
+
+ if (trans->no_160)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (trans->reduced_cap_sku) {
+ memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
+ sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
+ ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
}
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
@@ -1572,7 +1612,8 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg)
+ const struct iwl_cfg *cfg,
+ bool uats_enabled)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1617,6 +1658,16 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_IR;
}
}
+
+ /* Set the AP type for the UHB case. */
+ if (uats_enabled) {
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
+ }
+
/*
* reg_capa is per regulatory domain so apply it for every channel
*/
@@ -1671,7 +1722,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver)
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
{
int ch_idx;
u16 ch_flags;
@@ -1737,7 +1788,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
- cfg);
+ cfg, uats_enabled);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -2097,7 +2148,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 651ed25b683b..fd9c3bed9407 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver);
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 3dc618a7c70f..1ca82f3e4ebf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -68,9 +68,11 @@ struct iwl_cfg;
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
- * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
+ * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
+ * Note that this must not return %true for newer devices using gen2 PCIe
+ * transport.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index dd32c287b983..a7d44df06eab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,12 +368,19 @@ enum {
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
};
-#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_PMU_STEP_FLOW 0xA2D588
+#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
+
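The new field macros decode a single CNVI register read. A hedged sketch
(iwl_read_prph() is the usual accessor elsewhere in the driver; the
surrounding function is illustrative):

	/* Illustrative only: decoding the CNVI chip register fields. */
	static void example_decode_cnvi(struct iwl_trans *trans)
	{
		u32 val = iwl_read_prph(trans, CNVI_AUX_MISC_CHIP);

		if (CNVI_AUX_MISC_CHIP_PROD_TYPE(val) ==
		    CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U)
			IWL_DEBUG_INFO(trans, "BZ-U, mac step %u\n",
				       CNVI_AUX_MISC_CHIP_MAC_STEP(val));
	}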
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5789a8735976..b93cef7b2330 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -519,6 +519,7 @@ struct iwl_pnvm_image {
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
+ * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
* @txq_enable: setup a queue. To setup an AC queue, use the
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
@@ -528,6 +529,8 @@ struct iwl_pnvm_image {
* hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
+ * @txq_alloc: Allocate a new TX queue, may sleep.
+ * @txq_free: Free a previously allocated TX queue.
* @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
@@ -547,23 +550,27 @@ struct iwl_pnvm_image {
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
+ * @sw_reset: trigger software reset of the NIC
* @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
* Sleeping is not allowed between grab_nic_access and
* release_nic_access.
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask - set SRAM register according to value and mask.
+ * @set_bits_mask: set SRAM register according to value and mask.
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
+ * @sync_nmi: trigger a firmware NMI and wait for it to complete
* @load_pnvm: save the pnvm data in DRAM
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
* @load_reduce_power: copy reduce power table to the corresponding DRAM memory
* @set_reduce_power: set reduce power table addresses in the sratch buffer
* @interrupts: disable/enable interrupts to transport
+ * @imr_dma_data: set up IMR DMA
+ * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
*/
struct iwl_trans_ops {
@@ -775,7 +782,7 @@ struct iwl_self_init_dram {
* @imr_size: imr dram size received from fw
* @sram_addr: sram address from debug tlv
* @sram_size: sram size from debug tlv
- * @imr2sram_remainbyte`: size remained after each dma transfer
+ * @imr2sram_remainbyte: size remained after each dma transfer
* @imr_curr_addr: current dst address used during dma transfer
* @imr_base_addr: imr address received from fw
*/
@@ -822,12 +829,16 @@ struct iwl_pc_data {
* @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @unsupported_region_msk: unsupported regions out of active_regions
* @active_regions: active regions
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
* @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
* @ucode_preset: preset based on ucode
+ * @restart_required: indicates debug restart is required
+ * @last_tp_resetfw: last handling of reset during debug timepoint
+ * @imr_data: IMR debug data allocation
* @dump_file_name_ext: dump file name extension
* @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
@@ -930,6 +941,7 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @block: queue is blocked
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
@@ -938,6 +950,8 @@ struct iwl_pcie_first_tb_buf {
* @id: queue id
* @low_mark: low watermark, resume queue if free space more than this
* @high_mark: high watermark, stop queue if free space less than this
+ * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
+ * @overflow_tx: need to transmit from overflow
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -990,10 +1004,19 @@ struct iwl_txq {
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used - bit mask of used queues
- * @queue_stopped - bit mask of stopped queues
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -1026,27 +1049,35 @@ struct iwl_trans_txqs {
/**
* struct iwl_trans - transport common data
*
- * @csme_own - true if we couldn't get ownership on the device
- * @ops - pointer to iwl_trans_ops
- * @op_mode - pointer to the op_mode
+ * @csme_own: true if we couldn't get ownership on the device
+ * @ops: pointer to iwl_trans_ops
+ * @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
- * @cfg - pointer to the configuration
- * @drv - pointer to iwl_drv
+ * @cfg: pointer to the configuration
+ * @drv: pointer to iwl_drv
+ * @state: current device state
* @status: a bit-mask of transport status flags
- * @dev - pointer to struct device * that represents the device
+ * @dev: pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id a u32 with the device RF ID
- * @hw_crf_id a u32 with the device CRF ID
- * @hw_wfpm_id a u32 with the device wfpm ID
+ * @hw_rf_id: a u32 with the device RF ID
+ * @hw_cnv_id: a u32 with the device CNV ID
+ * @hw_crf_id: a u32 with the device CRF ID
+ * @hw_wfpm_id: a u32 with the device wfpm ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @sku_id: the SKU identifier (for PNVM matching)
+ * @pnvm_loaded: indicates PNVM was loaded
+ * @hw_rev: the revision data of the HW
* @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
+ * @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
+ * @command_groups: pointer to command group name list array
+ * @command_groups_size: array size of @command_groups
* @wide_cmd_header: true when ucode supports wide command header format
* @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
@@ -1055,19 +1086,29 @@ struct iwl_trans_txqs {
* @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @dbgfs_dir: iwlwifi debugfs base dir for this device
+ * @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dbg: additional debug data, see &struct iwl_trans_debug
+ * @init_dram: FW initialization DMA data
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
+ * @name: the device name
* @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
+ * @reduced_cap_sku: reduced capability supported SKU
+ * @no_160: device not supporting 160 MHz
+ * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @trans_specific: data for the specific transport this is allocated for/with
*/
struct iwl_trans {
bool csme_own;
@@ -1090,6 +1131,8 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
u32 sku_id[3];
+ bool reduced_cap_sku;
+ u8 no_160:1, step_urm:1;
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 9fe1761691ec..535edb51d1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -181,6 +181,9 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
return 0;
@@ -252,6 +255,124 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
+static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ /* Done already */
+ if (mvmvif->bt_coex_esr_disabled == !enable)
+ return;
+
+ mvmvif->bt_coex_esr_disabled = !enable;
+
+ /* Nothing to do */
+ if (mvmvif->esr_active == enable)
+ return;
+
+ if (enable) {
+ /* Try to re-enable eSR */
+ iwl_mvm_mld_select_links(mvm, vif, false);
+ return;
+ }
+
+ /*
+ * Find the primary link, as we want to switch to it and drop the
+ * secondary one.
+ */
+ link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
+ WARN_ON(link_id < 0);
+
+ ieee80211_set_active_links_async(vif,
+ vif->active_links & BIT(link_id));
+}
+
+/*
+ * This function receives the LB link id and checks if eSR should be
+ * enabled or disabled (due to BT coex)
+ */
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ bool have_wifi_loss_rate =
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ BT_PROFILE_NOTIFICATION, 0) > 4;
+ s8 link_rssi = 0;
+ u8 wifi_loss_rate;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ return true;
+
+ /* If LB link is the primary one we should always disable eSR */
+ if (link_id == primary_link)
+ return false;
+
+ /* The feature is not supported */
+ if (!have_wifi_loss_rate)
+ return true;
+
+ /*
+ * We might not have a link_info when checking whether we can
+ * (re)enable eSR - the LB link might not exist yet
+ */
+ if (link_info)
+ link_rssi = (s8)link_info->beacon_stats.avg_signal;
+
+ /*
+ * If we don't know the RSSI, take the lower wifi loss rate so
+ * we are more likely to enter eSR; if the RSSI then turns out
+ * to be low, we will get an update and exit eSR.
+ */
+ if (!link_rssi)
+ wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ else if (!mvmvif->bt_coex_esr_disabled)
+ /* RSSI needs to get really low to disable eSR... */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ else
+ /* ...And really high before we enable it back */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+}
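
The two thresholds used above form a classic hysteresis: eSR is dropped only once the link RSSI falls to -69 dBm or below, but is not considered for re-enabling until it climbs back above -63 dBm, so the driver does not flap around a single boundary. A minimal userspace sketch of that selection, with hypothetical names standing in for the mvm state:

#include <stdbool.h>

#define DISABLE_ESR_THRESH 69	/* drop eSR once RSSI <= -69 dBm */
#define ENABLE_ESR_THRESH  63	/* re-enable only once RSSI > -63 dBm */

static bool esr_allowed(signed char rssi, bool esr_disabled,
			unsigned char loss_low_rssi,
			unsigned char loss_mid_high_rssi,
			unsigned char loss_thresh)
{
	/* the boundary depends on the current state - that is the hysteresis */
	int bound = esr_disabled ? ENABLE_ESR_THRESH : DISABLE_ESR_THRESH;
	unsigned char loss = (rssi <= -bound) ? loss_low_rssi
					      : loss_mid_high_rssi;

	return loss <= loss_thresh;
}
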
+
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ usable_links);
+ bool enable;
+
+ /* Not assoc, not MLD vif or only one usable link */
+ if (primary_link < 0)
+ return;
+
+ enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, enable);
+}
+
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -297,6 +418,8 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
+ iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -432,6 +555,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
+ /* When BT is off this will be 0 */
+ if (data->notif->wifi_loss_low_rssi == BT_OFF)
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, true);
+
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
@@ -454,6 +581,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ rcu_read_unlock();
+ return;
+ }
+
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index c832068b5718..f5122c4678a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -11,6 +11,9 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
+#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
+#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 05b64176859e..553c6fffc7c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -450,9 +450,9 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
}
static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -461,16 +461,14 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct wowlan_key_rsc_v5_data data = {};
int i;
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
if (!data.rsc)
return -ENOMEM;
- memset(data.rsc, 0xff, sizeof(*data.rsc));
-
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
data.rsc->mcast_key_id_map[i] =
IWL_MCAST_KEY_MAP_INVALID;
- data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id);
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_wowlan_get_rsc_v5_data,
@@ -494,7 +492,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
if (ver == 4) {
size = sizeof(*data.rsc_tsc);
data.rsc_tsc->sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else {
/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
size = sizeof(data.rsc_tsc->params);
@@ -668,10 +666,9 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct cfg80211_wowlan *wowlan)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
@@ -693,7 +690,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
pattern_cmd->n_patterns = wowlan->n_patterns;
if (ver >= 3)
- pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id;
+ pattern_cmd->sta_id = mvm_link->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -723,14 +720,15 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *ctx;
u8 chains_static, chains_dynamic;
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef, ap_def;
int ret, i;
struct iwl_binding_cmd_v1 binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
struct iwl_time_quota_data *quota;
u32 status;
- if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
+ ieee80211_vif_is_mld(vif)))
return -EINVAL;
/* add back the PHY */
@@ -744,12 +742,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
}
chandef = ctx->def;
+ ap_def = ctx->ap;
chains_static = ctx->rx_chains_static;
chains_dynamic = ctx->rx_chains_dynamic;
rcu_read_unlock();
ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef,
- chains_static, chains_dynamic);
+ &ap_def, chains_static, chains_dynamic);
if (ret)
return ret;
@@ -927,6 +926,9 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -987,7 +989,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
}
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1016,7 +1019,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
return -EIO;
}
- ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+ ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1030,7 +1033,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (ver == 2) {
size = sizeof(tkip_data.tkip);
tkip_data.tkip.sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
} else {
@@ -1079,7 +1082,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
- kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id);
if (cmd_ver == 4) {
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
@@ -1112,6 +1115,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct ieee80211_sta *ap_sta)
{
int ret;
@@ -1130,7 +1134,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1142,7 +1146,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
- ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
+ ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
@@ -1223,6 +1227,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_mvm_vif_link_info *mvm_link;
struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1237,7 +1242,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
- int ret;
+ int ret, primary_link;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1251,21 +1256,46 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
return -EINVAL;
}
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ return 1;
+
+ if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
+ /*
+ * Select the 'best' link. May need to revisit; it seems better
+ * not to optimize for throughput here but rather for range,
+ * reliability and power - and select 2.4 GHz ...
+ */
+ primary_link =
+ iwl_mvm_mld_get_primary_link(mvm, vif,
+ vif->active_links);
+
+ if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
+ vif->active_links))
+ primary_link = __ffs(vif->active_links);
+
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+ } else {
+ primary_link = 0;
+ }
+
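
The suspend path above now pins the connection to a single link before taking the mutex; if the primary-link lookup fails, it falls back to the lowest-numbered active link. A small userspace sketch of that fallback, where __builtin_ctzl() stands in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
	unsigned long active_links = 0x6;	/* hypothetical: links 1 and 2 */
	int primary_link = -1;			/* pretend the lookup failed */

	if (primary_link < 0)			/* WARN_ONCE() in the driver */
		primary_link = __builtin_ctzl(active_links);

	printf("suspending on link %d\n", primary_link);	/* -> link 1 */
	return 0;
}
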
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
synchronize_net();
- vif = iwl_mvm_get_bss_vif(mvm);
- if (IS_ERR_OR_NULL(vif)) {
- ret = 1;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_link = mvmvif->link[primary_link];
+ if (WARN_ON_ONCE(!mvm_link)) {
+ ret = -EINVAL;
goto out_noreset;
}
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) {
+ if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -1283,10 +1313,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.offloading_tid = 0,
};
- wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
+ wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
+ mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta)) {
ret = -EINVAL;
@@ -1303,7 +1333,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out_noreset;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
- vif, mvmvif, ap_sta);
+ vif, mvmvif, mvm_link, ap_sta);
if (ret)
goto out;
@@ -1469,7 +1499,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
wakeup.disconnect = true;
if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
@@ -1493,6 +1524,9 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
@@ -1846,9 +1880,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
break;
+ default:
+ WARN_ON(1);
}
}
@@ -1938,7 +1975,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
{
- int i;
+ int i, j;
struct ieee80211_key_conf *key;
struct {
struct ieee80211_key_conf conf;
@@ -1946,6 +1983,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
} conf = {
.conf.cipher = gtk_cipher,
};
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
@@ -1979,10 +2017,18 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
memcpy(conf.conf.key, status->gtk[i].key,
sizeof(status->gtk[i].key));
- key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key))
return false;
- iwl_mvm_set_key_rx_seq_idx(key, status, i);
+
+ for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) {
+ if (!status->gtk_seq[j].valid ||
+ status->gtk_seq[j].key_id != key->keyidx)
+ continue;
+ iwl_mvm_set_key_rx_seq_idx(key, status, j);
+ break;
+ }
+ WARN_ON(j == ARRAY_SIZE(status->gtk_seq));
}
return true;
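
The rekey path no longer assumes the i-th GTK maps to the i-th sequence slot; it scans gtk_seq[] for the entry whose key_id matches the installed key. A hedged sketch of that lookup with simplified, hypothetical types:

#include <stdbool.h>

struct gtk_seq { bool valid; int key_id; };

/* return the slot matching the installed key index, or -1 (the WARN case) */
static int find_gtk_seq(const struct gtk_seq *seq, int n, int keyidx)
{
	int j;

	for (j = 0; j < n; j++)
		if (seq[j].valid && seq[j].key_id == keyidx)
			return j;
	return -1;
}
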
@@ -2002,6 +2048,7 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
.conf.keyidx = key_data->id,
};
struct ieee80211_key_seq seq;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
if (!key_data->len)
return true;
@@ -2027,17 +2074,17 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
- key_config = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key_config))
return false;
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (key_config->keyidx == 4 || key_config->keyidx == 5) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
- struct iwl_mvm_vif_link_info *mvm_link =
- mvmvif->link[link_id];
+ struct iwl_mvm_vif_link_info *mvm_link;
+ link_id = link_id < 0 ? 0 : link_id;
+ mvm_link = mvmvif->link[link_id];
mvm_link->igtk = key_config;
}
@@ -2072,7 +2119,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
.status = status,
};
int i;
-
u32 disconnection_reasons =
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
@@ -2080,9 +2126,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
- if (status->wakeup_reasons & disconnection_reasons)
- return false;
-
if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
WOWLAN_INFO_NOTIFICATION,
@@ -2143,6 +2186,9 @@ out:
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
}
+ if (status->wakeup_reasons & disconnection_reasons)
+ return false;
+
return true;
}
@@ -2200,7 +2246,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
+ int i;
+
BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
if (!data->key_len)
return;
@@ -2212,7 +2261,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ WOWLAN_IGTK_MIN_INDEX;
memcpy(status->igtk.key, data->key, sizeof(data->key));
- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
+
+ /* mac80211 expects big endian for memcmp() to work, convert */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
}
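
The IPN conversion above is purely a byte-order flip: the firmware reports the packet number in the opposite endianness from what mac80211's memcmp()-based replay check expects. A standalone sketch of the flip, assuming nothing beyond the buffer length:

#include <stddef.h>

static void reverse_ipn(unsigned char *dst, const unsigned char *src,
			size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[len - i - 1];	/* mirror the bytes */
}
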
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
@@ -2846,6 +2898,9 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA :
mvmvif->deflink.ap_sta_id;
+ /* reaching this would be a bug - FW with MLO support has the status notification */
+ WARN_ON(ieee80211_vif_is_mld(vif));
+
d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
@@ -2954,7 +3009,7 @@ static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
if (results->matched_profiles) {
memcpy(results->matches, notif->matches, matches_len);
- d3_data->nd_results_valid = TRUE;
+ d3_data->nd_results_valid = true;
}
/* no scan should be active at this point */
@@ -3352,6 +3407,7 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ unsigned long end = jiffies + 60 * HZ;
u32 pme_asserted;
while (true) {
@@ -3365,6 +3421,12 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
if (msleep_interruptible(100))
break;
+
+ if (time_is_before_jiffies(end)) {
+ IWL_ERR(mvm,
+ "ending pseudo-D3 with timeout after ~60 seconds\n");
+ return -ETIMEDOUT;
+ }
}
return 0;
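
The debugfs read above gains a hard cap using the standard jiffies-deadline idiom: compute an absolute deadline once, then compare against it inside the poll loop. A kernel-style sketch of the pattern (the loop condition here is hypothetical):

unsigned long end = jiffies + 60 * HZ;		/* ~60 s from now */

while (!done) {					/* hypothetical condition */
	if (msleep_interruptible(100))
		break;				/* interrupted by a signal */
	if (time_is_before_jiffies(end))
		return -ETIMEDOUT;		/* deadline passed */
}
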
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index e8b881596baf..51b01f7528be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -381,9 +381,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -578,34 +578,47 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
+ struct ieee80211_bss_conf *link_conf;
u16 value;
- int ret;
+ int link_id, ret = -EINVAL;
ret = kstrtou16(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
- /* make sure the channel context is assigned */
- if (!chanctx_conf) {
+ mvm->dbgfs_rx_phyinfo = value;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def min_def, ap_def;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 chains_static, chains_dynamic;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ continue;
+ }
+ /* A command can't be sent with RCU lock held, so copy
+ * everything here and use it after unlocking
+ */
+ min_def = chanctx_conf->min_def;
+ ap_def = chanctx_conf->ap;
+ chains_static = chanctx_conf->rx_chains_static;
+ chains_dynamic = chanctx_conf->rx_chains_dynamic;
rcu_read_unlock();
- mutex_unlock(&mvm->mutex);
- return -EINVAL;
- }
- phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
- rcu_read_unlock();
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+ if (!phy_ctxt)
+ continue;
- mvm->dbgfs_rx_phyinfo = value;
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
+ chains_static, chains_dynamic);
+ }
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
mutex_unlock(&mvm->mutex);
return ret ?: count;
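
The rewritten debugfs handler shows the usual way around the "no sleeping under RCU" constraint, called out in the comment above: snapshot the RCU-protected chanctx fields under rcu_read_lock(), drop the lock, and only then send the (sleeping) PHY command from the copies. A condensed kernel-style sketch with hypothetical names:

rcu_read_lock();
conf = rcu_dereference(link_conf->chanctx_conf);
if (!conf) {
	rcu_read_unlock();
	return -EINVAL;
}
min_def = conf->min_def;		/* plain struct copies */
chains_static = conf->rx_chains_static;
rcu_read_unlock();

/* may sleep - legal now that no RCU read lock is held */
return send_phy_ctxt_cmd(&min_def, chains_static);
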
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index edc8204f7c0e..79f4ac8cbc72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -391,9 +391,7 @@ static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file,
char buf[12];
u32 value;
- err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (err)
return err;
@@ -877,14 +875,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
le16_to_cpu(rsp->curr_mcc));
pos += scnprintf(pos, endpos - pos, "Block list entries:");
- for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++)
+ for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++)
pos += scnprintf(pos, endpos - pos, " 0x%x",
le16_to_cpu(rsp->block_list[i]));
pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n",
- iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO");
+ iwl_is_tas_approved() ? "YES" : "NO");
pos += scnprintf(pos, endpos - pos,
"\tDo TAS Support Dual Radio?: %s\n",
rsp->in_dual_radio ? "TRUE" : "FALSE");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 233ae81884a0..4863a3c74640 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -821,9 +821,10 @@ iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If secure LTF is turned off, replace the flag with PMF only
*/
flags = le32_to_cpu(target->initiator_ap_flags);
- if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
- !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
- flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
target->initiator_ap_flags = cpu_to_le32(flags);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8f10590f9cdd..8e760300a1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -12,6 +12,9 @@ struct iwl_mvm_pasn_sta {
struct list_head list;
struct iwl_mvm_int_sta int_sta;
u8 addr[ETH_ALEN];
+
+ /* must be last as it is followed by a buffer holding the key */
+ struct ieee80211_key_conf keyconf;
};
struct iwl_mvm_pasn_hltk_data {
@@ -303,6 +306,10 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
{
list_del(&sta->list);
+ if (sta->keyconf.keylen)
+ iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id),
+ &sta->keyconf);
+
if (iwl_mvm_has_mld_api(mvm->fw))
iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
else
@@ -342,6 +349,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (hltk && hltk_len) {
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
+ IWL_ERR(mvm, "No support for secure LTF measurement\n");
+ return -EINVAL;
+ }
+
hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
@@ -352,12 +365,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
if (!sta)
return -ENOBUFS;
ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len);
+ cipher, tk, tk_len, &sta->keyconf);
if (ret) {
kfree(sta);
return ret;
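
Allocating sizeof(*sta) + tk_len in one go works because keyconf was made the last member of struct iwl_mvm_pasn_sta (see the comment added above): the key material lands in the tail of the same allocation. A minimal userspace sketch of the pattern with simplified, hypothetical types:

#include <stdlib.h>
#include <string.h>

struct key_conf { int keylen; unsigned char key[]; };	/* flexible array */

struct pasn_sta {
	int sta_id;
	struct key_conf keyconf;	/* must be last - tail holds the key */
};

static struct pasn_sta *alloc_pasn_sta(const void *tk, int tk_len)
{
	struct pasn_sta *sta = calloc(1, sizeof(*sta) + tk_len);

	if (!sta)
		return NULL;
	sta->keyconf.keylen = tk_len;
	memcpy(sta->keyconf.key, tk, tk_len);	/* writes into the tail */
	return sta;
}
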
@@ -425,7 +438,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_unlock();
phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap,
ctx.rx_chains_static,
ctx.rx_chains_dynamic);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1252084662c6..e1c2b7fc92ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,6 +16,7 @@
#include "fw/acpi.h"
#include "fw/pnvm.h"
#include "fw/uefi.h"
+#include "fw/regulatory.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -487,7 +488,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
#endif /* CONFIG_ACPI */
}
-#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
u8 cmd_ver;
@@ -567,17 +567,6 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return ret;
}
-#else
-
-static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
-{
-}
-#endif
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
@@ -677,6 +666,11 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ)
+ mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -890,7 +884,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
@@ -931,9 +924,9 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
- IWL_NUM_CHAIN_TABLES,
- n_subbands, prof_a, prof_b);
+ ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -989,7 +982,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
+ if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
ret = -EIO;
iwl_free_resp(&cmd);
@@ -1003,7 +996,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 len;
u32 n_bands;
u32 n_profiles;
- u32 sk = 0;
+ __le32 sk = cpu_to_le32(0);
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
@@ -1020,27 +1013,35 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
/* the ops field is at the same spot for all versions, so set in v1 */
cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
if (cmd_ver == 5) {
len = sizeof(cmd.v5);
n_bands = ARRAY_SIZE(cmd.v5.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v5.table_revision = sk;
} else if (cmd_ver == 4) {
len = sizeof(cmd.v4);
n_bands = ARRAY_SIZE(cmd.v4.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v4.table_revision = sk;
} else if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v3.table_revision = sk;
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v2.table_revision = sk;
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
}
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
@@ -1052,8 +1053,8 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
/* the table is at the same position for all versions, so set use v1 */
- ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
- n_bands, n_profiles);
+ ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
@@ -1062,27 +1063,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- /* Only set to South Korea if the table revision is 1 */
- if (mvm->fwrt.geo_rev == 1)
- sk = 1;
-
- /*
- * Set the table_revision to South Korea (1) or not (0). The
- * element name is misleading, as it doesn't contain the table
- * revision number, but whether the South Korea variation
- * should be used.
- * This must be done after calling iwl_sar_geo_init().
- */
- if (cmd_ver == 5)
- cmd.v5.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 4)
- cmd.v4.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 3)
- cmd.v3.table_revision = cpu_to_le32(sk);
- else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER))
- cmd.v2.table_revision = cpu_to_le32(sk);
-
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
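
Filling the table through &cmd.v1.table[0][0] for every command version is only safe because the BUILD_BUG_ON()s above pin the table to the same offset in all layouts. The same guarantee, expressed as a standalone static_assert over hypothetical structs:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct geo_cmd_v1 { uint32_t ops; uint8_t table[8]; };
struct geo_cmd_v3 { uint32_t ops; uint8_t table[8]; uint32_t table_revision; };

static_assert(offsetof(struct geo_cmd_v1, table) ==
	      offsetof(struct geo_cmd_v3, table),
	      "table must sit at the same offset in every version");
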
@@ -1091,7 +1071,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
union iwl_ppag_table_cmd cmd;
int ret, cmd_size;
- ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
/* Not supporting PPAG table is a valid scenario */
if (ret < 0)
return 0;
@@ -1110,80 +1090,19 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
+ if (!(iwl_is_ppag_approved(&mvm->fwrt)))
return 0;
return iwl_mvm_ppag_send_cmd(mvm);
}
-static const struct dmi_system_id dmi_tas_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "LENOVO",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "Acer",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "MSI",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
- },
- },
- { .ident = "Honor",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
- },
- },
- /* keep last */
- {}
-};
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return dmi_check_system(dmi_tas_approved_list);
-}
-
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
int i;
u32 size = le32_to_cpu(*le_size);
/* Verify that there is room for another country */
- if (size >= IWL_TAS_BLOCK_LIST_MAX)
+ if (size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
for (i = 0; i < size; i++) {
@@ -1200,21 +1119,21 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- union iwl_tas_config_cmd cmd = {};
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
- APCI_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
return;
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
+ ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1225,16 +1144,16 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
if (ret == 0)
return;
- if (!iwl_mvm_is_vendor_in_approved_list()) {
+ if (!iwl_is_tas_approved()) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_US)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_CANADA))) {
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
@@ -1242,41 +1161,64 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
} else {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is in the approved list.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
}
- /* v4 is the same size as v3, so no need to differentiate here */
- cmd_size = fw_ver < 3 ?
- sizeof(struct iwl_tas_config_cmd_v2) :
- sizeof(struct iwl_tas_config_cmd_v3);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
+
+ /* Set v3- or v4-specific parts; will be truncated for fw_ver < 3 */
+ if (fw_ver == 4) {
+ cmd.v4.override_tas_iec = data.override_tas_iec;
+ cmd.v4.enable_tas_iec = data.enable_tas_iec;
+ cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ } else {
+ cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
+ cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
+ }
+
+ cmd_size = sizeof(struct iwl_tas_config_cmd_common);
+ if (fw_ver >= 3)
+ /* v4 is the same size as v3 */
+ cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
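
The reworked TAS command is built as a common prefix plus a version-specific tail, and the payload is sized so older firmware simply never sees the tail. A hedged userspace sketch of that layout with hypothetical, simplified types:

#include <stddef.h>
#include <stdint.h>

struct tas_common { uint32_t block_list_size; uint16_t block_list[16]; };

struct tas_cmd {
	struct tas_common common;
	union {					/* v4 tail sized like v3 */
		struct { uint16_t override_iec, enable_iec; } v3;
		struct { uint8_t override_iec, enable_iec,
			 uhb_allowed, reserved; } v4;
	};
};

static size_t tas_cmd_size(int fw_ver)
{
	size_t len = sizeof(struct tas_common);

	if (fw_ver >= 3)	/* fw_ver < 3 gets the common part only */
		len += sizeof(((struct tas_cmd *)0)->v3);
	return len;
}
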
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
- u8 value;
- int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
- &iwl_rfi_guid, &value);
+ u32 value = 0;
+ /* default behaviour is disabled */
+ bool bios_enable_rfi = false;
+ int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
if (ret < 0) {
IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
+ return bios_enable_rfi;
+ }
- } else if (value >= DSM_VALUE_RFI_MAX) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
- value);
-
- } else if (value == DSM_VALUE_RFI_ENABLE) {
+ value &= DSM_VALUE_RFI_DISABLE;
+ /* The RFI BIOS CONFIG value can be 0 or 3 only:
+ * 0 means DDR and DLVR are enabled, 3 means both are disabled.
+ * 1 and 2 are invalid BIOS configurations, so it is not possible
+ * to disable DDR/DLVR separately.
+ */
+ if (!value) {
IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- return DSM_VALUE_RFI_ENABLE;
+ bios_enable_rfi = true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(mvm,
+ "DSM RFI got invalid value, value=%d\n", value);
}
- IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
-
- /* default behaviour is disabled */
- return DSM_VALUE_RFI_DISABLE;
+ return bios_enable_rfi;
}
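
After masking, only two BIOS values are meaningful: 0 turns RFI (both DDR and DLVR) on, and the full disable mask turns it off; anything else is treated as an invalid BIOS setting and keeps the safe default of disabled. A small sketch of that decode, with a hypothetical stand-in for DSM_VALUE_RFI_DISABLE:

#include <stdbool.h>
#include <stdint.h>

#define RFI_DISABLE_MASK 0x3	/* hypothetical DSM_VALUE_RFI_DISABLE */

static bool rfi_enabled_from_bios(uint32_t value)
{
	value &= RFI_DISABLE_MASK;
	if (value == 0)
		return true;	/* 0: DDR and DLVR enabled */
	/* RFI_DISABLE_MASK: disabled; 1 or 2: invalid, stay disabled */
	return false;
}
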
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
@@ -1288,43 +1230,34 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
- cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
+ cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
if (!ret)
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_UNII4_CHAN,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret)
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ACTIVATE_CHANNEL,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
cmd.chan_state_active_bitmap = cpu_to_le32(value);
}
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &value);
if (!ret)
cmd.force_disable_channels_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
if (!ret)
cmd.edt_bitmap = cpu_to_le32(value);
@@ -1390,15 +1323,17 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
- mvm->fwrt.uats_enabled = TRUE;
+ mvm->fwrt.uats_enabled = true;
}
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
{
int ret;
+ iwl_acpi_get_guid_lock_status(&mvm->fwrt);
+
/* read PPAG table */
- ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
+ ret = iwl_bios_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1406,7 +1341,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
/* read SAR tables */
- ret = iwl_sar_get_wrds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1415,7 +1350,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
* If not available, don't fail and don't bother with EWRD and
* WGDS */
- if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
@@ -1426,7 +1361,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
} else {
- ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
+ ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use
* WRDS, so don't fail */
if (ret < 0)
@@ -1436,7 +1371,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
/* read geo SAR table */
if (iwl_sar_geo_support(&mvm->fwrt)) {
- ret = iwl_sar_get_wgds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wgds_table(&mvm->fwrt);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1446,59 +1381,18 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
-}
-#else /* CONFIG_ACPI */
-inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
- int prof_a, int prof_b)
-{
- return 1;
+ if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
+ IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
}
-inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- return -ENOENT;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
}
-static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
-static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
-{
-}
-
-static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
-{
-}
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return false;
-}
-
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- return DSM_VALUE_RFI_DISABLE;
-}
-
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
-{
-}
-
-#endif /* CONFIG_ACPI */
-
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
@@ -1543,10 +1437,15 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
- if (resp)
+ if (resp) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
resp);
+
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_disconnect_iterator,
+ mvm);
+ }
}
}
@@ -1781,9 +1680,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!mvm->ptp_data.ptp_clock)
iwl_mvm_ptp_init(mvm);
- if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
- IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
-
ret = iwl_mvm_ppag_init(mvm);
if (ret)
goto error;
@@ -1803,7 +1699,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
+ if (iwl_mvm_eval_dsm_rfi(mvm))
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index be48b0fc9cb6..f13f13e6b71a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
#include "time-event.h"
@@ -53,6 +53,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int link_id = link_conf->link_id;
struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
struct iwl_link_config_cmd cmd = {};
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
@@ -84,7 +86,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
@@ -100,6 +103,8 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
u32 ht_flag, flags = 0, flags_mask = 0;
int ret;
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info ||
link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
@@ -190,12 +195,21 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
+ struct ieee80211_chanctx_conf *ctx;
+ struct cfg80211_chan_def *def = NULL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ if (ctx)
+ def = iwl_mvm_chanctx_def(mvm, ctx);
+
if (iwlwifi_mod_params.disable_11be ||
- !link_conf->eht_support)
+ !link_conf->eht_support || !def ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6)
changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
else
- cmd.puncture_mask =
- cpu_to_le16(link_conf->eht_puncturing);
+ cmd.puncture_mask = cpu_to_le16(def->punctured);
+ rcu_read_unlock();
}
cmd.bss_color = link_conf->he_bss_color.color;
@@ -224,7 +238,8 @@ send_cmd:
cmd.flags = cpu_to_le32(flags);
cmd.flags_mask = cpu_to_le32(flags_mask);
cmd.spec_link_id = link_conf->link_id;
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY);
if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 25a5a31e63c2..228ede7b8957 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -466,7 +466,7 @@ void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
/* Protect when channel wider than 20MHz */
- if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20)
+ if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
*protection_flags |= cpu_to_le32(ht_flag);
break;
default:
@@ -505,7 +505,7 @@ void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (link_conf->qos)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
}
@@ -921,8 +921,8 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (link_conf) {
basic = link_conf->basic_rates;
- if (link_conf->chandef.chan)
- band = link_conf->chandef.chan->band;
+ if (link_conf->chanreq.oper.chan)
+ band = link_conf->chanreq.oper.chan->band;
}
rcu_read_unlock();
}
@@ -1477,8 +1477,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
- int c = ieee80211_beacon_update_cntdwn(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif, 0);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif,
&csa_vif->bss_conf);
@@ -1497,7 +1497,7 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
/* we don't have CSA NoA scheduled yet, switch now */
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
RCU_INIT_POINTER(mvm->csa_vif, NULL);
}
}
@@ -1637,10 +1637,22 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
- iwl_mvm_connection_loss(mvm, vif, "missed beacons");
- else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
- ieee80211_beacon_loss(vif);
+ if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) {
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ } else {
+ IWL_WARN(mvm,
+ "missed beacons exceeds threshold, but receiving data. Stay connected, Expect bugs.\n");
+ IWL_WARN(mvm,
+ "missed_beacons:%d, missed_beacons_since_rx:%d\n",
+ rx_missed_bcon, rx_missed_bcon_since_rx);
+ }
+ } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ ieee80211_beacon_loss(vif);
+ else
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+ }
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
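
The reworked beacon-loss handling above is a two-threshold decision: a hard connection loss now requires both a long run of missed beacons and no data received in the meantime; otherwise the driver only warns, or nudges mac80211 with a beacon-loss (or CQM) notification. A compact sketch of the decision table with hypothetical names:

enum bcn_action { STAY, WARN_ONLY, BEACON_LOSS, CONNECTION_LOSS };

static enum bcn_action missed_beacon_action(int missed, int missed_since_rx,
					    int long_thold,
					    int since_rx_thold,
					    int short_thold)
{
	if (missed >= long_thold)
		return missed_since_rx >= since_rx_thold ?
			CONNECTION_LOSS : WARN_ONLY;	/* still getting data */
	if (missed_since_rx > short_thold)
		return BEACON_LOSS;			/* let mac80211 probe */
	return STAY;
}
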
@@ -1843,7 +1855,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
csa_vif->bss_conf.beacon_int));
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 53e26c3c3a9a..1935630d3def 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -138,7 +138,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- le32_to_cpu(resp->cap), resp_ver);
+ le32_to_cpu(resp->cap), resp_ver,
+ mvm->fwrt.uats_enabled);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@@ -263,6 +264,9 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -272,6 +276,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
{
.iftype = NL80211_IFTYPE_STATION,
@@ -280,6 +285,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
};
@@ -490,6 +496,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
hw->wiphy->hw_timestamp_max_peers = 1;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -695,6 +706,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
}
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 11) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SECURE_LTF);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -1195,14 +1218,12 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
- max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
@@ -1210,13 +1231,7 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
- for (retry = 0; retry <= max_retry; retry++) {
- ret = __iwl_mvm_mac_start(mvm);
- if (!ret || mvm->pldr_sync)
- break;
-
- IWL_ERR(mvm, "mac start retry %d\n", retry);
- }
+ ret = __iwl_mvm_mac_start(mvm);
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1350,6 +1365,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
* discover that its list is now empty.
*/
cancel_work_sync(&mvm->async_handlers_wk);
+ wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
}
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
@@ -1433,7 +1449,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
goto out_unlock;
@@ -1454,7 +1470,8 @@ out_unlock:
}
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1617,7 +1634,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_remove_mac;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -1628,6 +1645,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
@@ -1638,7 +1658,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
mvm->monitor_p80 =
- iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
+ iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
@@ -2560,7 +2580,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
}
if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
@@ -2581,7 +2601,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
/* FIXME: need to update per link when FW API will
* support it
*/
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
IWL_ERR(mvm,
"failed to update CQM thresholds\n");
@@ -2608,9 +2628,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be))
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@@ -2619,10 +2637,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
vif->cfg.assoc &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
/*
@@ -3418,16 +3433,16 @@ iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (WARN_ON_ONCE(!link_conf->chandef.chan ||
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan ||
!mvmvif->link[link_id]))
return;
- if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) {
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
mvmvif->link[link_id]->he_ru_2mhz_block = false;
return;
}
- cfg80211_bss_iter(hw->wiphy, &link_conf->chandef,
+ cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper,
iwl_mvm_check_he_obss_narrow_bw_ru_iter,
&iter_data);
@@ -3487,10 +3502,10 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
/* FIXME: MEI needs to be updated for MLO */
- if (!vif->bss_conf.chandef.chan)
+ if (!vif->bss_conf.chanreq.oper.chan)
return;
- conn_info.channel = vif->bss_conf.chandef.chan->hw_value;
+ conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_TKIP:
@@ -3698,6 +3713,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mvmvif->ap_sta = sta;
+ /*
+ * Initialize the rates here already - this effectively tells
+ * the firmware only which legacy rates may be supported, since
+ * it is initialized from what the AP advertised in the
+ * beacon/probe response. This allows the firmware to send
+ * auth/assoc frames with one of the supported rates already,
+ * rather than having to fall back to a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
return 0;
}
@@ -3724,10 +3752,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
* the default bss_conf
*/
if (!mvm->mld_api_is_used &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_vif_set_he_support(hw, vif, sta, true);
@@ -3779,7 +3805,7 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
NL80211_TDLS_ENABLE_LINK);
} else {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
mvmvif->authorized = 1;
@@ -3796,13 +3822,17 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvm_sta->authorized = true;
- iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
-
/* MFP is set by default before the station is authorized.
* Clear it here in case it's not used.
*/
- if (!sta->mfp)
- return callbacks->update_sta(mvm, vif, sta);
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
return 0;
}
@@ -3833,7 +3863,7 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
mvmvif->authorized = 0;
/* disable beacon filtering */
- iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ iwl_mvm_disable_beacon_filter(mvm, vif);
}
return 0;
@@ -4412,44 +4442,6 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
-#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
-#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
-#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
-#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
-
-static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
- u32 duration_ms,
- u32 *duration_tu,
- u32 *delay)
-{
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
-
- *delay = AUX_ROC_MIN_DELAY;
- *duration_tu = MSEC_TO_TU(duration_ms);
-
- /*
- * If we are associated we want the delay time to be at least one
- * dtim interval so that the FW can wait until after the DTIM and
- * then start the time event, this will potentially allow us to
- * remain off-channel for the max duration.
- * Since we want to use almost a whole dtim interval we would also
- * like the delay to be for 2-3 dtim intervals, in case there are
- * other time events with higher priority.
- */
- if (vif->cfg.assoc) {
- *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
- /* We cannot remain off-channel longer than the DTIM interval */
- if (dtim_interval <= *duration_tu) {
- *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
- if (*duration_tu <= AUX_ROC_MIN_DURATION)
- *duration_tu = dtim_interval -
- AUX_ROC_MIN_SAFETY_BUFFER;
- }
- }
-}
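Aside on the math in the hunk removed above (the helper moves elsewhere in this series): a minimal userspace sketch of the same duration/delay computation, with the constants copied from the deleted code and an illustrative DTIM interval; the function and variable names here are hypothetical, not the driver's.

	#include <stdio.h>
	#include <stdint.h>

	#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)
	#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
	#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
	#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
	#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
	#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)

	static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

	/* same logic as the removed helper, with assoc/dtim inputs passed
	 * explicitly instead of being read from the vif
	 */
	static void roc_duration_and_delay(int assoc, uint32_t dtim_interval,
					   uint32_t duration_ms,
					   uint32_t *duration_tu, uint32_t *delay)
	{
		*delay = AUX_ROC_MIN_DELAY;
		*duration_tu = MSEC_TO_TU(duration_ms);

		if (assoc) {
			*delay = min_u32(dtim_interval * 3, AUX_ROC_MAX_DELAY);
			/* cannot stay off-channel past the DTIM interval */
			if (dtim_interval <= *duration_tu) {
				*duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
				if (*duration_tu <= AUX_ROC_MIN_DURATION)
					*duration_tu = dtim_interval -
						AUX_ROC_MIN_SAFETY_BUFFER;
			}
		}
	}

	int main(void)
	{
		uint32_t duration_tu, delay;

		/* dtim_period 2, beacon_int 100 TU -> dtim_interval 200 TU */
		roc_duration_and_delay(1, 200, 500, &duration_tu, &delay);
		/* prints duration=181 TU, delay=585 TU */
		printf("duration=%u TU, delay=%u TU\n", duration_tu, delay);
		return 0;
	}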
-
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -4547,48 +4539,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
return res;
}
-static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
- struct ieee80211_channel *channel,
- struct ieee80211_vif *vif,
- int duration, u32 activity)
-{
- int res;
- u32 duration_tu, delay;
- struct iwl_roc_req roc_req = {
- .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
- .activity = cpu_to_le32(activity),
- .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Set the channel info data */
- iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
- channel->hw_value,
- iwl_mvm_phy_band_from_nl80211(channel->band),
- IWL_PHY_CHANNEL_MODE20, 0);
-
- iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
- &delay);
- roc_req.duration = cpu_to_le32(duration_tu);
- roc_req.max_delay = cpu_to_le32(delay);
-
- IWL_DEBUG_TE(mvm,
- "\t(requested = %ums, max_delay = %ums)\n",
- duration, delay);
- IWL_DEBUG_TE(mvm,
- "Requesting to remain on channel %u for %utu\n",
- channel->hw_value, duration_tu);
-
- /* Set the node address */
- memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
-
- res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_req), &roc_req);
-
- return res;
-}
-
static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
{
int ret = 0;
@@ -4705,7 +4655,7 @@ static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt,
- &chandef, 1, 1);
+ &chandef, NULL, 1, 1);
}
/* Execute the common part for MLD and non-MLD modes */
@@ -4793,8 +4743,8 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
data->responder = true;
}
-static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm_ftm_responder_iter_data data = {
.responder = false,
@@ -4813,9 +4763,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt;
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -4828,7 +4776,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
goto out;
}
- ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def,
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -4881,9 +4829,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -4908,7 +4854,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_bt_coex_vif_change(mvm);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
@@ -5361,8 +5307,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
}
return -EOPNOTSUPP;
@@ -5446,7 +5392,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
iwl_mvm_csa_client_absent(mvm, vif);
if (mvmvif->bf_data.bf_enabled) {
- int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
return ret;
@@ -5606,8 +5552,16 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
if (mvmvif->csa_misbehave) {
+ struct ieee80211_bss_conf *link_conf;
+
/* Second time, give up on this AP */
- iwl_mvm_abort_channel_switch(hw, vif);
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[chsw->link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+
+ iwl_mvm_abort_channel_switch(hw, vif, link_conf);
ieee80211_chswitch_done(vif, false, 0);
mvmvif->csa_misbehave = false;
return;
@@ -6108,6 +6062,7 @@ void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
enum iwl_mvm_rxq_notif_type type,
bool sync,
@@ -6156,11 +6111,12 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_killed(mvm),
- HZ);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx\n",
- mvm->queue_sync_state);
+ iwl_mvm_is_radio_hw_killed(mvm),
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
+ "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ mvm->queue_sync_state,
+ mvm->queue_sync_cookie);
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index ea3e9e9c6e26..8a38fc4b0b0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -62,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
u32 flags = 0;
lockdep_assert_held(&mvm->mutex);
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!pairwise)
flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
switch (keyconf->cipher) {
@@ -96,14 +98,19 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
if (!sta && vif->type == NL80211_IFTYPE_STATION)
sta = mvmvif->ap_sta;
- /* Set the MFP flag also for an AP interface where the key is an IGTK
- * key as in such a case the station would always be NULL
+ /*
+ * If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+ * If we're installing a group key that is not an iGTK, we will
+ * not use this key for MGMT frames.
*/
- if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
- (vif->type == NL80211_IFTYPE_AP &&
- (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
flags |= IWL_SEC_KEY_FLAG_MFP;
+ if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
return flags;
}
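Aside: the MFP decision above boils down to a small truth table. A self-contained sketch, assuming only the pairwise/iGTK/STA-MFP inputs matter; the flag values and names are illustrative, not the firmware API's.

	#include <stdbool.h>
	#include <stdio.h>

	#define KEY_FLAG_MCAST 0x1	/* illustrative values only */
	#define KEY_FLAG_MFP   0x2

	static unsigned int key_flags(bool pairwise, bool igtk, bool sta_mfp)
	{
		unsigned int flags = 0;

		if (!pairwise)
			flags |= KEY_FLAG_MCAST;
		/* an iGTK always protects MGMT frames; a pairwise key does
		 * so only for an MFP station
		 */
		if ((sta_mfp && pairwise) || igtk)
			flags |= KEY_FLAG_MFP;
		return flags;
	}

	int main(void)
	{
		/* group key that is not an iGTK: MCAST set, MFP not set */
		printf("0x%x\n", key_flags(false, false, true));
		/* iGTK (keyidx 4/5): MCAST and MFP both set */
		printf("0x%x\n", key_flags(false, true, true));
		return 0;
	}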
@@ -335,6 +342,21 @@ static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
+ IWL_SEC_KEY_FLAG_MFP;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ 0);
+}
+
int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index f313a8d771e4..bb7851042177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
- MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
@@ -205,8 +205,11 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
cmd.p2p_dev.is_disc_extended =
iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
- /* Override the filter flags to accept only probe requests */
- cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+ * P2P service discovery using action frames.
+ */
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 893b69fc841b..084314bf6f36 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
@@ -47,7 +47,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -254,9 +254,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
if (!rcu_access_pointer(link_conf->chanctx_conf))
n_active++;
- if (n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
@@ -607,6 +604,7 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
struct iwl_mvm_link_sel_data {
u8 link_id;
enum nl80211_band band;
+ enum nl80211_chan_width width;
bool active;
};
@@ -658,7 +656,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
continue;
data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chandef.chan->band;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
data[n_data].active = vif->active_links & BIT(link_id);
n_data++;
}
@@ -753,8 +752,8 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- /* Update EHT Puncturing info */
- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ /* if associated, maybe puncturing changed - we'll check later */
+ if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
if (link_changes) {
@@ -1122,17 +1121,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 removed = old_links & ~new_links;
u16 added = new_links & ~old_links;
int err, i;
- if (hweight16(new_links) > 1 &&
- n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
int r;
@@ -1224,6 +1218,146 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
return ret;
}
+/*
+ * This function receives a subset of the usable links bitmap and
+ * returns the primary link id, or -1 if no such link exists
+ * (e.g. a non-MLO connection) or none was found.
+ */
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 link_id, n_data = 0;
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc)
+ return -1;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
+ data[n_data].active = true;
+ n_data++;
+ }
+
+ if (n_data <= 1)
+ return -1;
+
+ /* The logic should be modified to handle more than 2 links */
+ WARN_ON_ONCE(n_data > 2);
+
+ /* Primary link is the link with the wider bandwidth or higher band */
+ if (data[0].width > data[1].width)
+ return data[0].link_id;
+ if (data[0].width < data[1].width)
+ return data[1].link_id;
+ if (data[0].band >= data[1].band)
+ return data[0].link_id;
+
+ return data[1].link_id;
+}
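Aside: a minimal sketch of the tie-break above, assuming exactly two candidate links and plain integers standing in for the nl80211 width/band enums (whose ordering is only roughly monotonic in width).

	#include <stdio.h>

	struct link_data {
		int link_id;
		int band;	/* stand-in for enum nl80211_band */
		int width;	/* stand-in for enum nl80211_chan_width */
	};

	/* wider bandwidth wins; on a width tie, the higher band wins */
	static int pick_primary(const struct link_data d[2])
	{
		if (d[0].width > d[1].width)
			return d[0].link_id;
		if (d[0].width < d[1].width)
			return d[1].link_id;
		return d[0].band >= d[1].band ? d[0].link_id : d[1].link_id;
	}

	int main(void)
	{
		/* link 0: 2.4 GHz / 40 MHz, link 1: 5 GHz / 80 MHz -> link 1 */
		struct link_data d[2] = { { 0, 0, 40 }, { 1, 1, 80 } };

		printf("primary link: %d\n", pick_primary(d));
		return 0;
	}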
+
+/*
+ * This function receives a bitmap of usable links and checks if we can enter
+ * eSR on those links.
+ */
+static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long desired_links)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ desired_links);
+ const struct wiphy_iftype_ext_capab *ext_capa;
+ bool ret = true;
+ int link_id;
+
+ if (primary_link < 0)
+ return false;
+
+ if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
+ ieee80211_vif_type_p2p(vif));
+ if (!ext_capa ||
+ !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ /* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+ /* Mark eSR as disabled for the next time */
+ if (!ret)
+ mvmvif->bt_coex_esr_disabled = true;
+ break;
+ }
+
+ return ret;
+}
+
+static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int n_links = hweight16(desired_links);
+ bool ret = true;
+
+ if (n_links <= 1)
+ return true;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Check if HW supports the wanted number of links */
+ if (n_links > iwl_mvm_max_active_links(mvm, vif)) {
+ ret = false;
+ goto unlock;
+ }
+
+ /* If it is an eSR device, check that we can enter eSR */
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans))
+ ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links);
+unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static enum ieee80211_neg_ttlm_res
+iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+ u8 i;
+
+ /* Verify all TIDs are mapped to the same set of links */
+ map = neg_ttlm->downlink[0];
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
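Aside: the acceptance rule above, rendered as a self-contained check with plain arrays instead of the mac80211 structs (sizes and names here are illustrative).

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NUM_TIDS 8

	/* accept only if every TID maps DL and UL to the same one link set */
	static bool ttlm_acceptable(const uint16_t dl[NUM_TIDS],
				    const uint16_t ul[NUM_TIDS])
	{
		uint16_t map = dl[0];
		int i;

		for (i = 0; i < NUM_TIDS; i++)
			if (dl[i] != ul[i] || ul[i] != map)
				return false;
		return true;
	}

	int main(void)
	{
		uint16_t same[NUM_TIDS] = { 3, 3, 3, 3, 3, 3, 3, 3 };
		uint16_t diff[NUM_TIDS] = { 3, 3, 1, 3, 3, 3, 3, 3 };

		printf("%d %d\n", ttlm_acceptable(same, same),	/* 1 */
		       ttlm_acceptable(same, diff));		/* 0 */
		return 0;
	}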
+
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1318,4 +1452,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_vif_links = iwl_mvm_mld_change_vif_links,
.change_sta_links = iwl_mvm_mld_change_sta_links,
+ .can_activate_links = iwl_mvm_mld_can_activate_links,
+ .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 81dbef6947f5..a10b48947bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -40,8 +40,9 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
-#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19
/* A TimeUnit is 1024 microseconds */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
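Aside: the conversion truncates, so it is worth keeping the rounding in mind; a quick standalone check, with parentheses added around the macro argument for safety (the values are just examples).

	#include <stdio.h>

	#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)

	int main(void)
	{
		/* 1 TU = 1024 us, so 100 ms ~= 97 TU and 600 ms ~= 585 TU */
		printf("%d %d\n", MSEC_TO_TU(100), MSEC_TO_TU(600));
		return 0;
	}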
@@ -105,6 +106,7 @@ struct iwl_mvm_phy_ctxt {
/* track for RLC config command */
u32 center_freq1;
bool rlc_disabled;
+ u32 channel_load_by_us;
};
struct iwl_mvm_time_event_data {
@@ -121,7 +123,7 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
- u8 link_id;
+ s8 link_id;
};
/* Power management */
@@ -359,6 +361,7 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
+ * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -389,6 +392,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
+ bool bt_coex_esr_disabled;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -537,8 +541,8 @@ struct iwl_mvm_tt_mgmt {
#ifdef CONFIG_THERMAL
/**
- *struct iwl_mvm_thermal_device - thermal zone related data
- * @temp_trips: temperature thresholds for report
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @trips: temperature thresholds for report
* @fw_trips_index: keep indexes to original array - temp_trips
* @tzone: thermal zone device data
*/
@@ -848,6 +852,9 @@ struct iwl_mvm {
spinlock_t async_handlers_lock;
struct work_struct async_handlers_wk;
+ /* For async rx handlers that require the wiphy lock */
+ struct wiphy_work async_handlers_wiphy_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1215,7 +1222,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
- * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
@@ -1230,7 +1236,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
- IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
@@ -1567,13 +1572,17 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_trans *trans = mvm->fwrt.trans;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
- if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ if ((iwl_mvm_is_esr_supported(trans) &&
+ !mvmvif->bt_coex_esr_disabled) ||
+ ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1805,18 +1814,20 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
/* MVM PHY */
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm);
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef);
int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
u8 chains_static, u8 chains_dynamic);
@@ -2120,6 +2131,12 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
+bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link);
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2133,11 +2150,9 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
{}
#endif
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
@@ -2370,7 +2385,7 @@ u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time);
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2391,6 +2406,10 @@ int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf);
void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_vif_link_info *link,
@@ -2515,7 +2534,7 @@ static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
static inline void
iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
- struct cfg80211_chan_def *chandef)
+ const struct cfg80211_chan_def *chandef)
{
enum nl80211_band band = chandef->chan->band;
@@ -2605,7 +2624,6 @@ static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
-bool iwl_mvm_is_vendor_in_approved_list(void);
/* Callbacks for ieee80211_ops */
void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -2695,7 +2713,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
@@ -2734,4 +2753,28 @@ bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool valid_links_changed);
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links);
+
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx);
+
+static inline struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
+{
+ bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
+ iwl_mvm_enable_fils(mvm, ctx);
+
+ return use_def ? &ctx->def : &ctx->min_def;
+}
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay);
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index c0dd441e800e..ae8177222881 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -590,7 +590,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+ !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index adbbe19aeae5..a93981cb9714 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -161,9 +161,9 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.chandef.chan ||
- vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
- vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->cfg.assoc)
@@ -219,7 +219,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
return;
if (mvm->fw_static_smps_request &&
- link_conf->chandef.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
link_conf->he_support)
mode = IEEE80211_SMPS_STATIC;
@@ -259,7 +259,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
}
/**
- * enum iwl_rx_handler_context context for Rx handler
+ * enum iwl_rx_handler_context: context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
* which can't acquire mvm->mutex.
* @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
@@ -267,15 +267,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
* it will be called from a worker with mvm->mutex held.
* @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
* mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue infra
+ * instead of regular work queue.
*/
enum iwl_rx_handler_context {
RX_HANDLER_SYNC,
RX_HANDLER_ASYNC_LOCKED,
RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
};
/**
- * struct iwl_rx_handlers handler for FW notification
+ * struct iwl_rx_handlers: handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
@@ -316,7 +320,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -324,7 +329,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
iwl_mvm_handle_rx_system_oper_stats,
- RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_system_statistics_notif_oper),
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
iwl_mvm_handle_rx_system_oper_part1_stats,
@@ -673,6 +678,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -682,7 +689,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
if (!backoff)
return 0;
- dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
while (backoff->pwr) {
if (dflt_pwr_limit >= backoff->pwr)
@@ -1194,7 +1201,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
- iwl_mvm_get_acpi_tables(mvm);
+ iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
@@ -1265,6 +1272,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1551,35 +1560,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
}
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
- /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
/*
- * Sync with Rx path with a lock. Remove all the entries from this list,
- * add them to a local one (lock free), and then handle them.
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
*/
spin_lock_bh(&mvm->async_handlers_lock);
- list_splice_init(&mvm->async_handlers_list, &local_list);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_unlock(&mvm->mutex);
kfree(entry);
}
}
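Aside: the lock/splice/handle pattern used above can be shown in isolation. A userspace sketch with a pthread mutex standing in for the spinlock and a hand-rolled singly linked list instead of list_head; unlike the driver, this toy version does not preserve handling order.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int context;
		struct entry *next;
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct entry *pending;

	/* detach entries whose context bit is set; run them without the lock */
	static void run_by_context(unsigned int contexts)
	{
		struct entry *local = NULL, **pp, *e;

		pthread_mutex_lock(&lock);
		for (pp = &pending; (e = *pp); ) {
			if (!((1u << e->context) & contexts)) {
				pp = &e->next;
				continue;
			}
			*pp = e->next;	/* unlink from the shared list */
			e->next = local;
			local = e;
		}
		pthread_mutex_unlock(&lock);

		while ((e = local)) {	/* lock-free handling */
			local = e->next;
			printf("handling entry, context %d\n", e->context);
			free(e);
		}
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct entry *e = malloc(sizeof(*e));

			e->context = i;
			e->next = pending;
			pending = e;
		}
		/* runs contexts 0 and 2, leaves context 1 queued */
		run_by_context((1u << 0) | (1u << 2));
		return 0;
	}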
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1659,7 +1695,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
- schedule_work(&mvm->async_handlers_wk);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
break;
}
}
@@ -1788,12 +1828,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
- bool state = iwl_mvm_is_radio_killed(mvm);
-
- if (state)
- wake_up(&mvm->rx_sync_waitq);
-
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1818,10 +1854,12 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state)
+ if (state) {
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- else
+ wake_up(&mvm->rx_sync_waitq);
+ } else {
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ }
iwl_mvm_set_rfkill_state(mvm);
@@ -1955,7 +1993,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
- mvm->fwrt.trans->dbg.restart_required = FALSE;
+ mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 334d1f59f6e4..ce264b386029 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -9,7 +9,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -33,7 +33,7 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
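Aside: for intuition on the offset computed above, take an 80 MHz chandef centered at 5210 MHz whose primary 20 MHz channel sits at 5180 MHz; then offs is -30 and the control channel lies below the center. A trivial standalone check (frequencies illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int center_freq = 5180;		/* control channel center, MHz */
		int center_freq1 = 5210;	/* chandef center, MHz */
		int offs = center_freq - center_freq1;

		printf("offs=%d abs_offs=%d below=%d\n",
		       offs, abs(offs), offs < 0);
		return 0;
	}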
@@ -116,7 +116,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
struct iwl_phy_context_cmd_tail *tail =
@@ -137,7 +137,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
@@ -197,14 +197,18 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
*/
static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
- if (ver == 3 || ver == 4) {
+ if (ver < 5 || !ap || !ap->chan)
+ ap = NULL;
+
+ if (ver >= 3 && ver <= 6) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@@ -215,6 +219,14 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
chains_static,
chains_dynamic);
+ if (ap) {
+ cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap);
+ cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap);
+ }
+
+ if (ver == 6)
+ cmd.puncture_mask = cpu_to_le16(chandef->punctured);
+
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
0, sizeof(cmd), &cmd);
} else if (ver < 3) {
@@ -254,7 +266,8 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
* Send a command to add a PHY context based on the current HW configuration.
*/
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
int ret;
@@ -267,7 +280,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
FW_CTXT_ACTION_ADD);
@@ -300,7 +313,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* changed.
*/
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
@@ -324,7 +338,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
int ret;
/* ... remove it here ...*/
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE);
if (ret)
@@ -338,7 +352,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
action);
}
@@ -358,7 +372,7 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT);
- iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1,
+ iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1,
FW_CTXT_ACTION_REMOVE);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 1b9b06e0443f..41e68aa6bec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -20,8 +20,7 @@
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd,
- u32 flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
u16 len;
@@ -62,7 +61,7 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
len = offsetof(struct iwl_beacon_filter_cmd,
bf_threshold_absolute_low);
- return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0,
len, cmd);
}
@@ -813,8 +812,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd,
- u32 cmd_flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -825,7 +823,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = true;
@@ -834,20 +832,18 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,7 +852,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -865,10 +861,9 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
- return _iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return _iwl_mvm_disable_beacon_filter(mvm, vif);
}
static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
@@ -919,7 +914,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
!vif->cfg.ps ||
iwl_mvm_vif_low_latency(mvmvif));
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 6cba8a353b53..00860feefa7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -479,9 +479,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
+ u32 enabled = le32_to_cpu(notif->amsdu_enabled);
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
if (link_sta->agg.max_amsdu_len < size) {
/*
* In debug link_sta->agg.max_amsdu_len < size
@@ -492,7 +498,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
goto out;
}
- mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->amsdu_enabled = enabled;
mvmsta->max_amsdu_len = size;
link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
@@ -525,10 +531,10 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
- if (WARN_ON_ONCE(!link_conf->chandef.chan))
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
switch (le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
@@ -538,7 +544,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
- } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ &&
+ } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ &&
eht_cap->has_eht) {
switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 481d68cbbbd8..a8c4e354e2ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4161,6 +4161,8 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
* @mvm: The mvm component
* @mvmsta: The station
* @enable: Enable Tx protection?
+ *
+ * Returns: an error code
*/
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8caa971770c6..b1add7942c5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -752,6 +752,19 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
spin_unlock(&mvm->tcm.lock);
}
+static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ int i;
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ if (!mvm->phy_ctxts[i].ref)
+ continue;
+ mvm->phy_ctxts[i].channel_load_by_us =
+ le32_to_cpu(per_phy[i].channel_load_by_us);
+ }
+}
+
static void
iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy *stats)
@@ -766,6 +779,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator_all_macs,
&data);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
static void
@@ -841,6 +855,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
struct iwl_stats_ntfy_per_link *link_stats;
struct ieee80211_bss_conf *bss_conf;
struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif_link_info *link_info;
int link_id;
int sig;
@@ -857,20 +872,26 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
continue;
mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif);
- if (!mvmvif || !mvmvif->link[link_id])
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
continue;
link_stats = &per_link[fw_link_id];
- mvmvif->link[link_id]->beacon_stats.num_beacons =
+ link_info->beacon_stats.num_beacons =
le32_to_cpu(link_stats->beacon_counter);
/* we basically just use the u8 to store 8 bits and then treat
* it as an s8 whenever we take it out to a different type.
*/
- mvmvif->link[link_id]->beacon_stats.avg_signal =
+ link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
+ if (link_info->phy_ctxt &&
+ link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
+ link_id);
+
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -935,6 +956,7 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index af15d470c69b..1484eaedf452 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -282,6 +282,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
u32 status,
struct ieee80211_rx_status *stats)
{
+ struct wireless_dev *wdev;
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 keyid;
@@ -303,9 +304,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
- return -1;
+ goto report;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
@@ -314,13 +321,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
- if (!sta)
- return -1;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
/*
* both keys will have the same cipher and MIC length, use
* whichever one is available
@@ -329,11 +329,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!key) {
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
if (!key)
- return -1;
+ goto report;
}
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
- return -1;
+ goto report;
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
@@ -347,7 +347,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
- return -1;
+ goto report;
}
/* Report status to mac80211 */
@@ -355,6 +355,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
return -1;
}
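[Editor's illustration] For orientation, a summary of the control flow after this restructuring (comment only, not driver code); the goto-based layout lets every failure path share the single cfg80211 report:

/*
 * not a beacon                  -> return 0 (nothing to check)
 * no station                    -> return -1 (nothing to report against)
 * key invalid, frame too short,
 * or no key for the key ID      -> goto report
 * MIC failure / replay          -> notify mac80211, fall through to report
 * report:                       -> cfg80211_rx_unprot_mlme_mgmt(), return -1
 */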
@@ -397,8 +401,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
case IWL_RX_MPDU_STATUS_SEC_GCM:
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
/* alg is CCM: check MIC only */
- if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mvm,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
return -1;
+ }
stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
*crypt_len = IEEE80211_CCMP_HDR_LEN;
@@ -516,11 +523,9 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1)) {
- rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
return false;
- }
if (ieee80211_is_data_qos(hdr->frame_control)) {
/* frame has qos control */
@@ -646,10 +651,8 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (!ba_data) {
- WARN(true, "BAID %d not found in map\n", baid);
+ if (WARN(!ba_data, "BAID %d not found in map\n", baid))
goto out;
- }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -685,11 +688,11 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
len -= sizeof(*notif) + sizeof(*internal_notif);
- if (internal_notif->sync &&
- mvm->queue_sync_cookie != internal_notif->cookie) {
- WARN_ONCE(1, "Received expired RX queue sync message\n");
+ if (WARN_ONCE(internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie,
+ "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
+ internal_notif->cookie, mvm->queue_sync_cookie, queue))
return;
- }
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7b6f1cdca067..f3e3986b4c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -241,13 +241,11 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
return IWL_SCAN_TYPE_FRAGMENTED;
/*
- * in case of DCM with GO where BSS DTIM interval < 220msec
- * set all scan requests as fast-balance scan
+ * in case of DCM with P2P GO, set all scan requests as
+ * fast-balance scans
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- data.is_dcm_with_p2p_go &&
- ((vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period) < 220))
+ data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 30d4233595e8..16285ae7cae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -232,6 +232,9 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
};
struct ieee80211_sta *sta = NULL;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD))
+ return 0;
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index c2e0cff740e9..491c449fd431 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -71,7 +71,7 @@ u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
mpdu_dens = link_sta->ht_cap.ampdu_density;
}
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
/* overwrite HT values on 6 GHz */
mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -208,7 +208,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (sta->deflink.ht_cap.ht_supported ||
- mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
@@ -3017,16 +3017,6 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
-
- /*
- * After we've deleted it, do another queue sync
- * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
- * running it won't find a new session in the old
- * BAID. It can find the NULL pointer for the BAID,
- * but we must not have it find a different session.
- */
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
- true, NULL, 0);
}
return 0;
@@ -3587,6 +3577,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
@@ -4326,12 +4319,12 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len)
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *keyconf)
{
int ret;
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_key_conf *keyconf;
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
@@ -4356,12 +4349,6 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
goto out;
- keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
- if (!keyconf) {
- ret = -ENOBUFS;
- goto out;
- }
-
keyconf->cipher = cipher;
memcpy(keyconf->key, key, key_len);
keyconf->keylen = key_len;
@@ -4382,10 +4369,9 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
0, NULL, 0, 0, true);
}
- kfree(keyconf);
- return 0;
out:
- iwl_mvm_dealloc_int_sta(mvm, sta);
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 3cf8a70274ce..b3450569864e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -575,7 +575,8 @@ int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len);
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 2e653a417d62..a59d264a11c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -45,32 +45,24 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
te_data->link_id = -1;
}
-void iwl_mvm_roc_done_wk(struct work_struct *wk)
+static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-
/*
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
- clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-
- synchronize_net();
-
- /*
- * Flush the offchannel queue -- this is called when the time
+ * in the case that the time event actually completed in the firmware.
+ *
+ * Also flush the offchannel queue -- this is called when the time
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
-
- mutex_lock(&mvm->mutex);
- if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
+ synchronize_net();
+
/*
* NB: access to this pointer would be racy, but the flush bit
* can only be set when we had a P2P-Device VIF, and we have a
@@ -105,21 +97,16 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
}
}
- /*
- * Clear the ROC_AUX_RUNNING status bit.
- * This will cause the TX path to drop offchannel transmissions.
- * That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
+ /* Do the same for AUX ROC */
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- /* do the same in case of hot spot 2.0 */
+ synchronize_net();
+
iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
mvm->aux_sta.tfd_queue_msk);
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
- goto out_unlock;
+ return;
}
/* In newer version of this command an aux station is added only
@@ -128,8 +115,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
+}
-out_unlock:
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_cleanup_roc(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -163,12 +156,12 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
@@ -294,18 +287,6 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
-{
- /*
- * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
- * roc_done_wk is already scheduled or running, so don't schedule it
- * again to avoid a race where the roc_done_wk clears this bit after
- * it is set here, affecting the next run of the roc_done_wk.
- */
- if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
- iwl_mvm_roc_finished(mvm);
-}
-
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -357,7 +338,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -692,7 +673,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 link_id)
+ s8 link_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
@@ -706,8 +687,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
"Invalid link ID for session protection: %u\n", link_id))
return -EINVAL;
- if (WARN(ieee80211_vif_is_mld(vif) &&
- !(vif->active_links & BIT(link_id)),
+ if (WARN(!mvmvif->link[link_id]->active,
"Session Protection on an inactive link: %u\n", link_id))
return -EINVAL;
@@ -716,7 +696,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 id, u32 link_id)
+ u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
struct iwl_mvm_session_prot_cmd cmd = {
@@ -745,7 +725,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
- unsigned int link_id;
+ s8 link_id;
if (!vif)
return false;
@@ -783,7 +763,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_cancel_session_protection(mvm, vif, id,
link_id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
}
}
return false;
@@ -929,7 +909,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
mvmvif->time_event_data.link_id != notif_link_id,
- "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
+ "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
notif_link_id, mvmvif->time_event_data.link_id))
goto out_unlock;
@@ -973,7 +953,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
mvmvif->time_event_data.link_id = -1;
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
if (WARN_ON(mvmvif->time_event_data.id !=
@@ -987,6 +967,86 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay)
+{
+ u32 dtim_interval = vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int;
+
+ *delay = AUX_ROC_MIN_DELAY;
+ *duration_tu = MSEC_TO_TU(duration_ms);
+
+ /*
+ * If we are associated, we want the delay time to be at least one
+ * DTIM interval so that the FW can wait until after the DTIM and
+ * then start the time event; this will potentially allow us to
+ * remain off-channel for the max duration.
+ * Since we want to use almost a whole DTIM interval, we would also
+ * like the delay to be 2-3 DTIM intervals, in case there are
+ * other time events with higher priority.
+ */
+ if (vif->cfg.assoc) {
+ *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= *duration_tu) {
+ *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (*duration_tu <= AUX_ROC_MIN_DURATION)
+ *duration_tu = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+}
+
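[Editor's illustration] A worked example of the clamping above (illustrative only, assuming MSEC_TO_TU(x) == (x) * 1000 / 1024 as elsewhere in iwlwifi, and a hypothetical associated vif with beacon_int = 100 TU, dtim_period = 2, duration_ms = 500):

	u32 dtim_interval = 2 * 100;			/* 200 TU */
	u32 duration_tu = 500 * 1000 / 1024;		/* 488 TU */
	u32 delay = min_t(u32, 3 * dtim_interval,	/* min(600, 585) */
			  600 * 1000 / 1024);		/* -> 585 TU */

	/* dtim_interval (200) <= duration_tu (488), so clamp: */
	duration_tu = dtim_interval - 20 * 1000 / 1024;	/* 200 - 19 = 181 TU */
	/* 181 TU > AUX_ROC_MIN_DURATION (97 TU): no further adjustment */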
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity)
+{
+ int res;
+ u32 duration_tu, delay;
+ struct iwl_roc_req roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .activity = cpu_to_le32(activity),
+ .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
+ channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20, 0);
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
+ &delay);
+ roc_req.duration = cpu_to_le32(duration_tu);
+ roc_req.max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
+ IWL_DEBUG_TE(mvm,
+ "Requesting to remain on channel %u for %utu\n",
+ channel->hw_value, duration_tu);
+
+ /* Set the node address */
+ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ 0, sizeof(roc_req), &roc_req);
+
+ return res;
+}
+
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -1164,18 +1224,22 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ te_data = &mvmvif->time_event_data;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
+ IWL_DEBUG_TE(mvm,
+ "No remain on channel event\n");
+ return;
+ }
+
iwl_mvm_cancel_session_protection(mvm, vif,
- mvmvif->time_event_data.id,
- mvmvif->time_event_data.link_id);
- iwl_mvm_p2p_roc_finished(mvm);
+ te_data->id,
+ te_data->link_id);
} else {
iwl_mvm_roc_station_remove(mvm, mvmvif);
- iwl_mvm_roc_finished(mvm);
}
-
- return;
+ goto cleanup_roc;
}
te_data = iwl_mvm_get_roc_te(mvm);
@@ -1186,13 +1250,21 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
- iwl_mvm_p2p_roc_finished(mvm);
- } else {
+ else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
- iwl_mvm_roc_finished(mvm);
- }
+
+cleanup_roc:
+ /*
+ * In case we get here before the ROC event started,
+ * (so the status bit isn't set) set it here so iwl_mvm_cleanup_roc will
+ * cleanup things properly
+ */
+ set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
+ IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ &mvm->status);
+ iwl_mvm_cleanup_roc(mvm);
}
void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
@@ -1297,7 +1369,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 461f26d9214e..782ddc8c296b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -520,6 +520,31 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
+{
+ if (unlikely(!mvmsta))
+ return true;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return true;
+
+ if (likely(ieee80211_is_data(hdr->frame_control) &&
+ mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
+ return false;
+
+ /*
+ * Not a data frame: use the host rate on old devices that
+ * can't possibly be doing MLO (their firmware may select a
+ * bad rate). On devices that might be doing MLO we need to
+ * let the FW pick, since we don't necessarily know the link,
+ * and there FW rate selection was fixed anyway.
+ */
+ return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+}
+
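[Editor's illustration] In tabular form, the decision implemented above (summary only):

/*
 * no station (mvmsta == NULL) -> host rate
 * rate-injected frame         -> host rate
 * data frame, STA authorized  -> firmware rate
 * anything else               -> host rate on pre-Bz devices only;
 *                                Bz and later let the FW pick, since
 *                                the link may not be known under MLO
 */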
static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
const u8 *addr3_override)
{
@@ -567,12 +592,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/*
- * For data and mgmt packets rate info comes from the fw. Only
+ * For data and mgmt packets rate info comes from the fw (for
+ * new devices, older FW is somewhat broken for this). Only
* set rate/antenna for injected frames with fixed rate, or
- * when no sta is given.
+ * when no sta is given, or with older firmware.
*/
- if (unlikely(!sta ||
- info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
@@ -881,10 +906,10 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
if (WARN_ON(!link_conf))
band = NL80211_BAND_2GHZ;
else
- band = link_conf->chandef.chan->band;
+ band = link_conf->chanreq.oper.chan->band;
rcu_read_unlock();
} else {
- band = mvmsta->vif->bss_conf.chandef.chan->band;
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
}
lmac = iwl_mvm_get_lmac_id(mvm, band);
@@ -926,9 +951,15 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
- if (WARN_ON_ONCE(IS_ERR(next)))
- return -EINVAL;
- else if (next)
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
+ if (next)
consume_skb(skb);
skb_list_walk_safe(next, tmp, next) {
@@ -984,8 +1015,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 tid;
- snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
- tcp_hdrlen(skb);
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1636,12 +1666,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
- tx_resp->frame_count) & 0xfff;
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
}
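[Editor's illustration] The same read-out masked both ways, with a hypothetical value:

	u32 val = 0x0003abcd;		/* hypothetical SCD read-out */
	u16 ssn_new = val & 0xFFFF;	/* 0xabcd on AX210 and later */
	u16 ssn_old = val & 0xFFF;	/* 0xbcd on 22000-series and lower */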
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
@@ -2174,6 +2210,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
tfd_cnt, pkt_len))
return;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
@@ -2209,12 +2251,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
-
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
- sta_id, le32_to_cpu(ba_res->flags),
- le16_to_cpu(ba_res->txed),
- le16_to_cpu(ba_res->done));
return;
}
@@ -2246,9 +2282,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_unlock();
- iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags, false);
-
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
ba_notif->sta_addr, ba_notif->sta_id);
@@ -2261,6 +2294,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags, false);
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 91286018a69d..ab56ff87c6f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -249,6 +249,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
* This is the special case in which init is set and we call a callback in
* this case to clear the state indicating that station creation is in
* progress.
+ *
+ * Returns: 0 on success, a negative error code on failure
*/
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index fa4a14546860..c8fc8b4fd85c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -119,7 +119,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 5f55efe64bf5..0fa92704cd14 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2c9b98c8184b..4a657036b9d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -502,12 +502,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -526,7 +530,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, _cfg, _name)
-static const struct iwl_dev_info iwl_dev_info_table[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
/* 9000 */
IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
@@ -1008,8 +1012,13 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1115,8 +1124,24 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_sc_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sc2_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sc2f_name),
#endif /* CONFIG_IWLMVM */
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
+#endif
/*
* Read rf id and cdb info from prph register and store it
@@ -1143,6 +1168,20 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* In BZ, the MAC step must be read from the CNVI aux register */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+
+ /* For BZ-U, take B step also when A step is indicated */
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
+ step == SILICON_A_STEP)
+ step = SILICON_B_STEP;
+
+ iwl_trans->hw_rev_step = step;
+ iwl_trans->hw_rev |= step;
+ }
+
/* Read cdb info (also contains the jacket info, if needed in the future) */
iwl_trans->hw_wfpm_id =
iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
@@ -1236,7 +1275,7 @@ out:
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static const struct iwl_dev_info *
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
@@ -1299,6 +1338,7 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
return NULL;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1382,6 +1422,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
+ iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
}
#if IS_ENABLED(CONFIG_IWLMVM)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 63e13577aff8..6c76b2dd6878 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1484,12 +1484,9 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->trans_cfg->gen2)
- _iwl_trans_pcie_gen2_stop_device(trans);
- else
- _iwl_trans_pcie_stop_device(trans, from_irq);
- }
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
+ !WARN_ON(trans->trans_cfg->gen2))
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
@@ -1718,6 +1715,7 @@ enable_msi:
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
+#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1738,6 +1736,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
"Failed to set affinity mask for IRQ %d\n",
trans_pcie->msix_entries[i].vector);
}
+#endif
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 6c2b37e56c78..fa8eba47dc4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1331,7 +1331,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
amsdu_pad = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index ca74b1b63cac..33973a60d0bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -271,9 +271,10 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
meta = NULL;
goto unmap;
}
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
ret = 0;
unmap:
@@ -352,7 +353,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
@@ -1601,8 +1602,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (read_ptr == tfd_num)
goto out;
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->read_ptr, tfd_num, ssn);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
@@ -1630,7 +1631,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
- if (WARN_ON_ONCE(!skb))
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq->read_ptr, txq_id))
continue;
iwl_txq_free_tso_page(trans, skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
new file mode 100644
index 000000000000..5658471bdf0a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+iwlwifi-tests-y += module.o devinfo.o
+
+ccflags-y += -I$(srctree)/$(src)/../
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
new file mode 100644
index 000000000000..7aa47fce6e2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the iwlwifi device info table
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
+{
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->cores);
+}
+
+static void devinfo_table_order(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+ const struct iwl_dev_info *ret;
+
+ ret = iwl_pci_find_dev_info(di->device, di->subdevice,
+ di->mac_type, di->mac_step,
+ di->rf_type, di->cdb,
+ di->jacket, di->rf_id,
+ di->no_160, di->cores, di->rf_step);
+ if (ret != di) {
+ iwl_pci_print_dev_info("searched: ", di);
+ iwl_pci_print_dev_info("found: ", ret);
+ KUNIT_FAIL(test,
+ "unusable entry at index %d (found index %d instead)\n",
+ idx, (int)(ret - iwl_dev_info_table));
+ }
+ }
+}
+
+static struct kunit_case devinfo_test_cases[] = {
+ KUNIT_CASE(devinfo_table_order),
+ {}
+};
+
+static struct kunit_suite iwlwifi_devinfo = {
+ .name = "iwlwifi-devinfo",
+ .test_cases = devinfo_test_cases,
+};
+
+kunit_test_suite(iwlwifi_devinfo);
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/module.c b/drivers/net/wireless/intel/iwlwifi/tests/module.c
new file mode 100644
index 000000000000..0c54f818e5a7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Module boilerplate for the iwlwifi kunit module.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlwifi");
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index c6084683aedd..687841b2fa2a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -704,6 +704,10 @@ static void p54_set_coverage_class(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops p54_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = p54_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = p54_start,
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 104d2b6dc9af..5a525da434c2 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
if (!cmdarray[i].cmdbuf) {
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
ret = -1;
- goto done;
+ goto free_cmd_array;
}
}
@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- ret = 0;
+ return 0;
+free_cmd_array:
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+ if (cmdarray[i].cmdbuf) {
+ kfree(cmdarray[i].cmdbuf);
+ cmdarray[i].cmdbuf = NULL;
+ }
+ }
+ kfree(priv->cmd_array);
+ priv->cmd_array = NULL;
done:
return ret;
}
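[Editor's illustration] The fix follows the usual kernel unwind idiom: on a partial allocation failure, free everything allocated so far before propagating the error. A generic sketch with hypothetical names (not libertas code):

	for (i = 0; i < n; i++) {
		bufs[i] = kzalloc(BUF_SIZE, GFP_KERNEL);
		if (!bufs[i])
			goto free_partial;
	}
	return 0;

free_partial:
	while (i--)
		kfree(bufs[i]);
	kfree(bufs);
	return -ENOMEM;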
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 199d33ed3bb9..9cca69fe04d7 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -473,6 +473,10 @@ static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops lbtf_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = lbtf_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = lbtf_op_start,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index da211372a481..b90f922f1cdc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -288,6 +288,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
wiphy_lock(priv->wdev.wiphy);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
wiphy_unlock(priv->wdev.wiphy);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 90e401100898..c0c635e74bc5 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -392,12 +392,10 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
chan_list =
(struct mwifiex_ie_types_chan_list_param_set *) *buffer;
- memset(chan_list, 0,
- sizeof(struct mwifiex_ie_types_chan_list_param_set));
+ memset(chan_list, 0, struct_size(chan_list, chan_scan_param, 1));
chan_list->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
- chan_list->header.len = cpu_to_le16(
- sizeof(struct mwifiex_ie_types_chan_list_param_set) -
- sizeof(struct mwifiex_ie_types_header));
+ chan_list->header.len =
+ cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
chan_list->chan_scan_param[0].chan_number =
bss_desc->bcn_ht_oper->primary_chan;
chan_list->chan_scan_param[0].radio_type =
@@ -411,8 +409,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
- ret_len += sizeof(struct mwifiex_ie_types_chan_list_param_set);
+ *buffer += struct_size(chan_list, chan_scan_param, 1);
+ ret_len += struct_size(chan_list, chan_scan_param, 1);
}
if (bss_desc->bcn_bss_co_2040) {
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 3604abcbcff9..b909a7665e9c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -3359,7 +3359,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
}
if (!wowlan->patterns[i].pkt_offset) {
- if (!(byte_seq[0] & 0x01) &&
+ if (is_unicast_ether_addr(byte_seq) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index f9c9fec7c792..9deaf59dcb62 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -566,14 +566,8 @@ mwifiex_verext_write(struct file *file, const char __user *ubuf,
int ret;
u32 versionstrsel;
struct mwifiex_private *priv = (void *)file->private_data;
- char buf[16];
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = kstrtou32(buf, 10, &versionstrsel);
+ ret = kstrtou32_from_user(ubuf, count, 10, &versionstrsel);
if (ret)
return ret;
@@ -874,19 +868,14 @@ mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
{
bool timeshare_coex;
struct mwifiex_private *priv = file->private_data;
- char kbuf[16];
int ret;
if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
return -EOPNOTSUPP;
- memset(kbuf, 0, sizeof(kbuf));
-
- if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
- return -EFAULT;
-
- if (kstrtobool(kbuf, &timeshare_coex))
- return -EINVAL;
+ ret = kstrtobool_from_user(ubuf, count, &timeshare_coex);
+ if (ret)
+ return ret;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
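[Editor's illustration] Both debugfs hunks above collapse a stack buffer, copy_from_user() and kstrto*() into a single kstrto*_from_user() call, which bounds the copy and parses in one step. The resulting handler shape (sketch with a hypothetical name):

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	u32 val;
	int ret;

	ret = kstrtou32_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	/* ... apply val ... */
	return count;
}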
@@ -970,9 +959,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
mwifiex_dfs_dir);
- if (!priv->dfs_dev_dir)
- return;
-
MWIFIEX_DFS_ADD_FILE(info);
MWIFIEX_DFS_ADD_FILE(debug);
MWIFIEX_DFS_ADD_FILE(getlog);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 62f3c9a52a1d..3adc447b715f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -770,7 +770,7 @@ struct mwifiex_chan_scan_param_set {
struct mwifiex_ie_types_chan_list_param_set {
struct mwifiex_ie_types_header header;
- struct mwifiex_chan_scan_param_set chan_scan_param[1];
+ struct mwifiex_chan_scan_param_set chan_scan_param[];
} __packed;
struct mwifiex_ie_types_rxba_sync {
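[Editor's illustration] With chan_scan_param converted to a flexible array member, the 11n.c hunk above sizes the TLV with struct_size() from <linux/overflow.h>, which evaluates to sizeof(*p) + n * sizeof(p->member[0]) and saturates to SIZE_MAX on overflow. Sketch:

	/* tlv_buf is a hypothetical destination buffer */
	struct mwifiex_ie_types_chan_list_param_set *chan_list = tlv_buf;
	size_t bytes = struct_size(chan_list, chan_scan_param, 1);

	memset(chan_list, 0, bytes);	/* header + exactly one channel */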
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 318b42b1896f..175882485a19 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -28,11 +28,9 @@
#include <linux/inetdevice.h>
#include <linux/devcoredump.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a2ddac363b10..0326b121747c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -664,15 +664,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Copy the current channel TLV to the command being
prepared */
- memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
+ memcpy(&chan_tlv_out->chan_scan_param[tlv_idx],
tmp_chan_list,
- sizeof(chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/* Increment the TLV header length by the size
appended */
le16_unaligned_add_cpu(&chan_tlv_out->header.len,
- sizeof(
- chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
@@ -2369,12 +2368,11 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
bgscan_cfg_in->chan_list[chan_idx].chan_number;
chan_idx++) {
- temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+ temp_chan = &chan_list_tlv->chan_scan_param[chan_idx];
/* Increment the TLV header length by size appended */
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
- sizeof(
- chan_list_tlv->chan_scan_param));
+ sizeof(*chan_list_tlv->chan_scan_param));
temp_chan->chan_number =
bgscan_cfg_in->chan_list[chan_idx].chan_number;
@@ -2413,7 +2411,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_scan_param);
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
chan_num *
- sizeof(chan_list_tlv->chan_scan_param[0]));
+ sizeof(*chan_list_tlv->chan_scan_param));
}
tlv_pos += (sizeof(chan_list_tlv->header)
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 00a5679b5c51..8558995e8fc7 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -871,7 +871,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
}
} else {
memcpy(ra, skb->data, ETH_ALEN);
- if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
+ if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
eth_broadcast_addr(ra);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
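[Editor's illustration] The cfg80211.c and wmm.c hunks replace the open-coded group-bit test (addr[0] & 0x01) with the self-documenting <linux/etherdevice.h> helpers. Minimal sketch:

#include <linux/etherdevice.h>

u8 ra[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

if (is_multicast_ether_addr(ra))	/* group bit of octet 0 set */
	eth_broadcast_addr(ra);		/* -> ff:ff:ff:ff:ff:ff */
/* is_unicast_ether_addr() is simply the complement */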
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 13bcb123d122..ce8fea76dbb2 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5610,6 +5610,10 @@ static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops mwl8k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mwl8k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mwl8k_start,
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index d6575fe18c6b..f7f2d9a8ab0f 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o
+ tx.o agg-rx.o mcu.o wed.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 10cbd9e560e7..07c386c7b4d0 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -122,7 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
- u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+ u8 tidno;
u16 seqno;
if (!ieee80211_is_ctl(bar->frame_control))
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 00230f106294..72a7bd5a8576 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -197,9 +197,8 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q->tail = q->head;
}
-static void
-__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
@@ -219,8 +218,7 @@ __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
mt76_dma_sync_idx(dev, q);
}
-static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
__mt76_dma_queue_reset(dev, q, true);
}
@@ -632,9 +630,8 @@ free_skb:
return ret;
}
-static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
- bool allow_direct)
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
int frames = 0;
@@ -681,81 +678,6 @@ done:
return frames;
}
-int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
-{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- int ret = 0, type, ring;
- u16 flags;
-
- if (!q || !q->ndesc)
- return -EINVAL;
-
- flags = q->flags;
- if (!q->wed || !mtk_wed_device_active(q->wed))
- q->flags &= ~MT_QFLAG_WED;
-
- if (!(q->flags & MT_QFLAG_WED))
- return 0;
-
- type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
- ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
-
- switch (type) {
- case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
- reset);
- if (!ret)
- q->wed_regs = q->wed->tx_ring[ring].reg_base;
- break;
- case MT76_WED_Q_TXFREE:
- /* WED txfree queue needs ring to be initialized before setup */
- q->flags = 0;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q, false);
-
- ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
- if (!ret)
- q->wed_regs = q->wed->txfree_ring.reg_base;
- break;
- case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
- reset);
- if (!ret)
- q->wed_regs = q->wed->rx_ring[ring].reg_base;
- break;
- case MT76_WED_RRO_Q_DATA:
- q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
- mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
- q->head = q->ndesc - 1;
- q->queued = q->head;
- break;
- case MT76_WED_RRO_Q_MSDU_PG:
- q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
- mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
- q->head = q->ndesc - 1;
- q->queued = q->head;
- break;
- case MT76_WED_RRO_Q_IND:
- q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q, false);
- mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- q->flags = flags;
-
- return ret;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
-
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@@ -800,7 +722,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
- ret = mt76_dma_wed_setup(dev, q, false);
+ ret = mt76_wed_dma_setup(dev, q, false);
if (ret)
return ret;
@@ -863,7 +785,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
/* reset WED rx queues */
- mt76_dma_wed_setup(dev, q, true);
+ mt76_wed_dma_setup(dev, q, true);
if (mt76_queue_is_wed_tx_free(q))
return;
@@ -1054,20 +976,6 @@ void mt76_dma_attach(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
-void mt76_dma_wed_reset(struct mt76_dev *dev)
-{
- struct mt76_mmio *mmio = &dev->mmio;
-
- if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
- return;
-
- complete(&mmio->wed_reset);
-
- if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
- dev_err(dev->dev, "wed reset complete timeout\n");
-}
-EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);
-
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index c479cc6388ef..1de5a2b20f74 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -79,15 +79,18 @@ enum mt76_dma_wed_ind_reason {
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
-int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
-void mt76_dma_wed_reset(struct mt76_dev *dev);
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct);
+void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
dev->queue_ops->reset_q(dev, q);
if (mtk_wed_device_active(&dev->mmio.wed))
- mt76_dma_wed_setup(dev, q, true);
+ mt76_wed_dma_setup(dev, q, true);
}
static inline void
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8a3a90d1bfac..068206e48aec 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -579,13 +579,18 @@ EXPORT_SYMBOL_GPL(mt76_unregister_phy);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
+ bool is_qrx = mt76_queue_is_rx(dev, q);
struct page_pool_params pp_params = {
.order = 0,
.flags = 0,
.nid = NUMA_NO_NODE,
.dev = dev->dma_dev,
};
- int idx = q - dev->q_rx;
+ int idx = is_qrx ? q - dev->q_rx : -1;
+
+ /* Allocate page_pools just for rx/wed_tx_free queues */
+ if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
+ return 0;
switch (idx) {
case MT_RXQ_MAIN:
@@ -604,6 +609,9 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
pp_params.dma_dir = DMA_FROM_DEVICE;
pp_params.max_len = PAGE_SIZE;
pp_params.offset = 0;
+ /* NAPI is available just for rx queues */
+ if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
+ pp_params.napi = &dev->napi[idx];
}
q->page_pool = page_pool_create(&pp_params);
@@ -1613,8 +1621,8 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
+ ieee80211_csa_finish(vif, 0);
}
void mt76_csa_finish(struct mt76_dev *dev)
@@ -1638,7 +1646,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active)
return;
- dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
void mt76_csa_check(struct mt76_dev *dev)
@@ -1854,19 +1862,3 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
-
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data)
-{
- struct mt76_phy *phy = hw->priv;
- struct mtk_wed_device *wed = &phy->dev->mmio.wed;
-
- if (!mtk_wed_device_active(wed))
- return -EOPNOTSUPP;
-
- return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
-}
-EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
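
mt76_create_page_pool() now allocates pools only for rx queues and the WED tx-free queue, and binds genuine rx pools to the queue's NAPI context so recycled pages can take the lockless NAPI recycling path. A sketch of the resulting configuration, assuming the page_pool API exactly as used in the hunk above:

#include <net/page_pool/helpers.h>

/* Illustrative rx pool setup mirroring the diff; error handling elided. */
static struct page_pool *rx_pool_create(struct device *dma_dev,
					struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.order	 = 0,
		.nid	 = NUMA_NO_NODE,
		.dev	 = dma_dev,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.napi	 = napi,	/* enables NAPI-context recycling */
	};

	return page_pool_create(&pp);
}

The csa_finish()/beacon_cntdwn_is_complete() hunks track a mac80211 API change that added a link-id argument; passing 0 targets the default link, since these drivers do not beacon on MLO links. mt76_net_setup_tc() is dropped here and reappears below as mt76_wed_net_setup_tc() in the common WED code.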
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index c3e0e23e0161..cd2e9737c3bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -85,113 +85,6 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
- int i;
-
- for (i = 0; i < dev->rx_token_size; i++) {
- struct mt76_txwi_cache *t;
-
- t = mt76_rx_token_release(dev, i);
- if (!t || !t->ptr)
- continue;
-
- mt76_put_page_pool_buf(t->ptr, false);
- t->ptr = NULL;
-
- mt76_put_rxwi(dev, t);
- }
-
- mt76_free_pending_rxwi(dev);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
-
-u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
- struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, len = SKB_WITH_OVERHEAD(q->buf_size);
- struct mt76_txwi_cache *t = NULL;
-
- for (i = 0; i < size; i++) {
- enum dma_data_direction dir;
- dma_addr_t addr;
- u32 offset;
- int token;
- void *buf;
-
- t = mt76_get_rxwi(dev);
- if (!t)
- goto unmap;
-
- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
- if (!buf)
- goto unmap;
-
- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
-
- desc->buf0 = cpu_to_le32(addr);
- token = mt76_rx_token_consume(dev, buf, t, addr);
- if (token < 0) {
- mt76_put_page_pool_buf(buf, false);
- goto unmap;
- }
-
- token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
-#endif
- desc->token |= cpu_to_le32(token);
- desc++;
- }
-
- return 0;
-
-unmap:
- if (t)
- mt76_put_rxwi(dev, t);
- mt76_mmio_wed_release_rx_buf(wed);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
-
-int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- spin_lock_bh(&dev->token_lock);
- dev->token_size = wed->wlan.token_start;
- spin_unlock_bh(&dev->token_lock);
-
- return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
-
-void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- spin_lock_bh(&dev->token_lock);
- dev->token_size = dev->drv->token_size;
- spin_unlock_bh(&dev->token_lock);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
-
-void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- complete(&dev->mmio.wed_reset_complete);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
-
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
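
The mt76_mmio_wed_*() helpers deleted here are renamed to mt76_wed_*() and moved out of mmio.c; their new declarations appear in the mt76.h hunk below. The offload enable/disable pair only moves the token window: enable shrinks token_size to the WED token start and waits up to one second for in-flight WED tokens to drain, disable restores the driver default. A minimal sketch, assuming the fields used above (token lock, token_size, tx_wait, wed_token_count):

#include <linux/spinlock.h>
#include <linux/wait.h>

/* Sketch of the token-window switch; names mirror the removed helpers. */
static int wed_tokens_enable(spinlock_t *lock, u16 *token_size,
			     u16 token_start, wait_queue_head_t *wq,
			     int *wed_token_count)
{
	spin_lock_bh(lock);
	*token_size = token_start;	/* reserve the WED token range */
	spin_unlock_bh(lock);

	/* non-zero return means pending tokens did not drain within 1s */
	return !wait_event_timeout(*wq, !*wed_token_count, HZ);
}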
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index b20c34d5a0f7..a91f6ddacbd9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -210,6 +210,8 @@ struct mt76_queue {
u16 first;
u16 head;
u16 tail;
+ u8 hw_idx;
+ u8 ep;
int ndesc;
int queued;
int buf_size;
@@ -217,7 +219,6 @@ struct mt76_queue {
bool blocked;
u8 buf_offset;
- u8 hw_idx;
u16 flags;
struct mtk_wed_device *wed;
@@ -1081,12 +1082,6 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
-
static inline u16 mt76_chip(struct mt76_dev *dev)
{
return dev->rev >> 16;
@@ -1097,13 +1092,34 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
return dev->rev & 0xffff;
}
+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
+void mt76_wed_offload_disable(struct mtk_wed_device *wed);
+void mt76_wed_reset_complete(struct mtk_wed_device *wed);
+void mt76_wed_dma_reset(struct mt76_dev *dev);
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
-void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
-int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
-void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
-void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
+int mt76_wed_offload_enable(struct mtk_wed_device *wed);
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
+#else
+static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ return 0;
+}
+
+static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ return 0;
+}
+
+static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset)
+{
+ return 0;
+}
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
@@ -1470,13 +1486,6 @@ static inline bool mt76u_urb_error(struct urb *urb)
urb->status != -ENOENT;
}
-/* Map hardware queues to usb endpoints */
-static inline u8 q2ep(u8 qid)
-{
- /* TODO: take management packets to queue 5 */
- return qid + 1;
-}
-
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout, int ep)
@@ -1598,6 +1607,18 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
+static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ if (q == &dev->q_rx[i])
+ return true;
+ }
+
+ return false;
+}
+
static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
return (q->flags & MT_QFLAG_WED) &&
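
mt76.h now provides static-inline no-op fallbacks for the WED entry points when CONFIG_NET_MEDIATEK_SOC_WED is disabled, so call sites such as mt76_dma_alloc_queue() need no #ifdef of their own. The general shape of that pattern, with hypothetical names:

/* Config-gated API with inline no-op stubs; feature_x is illustrative. */
struct ctx;

#ifdef CONFIG_FEATURE_X
int feature_x_setup(struct ctx *c);
#else
static inline int feature_x_setup(struct ctx *c)
{
	return 0;	/* success: feature compiled out */
}
#endif

The new mt76_queue_is_rx() classifies a queue by testing address membership in dev->q_rx[], which is what lets mt76_create_page_pool() above distinguish real rx queues (which get a NAPI-bound pool) from the WED tx-free queue.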
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index e2146d30e553..9b49267b1eab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -701,6 +701,10 @@ static void mt7603_tx(struct ieee80211_hw *hw,
}
const struct ieee80211_ops mt7603_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7603_tx,
.start = mt7603_start,
.stop = mt7603_stop,
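
mac80211 made the channel-context ops mandatory this cycle; drivers that still rely on the non-chanctx path now install the ieee80211_emulate_*() helpers explicitly instead of leaving the ops NULL. The same block is added to mt76x0, mt76x2 and mt792x_get_mac80211_ops() below. The recurring shape:

/* Pattern for non-chanctx drivers after the mac80211 ops change. */
static const struct ieee80211_ops example_ops = {
	.add_chanctx	    = ieee80211_emulate_add_chanctx,
	.remove_chanctx	    = ieee80211_emulate_remove_chanctx,
	.change_chanctx	    = ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
	/* ... the driver's own callbacks ... */
};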
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index ae34d019e588..c807bd8d928d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -353,7 +353,7 @@ static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
if (vif->bss_conf.csa_active)
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index fdde3d70b300..98d64d3d2993 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -227,6 +227,11 @@ static inline bool is_mt7992(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7992;
}
+static inline bool is_mt799x(struct mt76_dev *dev)
+{
+ return is_mt7996(dev) || is_mt7992(dev);
+}
+
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index bd2a92467a97..5f132115ebfc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -32,6 +32,11 @@ enum {
MT_LMAC_PSMP0,
};
+enum {
+ MT_TXS_MPDU_FMT = 0,
+ MT_TXS_PPDU_FMT = 2,
+};
+
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_COUNT GENMASK(12, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index c7914643e9c0..b841bf628d02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -544,7 +544,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD5_PID, pid);
if (pid >= MT_PACKET_ID_FIRST) {
val |= MT_TXD5_TX_STATUS_HOST;
- amsdu_en = amsdu_en && !is_mt7921(dev);
+ amsdu_en = 0;
}
txwi[5] = cpu_to_le32(val);
@@ -579,6 +579,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
spe_idx = 24 + phy_idx;
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, spe_idx));
}
+
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
@@ -714,6 +716,9 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff_head list;
struct sk_buff *skb;
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == MT_TXS_PPDU_FMT)
+ return false;
+
mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
if (skb) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 3a20ba0d2492..af0c2b2aacb0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -66,7 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
- (is_mt7925(dev) && addr == 0x900000) ||
+ (is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
(is_mt7996(dev) && addr == 0x900000) ||
(is_mt7992(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
@@ -283,6 +283,9 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
+ if (is_mt799x(dev) && !wcid->sta)
+ hdr.muar_idx = 0xe;
+
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
skb = mt76_mcu_msg_alloc(dev, NULL, len);
@@ -2101,7 +2104,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
int j, msg_len, num_ch;
struct sk_buff *skb;
- num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
+ num_ch = i == batch_size - 1 ? n_chan - i * batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb) {
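
The num_ch fix above matters whenever n_chan is an exact multiple of batch_len: the old remainder expression evaluates to 0 for the final batch, so the last message carried no channels. A worked example, assuming a batch length of 32:

#include <linux/math.h>

/* Last-batch sizing: remainder-based (buggy) vs offset-based (fixed). */
int batch_len = 32, n_chan = 64;
int batch_size = DIV_ROUND_UP(n_chan, batch_len);
int i = batch_size - 1;			/* index of the final batch */

int buggy = n_chan % batch_len;		/* 0  -- drops the last 32 channels */
int fixed = n_chan - i * batch_len;	/* 32 -- the true remaining count   */

For n_chan = 65 both forms agree (1), which is why the bug only showed up on exact multiples.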
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index ae6d0179727d..657a4d1f856b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -808,6 +808,7 @@ enum {
STA_REC_MLD = 0x20,
STA_REC_EHT = 0x22,
STA_REC_PN_INFO = 0x26,
+ STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@@ -935,6 +936,9 @@ enum {
PHY_TYPE_INDEX_NUM
};
+#define HR_DSSS_ERP_BASIC_RATE GENMASK(3, 0)
+#define OFDM_BASIC_RATE (BIT(6) | BIT(8) | BIT(10))
+
#define PHY_TYPE_BIT_HR_DSSS BIT(PHY_TYPE_HR_DSSS_INDEX)
#define PHY_TYPE_BIT_ERP BIT(PHY_TYPE_ERP_INDEX)
#define PHY_TYPE_BIT_OFDM BIT(PHY_TYPE_OFDM_INDEX)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 293e66fa83d5..79b7996ad1a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -59,6 +59,10 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static const struct ieee80211_ops mt76x0e_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index dd042949cf82..bba44f289b4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -118,6 +118,10 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops mt76x0u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0u_start,
.stop = mt76x0u_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 85a78dea4085..29b9a15f8dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
+ int pid, len = tx_info->skb->len, ep = dev->mphy.q_tx[qid]->ep;
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index b38bb7a2362b..bfc8c69f43fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -132,6 +132,10 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
}
const struct ieee80211_ops mt76x2_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ca78e14251c2..e92bb871f231 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
+ { USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ac07ed1f63a3..9fe390fdd730 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -103,6 +103,10 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
const struct ieee80211_ops mt76x2u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index c91a1c54027f..0baa82c8df5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -614,7 +614,7 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
mtk_wed_device_dma_reset(wed);
mt7915_dma_disable(dev, force);
- mt76_dma_wed_reset(&dev->mt76);
+ mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index b01edbed969c..e45361111f9b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1520,12 +1520,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- mtk_wed_device_stop(&dev->mt76.mmio.wed);
- if (!is_mt798x(&dev->mt76))
- mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
- }
-
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_stop_queues(ext_phy->hw);
@@ -1545,6 +1539,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index df2d4279790d..3709d18da0e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1708,6 +1708,6 @@ const struct ieee80211_ops mt7915_ops = {
.set_radar_background = mt7915_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7915_net_fill_forward_path,
- .net_setup_tc = mt76_net_setup_tc,
+ .net_setup_tc = mt76_wed_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index c67c4f6ca2aa..d90f98c50039 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -228,7 +228,7 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -463,10 +463,10 @@ static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return false;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
mt7915_check_he_obss_narrow_bw_ru_iter,
&iter_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 3039f53e2245..d6ecd698cdcd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -490,6 +490,11 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return dev->reg.map[i].maps + ofs;
}
+ return 0;
+}
+
+static u32 __mt7915_reg_remap_addr(struct mt7915_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -514,15 +519,30 @@ void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7915_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7915_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
+ u32 addr = __mt7915_reg_addr(dev, offset), val;
- return dev->bus_ops->rr(mdev, addr);
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
@@ -530,7 +550,14 @@ static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- dev->bus_ops->wr(mdev, addr, val);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7915_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
@@ -538,7 +565,14 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- return dev->bus_ops->rmw(mdev, addr, mask, val);
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7915_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@@ -672,13 +706,13 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
}
wed->wlan.init_buf = mt7915_wed_init_buf;
- wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
- wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
wed->wlan.reset = mt7915_mmio_wed_reset;
- wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
@@ -707,6 +741,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7915:
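
__mt7915_reg_addr() now returns 0 for offsets outside the static map, and the accessors fall back to a remap window resolved by __mt7915_reg_remap_addr() under the new dev->reg_lock. Since the remap window is shared chip state, concurrent remapped accesses from different contexts would otherwise clobber each other's window programming; _bh locking covers softirq-context access. The read path, condensed into a sketch of the same logic as the hunks above:

/* Fixed-map-first read with locked remap fallback (sketch). */
static u32 reg_read(struct mt7915_dev *dev, u32 offset)
{
	u32 addr = __mt7915_reg_addr(dev, offset);	/* 0 if unmapped */
	u32 val;

	if (addr)
		return dev->bus_ops->rr(&dev->mt76, addr);

	spin_lock_bh(&dev->reg_lock);	/* remap window is shared state */
	val = dev->bus_ops->rr(&dev->mt76,
			       __mt7915_reg_remap_addr(dev, offset));
	spin_unlock_bh(&dev->reg_lock);

	return val;
}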
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 4727d9c7b11d..6e79bc65f5a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -287,6 +287,7 @@ struct mt7915_dev {
struct list_head sta_rc_list;
struct list_head twt_list;
+ spinlock_t reg_lock;
u32 hw_pattern;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index 8b4809703efc..f5b99917c08e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -516,7 +516,8 @@ static int mt798x_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
if (ret)
return ret;
- if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
+ if (version == 0x8a00 || version == 0x8a10 ||
+ version == 0x8b00 || version == 0x8c10) {
rg_xo_01 = 0x1d59080f;
rg_xo_03 = 0x34c00fe0;
} else {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 48433c6d5e7d..ef0c721d26e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -138,9 +138,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
if (pm->suspended)
return;
+ dev->regd_in_progress = true;
+
mt792x_mutex_acquire(dev);
mt7921_regd_update(dev);
mt792x_mutex_release(dev);
+
+ dev->regd_in_progress = false;
+ wake_up(&dev->wait);
}
int mt7921_mac_init(struct mt792x_dev *dev)
@@ -261,6 +266,7 @@ int mt7921_register_device(struct mt792x_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
+ init_waitqueue_head(&dev->wait);
if (mt76_is_sdio(&dev->mt76))
init_waitqueue_head(&dev->mt76.sdio.wait);
spin_lock_init(&dev->pm.txq_lock);
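
The new regd_in_progress flag and dev->wait queue let the suspend paths further down (PCI and SDIO) avoid racing a firmware regdomain update: the notifier marks the update window and wakes waiters when done, and suspend blocks on the flag for up to five seconds. The pairing, reduced to its essentials:

#include <linux/wait.h>

/* Sketch of the flag + waitqueue pairing used across notifier/suspend. */
static void regd_update_section(struct mt792x_dev *dev)
{
	dev->regd_in_progress = true;
	/* ... push the regdomain update to firmware ... */
	dev->regd_in_progress = false;
	wake_up(&dev->wait);
}

static void suspend_side(struct mt792x_dev *dev)
{
	wait_event_timeout(dev->wait, !dev->regd_in_progress, 5 * HZ);
}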
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0d5adc5ddae3..ca36de34171b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -325,6 +325,19 @@ static void mt7921_roc_iter(void *priv, u8 *mac,
mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
}
+void mt7921_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync);
+
void mt7921_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index f5582477c7e4..8b4ce32a2cd1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1272,7 +1272,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
};
int ret, valid_cnt = 0;
- u16 buf_len = 0;
+ u32 buf_len = 0;
u8 *pos;
if (!clc)
@@ -1283,7 +1283,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
if (mt76_find_power_limits_node(&dev->mt76))
req.cap |= CLC_CAP_DTS_EN;
- buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
+ buf_len = le32_to_cpu(clc->len) - sizeof(*clc);
pos = clc->data;
while (buf_len > 16) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 1cb21133992b..3016636d18c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -322,4 +322,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
enum mt7921_roc_req type, u8 token_id);
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
u8 token_id);
+void mt7921_roc_abort_sync(struct mt792x_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index dde26f327478..cda853e86676 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include "mt7921.h"
#include "../mt76_connac2_mac.h"
@@ -369,6 +370,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_irq;
+ if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
+ device_init_wakeup(dev->mt76.dev, true);
+
return 0;
err_free_irq:
@@ -386,7 +390,11 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
+ device_init_wakeup(dev->mt76.dev, false);
+
mt7921e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
@@ -405,10 +413,15 @@ static int mt7921_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7921_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
+ wait_event_timeout(dev->wait,
+ !dev->regd_in_progress, 5 * HZ);
+
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index a9ce1e746b95..004d942ee11a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -216,6 +216,8 @@ static int mt7921s_suspend(struct device *__dev)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7921_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 8f9b7a2f376c..c4cbc8976046 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -2,11 +2,61 @@
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
#include <linux/firmware.h>
#include "mt7925.h"
#include "mac.h"
#include "mcu.h"
+static ssize_t mt7925_thermal_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ switch (to_sensor_dev_attr(attr)->index) {
+ case 0: {
+ struct mt792x_phy *phy = dev_get_drvdata(dev);
+ struct mt792x_dev *mdev = phy->dev;
+ int temperature;
+
+ mt792x_mutex_acquire(mdev);
+ temperature = mt7925_mcu_get_temperature(phy);
+ mt792x_mutex_release(mdev);
+
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7925_thermal_temp, 0);
+
+static struct attribute *mt7925_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7925_hwmon);
+
+static int mt7925_thermal_init(struct mt792x_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct device *hwmon;
+ const char *name;
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s",
+ wiphy_name(wiphy));
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7925_hwmon_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
static void
mt7925_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
@@ -142,6 +192,12 @@ static void mt7925_init_work(struct work_struct *work)
return;
}
+ ret = mt7925_thermal_init(&dev->phy);
+ if (ret) {
+ dev_err(dev->mt76.dev, "thermal init failed\n");
+ return;
+ }
+
/* we support chip reset now */
dev->hw_init_done = true;
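
The hwmon hook exposes the MCU-reported temperature (whole degrees Celsius from firmware) as temp1_input in millidegrees, per the hwmon sysfs ABI, so a firmware reading of 45 shows up as "45000". One nit: devm_kasprintf() can return NULL on allocation failure and the new code passes the name through unchecked. A defensive variant of the registration (thermal_init here is a hypothetical helper; the API calls are the same ones used above):

#include <linux/err.h>
#include <linux/hwmon.h>

static int thermal_init(struct wiphy *wiphy, void *drvdata,
			const struct attribute_group **groups)
{
	struct device *hwmon;
	const char *name;

	if (!IS_REACHABLE(CONFIG_HWMON))
		return 0;

	name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s",
			      wiphy_name(wiphy));
	if (!name)
		return -ENOMEM;

	hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name,
						       drvdata, groups);
	return PTR_ERR_OR_ZERO(hwmon);
}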
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 125a1be3cb64..6179798a8845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -359,6 +359,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mvif->sta.vif = mvif;
mt76_wcid_init(&mvif->sta.wcid);
mt7925_mac_wtbl_update(dev, idx,
@@ -526,7 +527,7 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY && !mvif->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
}
@@ -710,7 +711,7 @@ static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
- mt792x_mac_set_timeing(phy);
+ mt7925_mcu_set_timing(phy, vif);
}
}
@@ -1274,6 +1275,25 @@ mt7925_channel_switch_beacon(struct ieee80211_hw *hw,
}
static int
+mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ static const u8 mq_to_aci[] = {
+ [IEEE80211_AC_VO] = 3,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ };
+
+ /* firmware uses access class index */
+ mvif->queue_params[mq_to_aci[queue]] = *params;
+
+ return 0;
+}
+
+static int
mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -1396,7 +1416,7 @@ const struct ieee80211_ops mt7925_ops = {
.add_interface = mt7925_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7925_config,
- .conf_tx = mt792x_conf_tx,
+ .conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
.bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,
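
mt7925_conf_tx() exists because mac80211 hands queue parameters indexed by IEEE80211_AC_* (VO=0, VI=1, BE=2, BK=3) while the mt7925 firmware expects WMM access-class-index ordering (BE=0, BK=1, VI=2, VO=3). Storing the parameters remapped is what lets the mt7925_mcu_set_tx() hunk below use e->queue = ac directly. The mapping in isolation:

/* mac80211 AC index -> firmware ACI, as used by mt7925_conf_tx(). */
static const u8 mq_to_aci[] = {
	[IEEE80211_AC_VO] = 3,
	[IEEE80211_AC_VI] = 2,
	[IEEE80211_AC_BE] = 0,
	[IEEE80211_AC_BK] = 1,
};

/* e.g. a voice (VO) update from mac80211 lands in queue_params[3] */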
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index c5fd7116929b..bd37cb8d734b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -656,6 +656,42 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
return ret;
}
+int mt7925_mcu_get_temperature(struct mt792x_phy *phy)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 _rsv2[4];
+ } __packed req = {
+ .tag = cpu_to_le16(0x0),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+ struct mt7925_thermal_evt {
+ u8 rsv[4];
+ __le32 temperature;
+ } __packed * evt;
+ struct mt792x_dev *dev = phy->dev;
+ int temperature, ret;
+ struct sk_buff *skb;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(THERMAL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ skb_pull(skb, 4 + sizeof(struct tlv));
+ evt = (struct mt7925_thermal_evt *)skb->data;
+
+ temperature = le32_to_cpu(evt->temperature);
+
+ dev_kfree_skb(skb);
+
+ return temperature;
+}
+
static void
mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
{
@@ -814,6 +850,7 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
struct mt76_wcid *wcid;
struct tlv *tlv;
@@ -827,7 +864,11 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (sta)
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ else
+ wcid = &mvif->sta.wcid;
+
if (!wcid)
return;
@@ -895,7 +936,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
e = (struct edca *)tlv;
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
+ e->queue = ac;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -921,61 +962,67 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
+ struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
struct sta_rec_sec_uni *sec;
+ struct mt792x_vif *mvif = msta->vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
struct tlv *tlv;
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sta = msta == &mvif->sta ?
+ NULL :
+ container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
+ sec->bss_idx = mvif->mt76.idx;
+ sec->is_authenticator = 0;
+ sec->mgmt_prot = 0;
+ sec->wlan_idx = (u8)wcid->idx;
+
+ if (sta) {
+ sec->tx_key = 1;
+ sec->key_type = 1;
+ memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
+ } else {
+ memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ }
if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
u8 cipher;
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
+ sec->add = 1;
+ cipher = mt7925_mcu_get_cipher(key->cipher);
+ if (cipher == CONNAC3_CIPHER_NONE)
return -EOPNOTSUPP;
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = sta_key_conf->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, sta_key_conf->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
- sec->n_cipher = 2;
+ if (cipher == CONNAC3_CIPHER_BIP_CMAC_128) {
+ sec->cipher_id = CONNAC3_CIPHER_BIP_CMAC_128;
+ sec->key_id = sta_key_conf->keyidx;
+ sec->key_len = 32;
+ memcpy(sec->key, sta_key_conf->key, 16);
+ memcpy(sec->key + 16, key->key, 16);
} else {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
+ sec->cipher_id = cipher;
+ sec->key_id = key->keyidx;
+ sec->key_len = key->keylen;
+ memcpy(sec->key, key->key, key->keylen);
- if (cipher == MCU_CIPHER_TKIP) {
+ if (cipher == CONNAC3_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
+ memcpy(sec->key + 16, key->key + 24, 8);
+ memcpy(sec->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
+ if (cipher == CONNAC3_CIPHER_AES_CCMP) {
memcpy(sta_key_conf->key, key->key, key->keylen);
sta_key_conf->keyidx = key->keyidx;
}
-
- sec->n_cipher = 1;
}
} else {
- sec->n_cipher = 0;
+ sec->add = 0;
}
return 0;
@@ -1460,12 +1507,10 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
struct tlv *tlv;
u8 af = 0, mm = 0;
- if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
- return;
-
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
+ phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
if (sta->deflink.ht_cap.ht_supported) {
af = sta->deflink.ht_cap.ampdu_factor;
mm = sta->deflink.ht_cap.ampdu_density;
@@ -1573,8 +1618,6 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
struct sk_buff *skb;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
@@ -1598,30 +1641,11 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
info->state);
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
}
- sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
- sizeof(struct tlv));
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
- WTBL_RESET_AND_SET,
- sta_wtbl, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- if (info->enable) {
- mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
- info->sta, sta_wtbl,
- wtbl_hdr);
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
- sta_wtbl, wtbl_hdr);
- if (info->sta)
- mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr,
- true, true);
- }
+ if (info->enable)
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
@@ -2049,9 +2073,9 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
- u8 idx, basic_phy;
struct tlv *tlv;
int conn_type;
+ u8 idx;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
@@ -2062,8 +2086,10 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
- basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta);
- basic_req->nonht_basic_phy = cpu_to_le16(basic_phy);
+ if (band == NL80211_BAND_2GHZ)
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
+ else
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
@@ -2122,21 +2148,21 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
sec = (struct bss_sec_tlv *)tlv;
switch (mvif->cipher) {
- case MCU_CIPHER_GCMP_256:
- case MCU_CIPHER_GCMP:
+ case CONNAC3_CIPHER_GCMP_256:
+ case CONNAC3_CIPHER_GCMP:
sec->mode = MODE_WPA3_SAE;
sec->status = 8;
break;
- case MCU_CIPHER_AES_CCMP:
+ case CONNAC3_CIPHER_AES_CCMP:
sec->mode = MODE_WPA2_PSK;
sec->status = 6;
break;
- case MCU_CIPHER_TKIP:
+ case CONNAC3_CIPHER_TKIP:
sec->mode = MODE_WPA2_PSK;
sec->status = 4;
break;
- case MCU_CIPHER_WEP104:
- case MCU_CIPHER_WEP40:
+ case CONNAC3_CIPHER_WEP104:
+ case CONNAC3_CIPHER_WEP40:
sec->mode = MODE_SHARED;
sec->status = 0;
break;
@@ -2167,6 +2193,11 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
bmc = (struct bss_rate_tlv *)tlv;
+ if (band == NL80211_BAND_2GHZ)
+ bmc->basic_rate = cpu_to_le16(HR_DSSS_ERP_BASIC_RATE);
+ else
+ bmc->basic_rate = cpu_to_le16(OFDM_BASIC_RATE);
+
bmc->short_preamble = (band == NL80211_BAND_2GHZ);
bmc->bc_fixed_rate = idx;
bmc->mc_fixed_rate = idx;
@@ -2249,6 +2280,38 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
vif->bss_conf.he_bss_color.color : 0;
}
+static void
+mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_phy *phy = mvif->phy;
+ struct bss_ifs_time_tlv *ifs_time;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_IFS_TIME, sizeof(*ifs_time));
+ ifs_time = (struct bss_ifs_time_tlv *)tlv;
+ ifs_time->slot_valid = true;
+ ifs_time->slot_time = cpu_to_le16(phy->slottime);
+}
+
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_vif *vif,
@@ -2273,6 +2336,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
mt7925_mcu_bss_qos_tlv(skb, vif);
mt7925_mcu_bss_mld_tlv(skb, vif, sta);
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
if (vif->bss_conf.he_support) {
mt7925_mcu_bss_he_tlv(skb, vif, phy);
@@ -2845,12 +2909,16 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
- uni_txd->option = MCU_CMD_UNI_EXT_ACK;
uni_txd->cid = cpu_to_le16(mcu_cmd);
uni_txd->s2d_index = MCU_S2D_H2N;
uni_txd->pkt_type = MCU_PKT_ID;
uni_txd->seq = seq;
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = MCU_CMD_UNI_QUERY_ACK;
+ else
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+
goto exit;
}
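
Key programming switches from the two-slot STA_REC_KEY_V2 layout to the flat STA_REC_KEY_V3 record (struct sta_rec_sec_uni in the mcu.h hunk below): one cipher per message, with the peer address, bss index and wlan index carried inline. BIP CMAC keys no longer occupy a second slot; they ride along with the stored pairwise CCMP key as a single 32-byte blob. A sketch of that packing, with illustrative buffer names:

/* STA_REC_KEY_V3 BIP handling: one 32-byte blob, CCMP key first. */
u8 key[32];

memcpy(key,      stored_ccmp_key, 16);	/* pairwise key saved earlier    */
memcpy(key + 16, bip_cmac_key,    16);	/* new BIP_CMAC_128 key appended */

The fill_message hunk also starts honoring __MCU_CMD_FIELD_QUERY by selecting MCU_CMD_UNI_QUERY_ACK, which the new temperature query above relies on to receive a payload in the firmware's reply.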
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 3c41e21303b1..2a0bbfe7bfa5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -159,6 +159,20 @@ enum {
UNI_EVENT_SCAN_DONE_NLO = 3,
};
+enum connac3_mcu_cipher_type {
+ CONNAC3_CIPHER_NONE = 0,
+ CONNAC3_CIPHER_WEP40 = 1,
+ CONNAC3_CIPHER_TKIP = 2,
+ CONNAC3_CIPHER_AES_CCMP = 4,
+ CONNAC3_CIPHER_WEP104 = 5,
+ CONNAC3_CIPHER_BIP_CMAC_128 = 6,
+ CONNAC3_CIPHER_WEP128 = 7,
+ CONNAC3_CIPHER_WAPI = 8,
+ CONNAC3_CIPHER_CCMP_256 = 10,
+ CONNAC3_CIPHER_GCMP = 11,
+ CONNAC3_CIPHER_GCMP_256 = 12,
+};
+
struct mt7925_mcu_scan_chinfo_event {
u8 nr_chan;
u8 alpha2[3];
@@ -208,7 +222,7 @@ struct scan_req_tlv {
__le16 channel_dwell_time; /* channel Dwell interval */
__le16 timeout_value;
__le16 probe_delay_time;
- u8 func_mask_ext;
+ __le32 func_mask_ext;
};
struct scan_ssid_tlv {
@@ -334,7 +348,8 @@ struct bss_req_hdr {
struct bss_rate_tlv {
__le16 tag;
__le16 len;
- u8 __rsv1[4];
+ u8 __rsv1[2];
+ __le16 basic_rate;
__le16 bc_trans;
__le16 mc_trans;
u8 short_preamble;
@@ -382,25 +397,22 @@ struct sta_rec_eht {
u8 _rsv2[3];
} __packed;
-struct sec_key_uni {
- __le16 wlan_idx;
- u8 mgmt_prot;
- u8 cipher_id;
- u8 cipher_len;
- u8 key_id;
- u8 key_len;
- u8 need_resp;
- u8 key[32];
-} __packed;
-
struct sta_rec_sec_uni {
__le16 tag;
__le16 len;
u8 add;
- u8 n_cipher;
- u8 rsv[2];
-
- struct sec_key_uni key[2];
+ u8 tx_key;
+ u8 key_type;
+ u8 is_authenticator;
+ u8 peer_addr[6];
+ u8 bss_idx;
+ u8 cipher_id;
+ u8 key_id;
+ u8 key_len;
+ u8 wlan_idx;
+ u8 mgmt_prot;
+ u8 key[32];
+ u8 key_rsc[16];
} __packed;
struct sta_rec_hdr_trans {
@@ -428,6 +440,22 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
+struct bss_ifs_time_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 slot_valid;
+ u8 sifs_valid;
+ u8 rifs_valid;
+ u8 eifs_valid;
+ __le16 slot_time;
+ __le16 sifs_time;
+ __le16 rifs_time;
+ __le16 eifs_time;
+ u8 eifs_cck_valid;
+ u8 rsv;
+ __le16 eifs_cck_time;
+} __packed;
+
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@@ -440,7 +468,7 @@ struct sta_rec_mld {
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_phy) + \
sizeof(struct sta_rec_ra) + \
- sizeof(struct sta_rec_sec) + \
+ sizeof(struct sta_rec_sec_uni) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_eht) + \
@@ -455,6 +483,7 @@ struct sta_rec_mld {
sizeof(struct bss_mld_tlv) + \
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
+ sizeof(struct bss_ifs_time_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@@ -509,6 +538,33 @@ struct mt7925_wow_pattern_tlv {
u8 rsv[4];
} __packed;
+static inline enum connac3_mcu_cipher_type
+mt7925_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return CONNAC3_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return CONNAC3_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return CONNAC3_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return CONNAC3_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return CONNAC3_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return CONNAC3_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return CONNAC3_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return CONNAC3_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return CONNAC3_CIPHER_WAPI;
+ default:
+ return CONNAC3_CIPHER_NONE;
+ }
+}
+
int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@@ -525,6 +581,8 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int enable);
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 33785f526acf..8a4a71f6bcb6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -271,6 +271,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int mt7925_mcu_get_temperature(struct mt792x_phy *phy);
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 1fd99a856541..07b74d492ce1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -386,6 +386,8 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_rmw_field(dev, MT_HW_EMI_CTL, MT_HW_EMI_CTL_SLPPROT_EN, 1);
+
ret = mt792x_wfsys_reset(dev);
if (ret)
goto err_free_dev;
@@ -425,6 +427,7 @@ static void mt7925_pci_remove(struct pci_dev *pdev)
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
mt7925e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 3c897b34aaa7..a8556de3d480 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -186,6 +186,8 @@ struct mt792x_dev {
bool hw_init_done:1;
bool fw_assert:1;
bool has_eht:1;
+ bool regd_in_progress:1;
+ wait_queue_head_t wait;
struct work_struct init_work;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index e7afea87e82e..9317f8ff2070 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -66,13 +66,15 @@ free:
}
/* MTCL : Country List Table for 6G band */
-static void
+static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
- if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0)
- *version = 1;
- else
- *version = 2;
+ int ret;
+
+ *version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
+ ? 1 : 2;
+
+ return ret;
}
/* MTDS : Dynamic SAR Power Table */
@@ -166,16 +168,16 @@ int mt792x_init_acpi_sar(struct mt792x_dev *dev)
if (!asar)
return -ENOMEM;
- mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ ret = mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->countrylist);
+ asar->countrylist = NULL;
+ }
- /* MTDS is mandatory. Return error if table is invalid */
ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->dyn);
- devm_kfree(dev->mt76.dev, asar->countrylist);
- devm_kfree(dev->mt76.dev, asar);
-
- return ret;
+ asar->dyn = NULL;
}
/* MTGS is optional */
@@ -290,7 +292,7 @@ int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default)
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
int i;
- if (!phy->acpisar)
+ if (!phy->acpisar || !((struct mt792x_acpi_sar *)phy->acpisar)->dyn)
return 0;
/* When ACPI SAR enabled in HW, we should apply rules for .frp
@@ -353,11 +355,15 @@ static u8
mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
{
u8 config = 0;
+ u8 mode_6g, mode_5g9;
+
+ mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
+ mode_5g9 = (cl->mode_5g9 > 0x01) ? 0 : cl->mode_5g9;
- if (cl->cl6g[row] & BIT(column))
- config |= (cl->mode_6g & 0x3) << 2;
+ if ((cl->cl6g[row] & BIT(column)) || cl->mode_6g == 0x02)
+ config |= (mode_6g & 0x3) << 2;
if (cl->version > 1 && cl->cl5g9[row] & BIT(column))
- config |= (cl->mode_5g9 & 0x3);
+ config |= (mode_5g9 & 0x3);
return config;
}
@@ -374,7 +380,7 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
"AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
"FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
"LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
- "PT", "RO", "MT", "SK", "SI", "ES", "CH",
+ "PT", "RO", "SK", "SI", "ES", "SE", "CH",
};
struct mt792x_acpi_sar *sar = phy->acpisar;
struct mt792x_asar_cl *cl;
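
The ACPI SAR tables become individually optional: a failed MTCL or MTDS read now frees and clears just that table instead of aborting the whole init, and mt792x_init_acpi_sar_power() bails out when ->dyn is absent. Out-of-range 6G/5G9 mode values from firmware are clamped to 0 before being packed into the config bits, and the EU list fix drops a duplicated "MT" entry while adding the missing "SE". The optional-table pattern, reduced (read_table is a hypothetical stand-in; the devm semantics match the hunks above):

/* Optional-table pattern: keep going, but remember what failed. */
ret = read_table(dev->mt76.dev, &tbl);
if (ret) {
	devm_kfree(dev->mt76.dev, tbl);
	tbl = NULL;		/* consumers must check for NULL */
}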
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index c42101aa9e45..a405af8d9052 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -354,6 +354,7 @@ static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
+ "v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",
@@ -684,9 +685,10 @@ mt792x_get_mac80211_ops(struct device *dev,
if (!(*fw_features & MT792x_FW_CAP_CNM)) {
ops->remain_on_channel = NULL;
ops->cancel_remain_on_channel = NULL;
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->mgd_prepare_tx = NULL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 488326ce5ed4..5cc2d59b774a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -12,6 +12,8 @@ irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
struct mt792x_dev *dev = dev_instance;
+ if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
+ return IRQ_NONE;
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -123,14 +125,13 @@ static void mt792x_dma_prefetch(struct mt792x_dev *dev)
int mt792x_dma_enable(struct mt792x_dev *dev)
{
- if (is_mt7925(&dev->mt76))
- mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
-
/* configure prefetch settings */
mt792x_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (is_mt7925(&dev->mt76))
+ mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
@@ -140,12 +141,20 @@ int mt792x_dma_enable(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
+ MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
+ MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ if (is_mt7925(&dev->mt76)) {
+ mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
+ mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
+ mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
+ }
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
index a99af23e4b56..458cfd0260b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
@@ -292,9 +292,12 @@
#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WFDMA0_GLO_CFG_DMA_SIZE GENMASK(5, 4)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
+#define MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK BIT(11)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WFDMA0_GLO_CFG_RX_WB_DDONE BIT(13)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
@@ -322,6 +325,8 @@
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_RST_DRX_PTR MT_WFDMA0(0x280)
+#define MT_WFDMA0_INT_RX_PRI MT_WFDMA0(0x298)
+#define MT_WFDMA0_INT_TX_PRI MT_WFDMA0(0x29c)
#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
@@ -389,6 +394,9 @@
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_HW_EMI_CTL 0x18011100
+#define MT_HW_EMI_CTL_SLPPROT_EN BIT(1)
+
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 589a3efb9f8c..b49668a4b784 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -121,44 +121,25 @@ static void mt792xu_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
static void mt792xu_dma_prefetch(struct mt792x_dev *dev)
{
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
- MT_WPDMA0_BASE_PTR_MASK, 0x80);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
- MT_WPDMA0_BASE_PTR_MASK, 0xc0);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
- MT_WPDMA0_BASE_PTR_MASK, 0x100);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
- MT_WPDMA0_BASE_PTR_MASK, 0x140);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
- MT_WPDMA0_BASE_PTR_MASK, 0x180);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
- MT_WPDMA0_BASE_PTR_MASK, 0x280);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
- MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
+#define DMA_PREFETCH_CONF(_idx_, _cnt_, _base_) \
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL((_idx_)), \
+ MT_WPDMA0_MAX_CNT_MASK | MT_WPDMA0_BASE_PTR_MASK, \
+ FIELD_PREP(MT_WPDMA0_MAX_CNT_MASK, (_cnt_)) | \
+ FIELD_PREP(MT_WPDMA0_BASE_PTR_MASK, (_base_)))
+
+ DMA_PREFETCH_CONF(0, 4, 0x080);
+ DMA_PREFETCH_CONF(1, 4, 0x0c0);
+ DMA_PREFETCH_CONF(2, 4, 0x100);
+ DMA_PREFETCH_CONF(3, 4, 0x140);
+ DMA_PREFETCH_CONF(4, 4, 0x180);
+ DMA_PREFETCH_CONF(16, 4, 0x280);
+ DMA_PREFETCH_CONF(17, 4, 0x2c0);
}
static void mt792xu_wfdma_init(struct mt792x_dev *dev)
{
+ int i;
+
mt792xu_dma_prefetch(dev);
mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
@@ -169,10 +150,27 @@ static void mt792xu_wfdma_init(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- /* disable dmashdl */
- mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
- MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
- mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+ mt76_rmw(dev, MT_DMASHDL_REFILL, MT_DMASHDL_REFILL_MASK, 0xffe00000);
+ mt76_clear(dev, MT_DMASHDL_PAGE, MT_DMASHDL_GROUP_SEQ_ORDER);
+ mt76_rmw(dev, MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 0));
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0xfff));
+ for (i = 5; i < 16; i++)
+ mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x0) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x0));
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(0), 0x32013201);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(1), 0x32013201);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(2), 0x55555444);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(3), 0x55555444);
+
+ mt76_wr(dev, MT_DMASHDL_SCHED_SET(0), 0x76540132);
+ mt76_wr(dev, MT_DMASHDL_SCHED_SET(1), 0xFEDCBA98);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
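
The DMA_PREFETCH_CONF macro above halves the register traffic by or-ing both field masks into a single mt76_rmw() and placing each value with FIELD_PREP(). A minimal userspace sketch of that combined read-modify-write pattern, with illustrative masks standing in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define GENMASK32(h, l)    (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define MAX_CNT_MASK   GENMASK32(27, 16)   /* illustrative field layout */
#define BASE_PTR_MASK  GENMASK32(15, 0)

/* One read-modify-write updates both fields of the register at once. */
static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
{
    return (reg & ~mask) | (val & mask);
}

int main(void)
{
    uint32_t reg = 0xdeadbeef;

    reg = rmw(reg, MAX_CNT_MASK | BASE_PTR_MASK,
              FIELD_PREP32(MAX_CNT_MASK, 4) |
              FIELD_PREP32(BASE_PTR_MASK, 0x080));
    printf("reg = 0x%08x\n", reg);   /* 0xd0040080 */
    return 0;
}

Folding both fields into one mt76_rmw() also closes the window between the two partial updates that the old back-to-back calls left open.
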
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 483ad81b6eec..73e633d0d700 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -237,7 +237,8 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_EXT_EN);
if (dev->hif2)
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -694,7 +695,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
mt7996_dma_disable(dev, force);
- mt76_dma_wed_reset(&dev->mt76);
+ mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 0cf0d1fe420a..283df84f1b43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -493,7 +493,7 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
void mt7996_mac_init(struct mt7996_dev *dev)
{
-#define HIF_TXD_V2_1 4
+#define HIF_TXD_V2_1 0x21
int i;
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
@@ -507,11 +507,6 @@ void mt7996_mac_init(struct mt7996_dev *dev)
mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
}
- /* txs report queue */
- mt76_rmw_field(dev, MT_DMA_TCRF1(0), MT_DMA_TCRF1_QIDX, 0);
- mt76_rmw_field(dev, MT_DMA_TCRF1(1), MT_DMA_TCRF1_QIDX, 6);
- mt76_rmw_field(dev, MT_DMA_TCRF1(2), MT_DMA_TCRF1_QIDX, 0);
-
/* rro module init */
if (is_mt7996(&dev->mt76))
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
@@ -1012,11 +1007,12 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
- if (vif != NL80211_IFTYPE_AP)
+ if (!(vif == NL80211_IFTYPE_AP || vif == NL80211_IFTYPE_STATION))
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ if (vif == NL80211_IFTYPE_AP)
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 53258488d49f..0384fb059ddf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -732,6 +732,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
txwi[2] |= cpu_to_le32(val);
+
+ if (wcid->amsdu)
+ txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}
static void
@@ -862,8 +865,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD3_PROTECT_FRAME;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
val |= MT_TXD3_NO_ACK;
- if (wcid->amsdu)
- val |= MT_TXD3_HW_AMSDU;
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
@@ -1188,25 +1189,28 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool cck = false;
u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (skb) {
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
+ /* only report MPDU TXS */
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (skb) {
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len =
- !!(info->flags & IEEE80211_TX_STAT_ACK);
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len =
+ !!(info->flags & IEEE80211_TX_STAT_ACK);
- info->status.rates[0].idx = -1;
+ info->status.rates[0].idx = -1;
+ }
}
if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
@@ -2527,6 +2531,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
+ struct mt7996_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2538,8 +2570,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_twt_flow *flow;
- int flowid, table_id;
- u8 exp;
+ u8 flowid, table_id, exp;
if (mt7996_mac_check_twt_req(twt))
goto out;
@@ -2552,9 +2583,19 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
+ if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
+ goto unlock;
+ }
+
+ if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
@@ -2601,10 +2642,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
- twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
- (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
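
The new mt7996_mac_twt_param_equal() above walks the station's active flows and lets the setup handler bail out when the requested agreement duplicates one that already exists. A compact sketch of that duplicate check, with the flow fields trimmed down to illustrative ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_FLOWS 8

struct twt_flow { uint8_t duration, mantissa, exp; };   /* trimmed fields */

struct sta {
    uint8_t flowid_mask;             /* BIT(i) set => flow[i] is in use */
    struct twt_flow flow[MAX_FLOWS];
};

/* True if the requested agreement matches an active flow, mirroring the
 * driver's new check before it allocates a fresh flow id. */
static bool twt_param_equal(const struct sta *s, const struct twt_flow *req)
{
    for (int i = 0; i < MAX_FLOWS; i++) {
        if (!(s->flowid_mask & (1u << i)))
            continue;
        if (s->flow[i].duration == req->duration &&
            s->flow[i].mantissa == req->mantissa &&
            s->flow[i].exp == req->exp)
            return true;
    }
    return false;
}

int main(void)
{
    struct sta s = { .flowid_mask = 1u << 2 };
    struct twt_flow req = { 64, 10, 3 };

    s.flow[2] = req;
    printf("duplicate: %s\n", twt_param_equal(&s, &req) ? "yes" : "no");
    return 0;
}

When no duplicate is found, the driver picks the lowest free id with ffs(~flowid_mask) - 1, the usual find-first-zero idiom for small allocation bitmaps.
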
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 51deea84b642..f7da8d6dd903 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -350,9 +350,12 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
+ break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- break;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ break;
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
default:
@@ -1450,6 +1453,10 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7996_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7996_tx,
.start = mt7996_start,
.stop = mt7996_stop,
@@ -1495,6 +1502,6 @@ const struct ieee80211_ops mt7996_ops = {
.set_radar_background = mt7996_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7996_net_fill_forward_path,
- .net_setup_tc = mt76_net_setup_tc,
+ .net_setup_tc = mt76_wed_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 699be57309c2..b44abe2acc81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -341,7 +341,7 @@ mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -732,13 +732,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
+ struct tlv *ptlv = skb_put(skb, len);
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
+ ptlv->tag = cpu_to_le16(tag);
+ ptlv->len = cpu_to_le16(len);
return ptlv;
}
@@ -1240,6 +1237,9 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
static void
mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_vif *vif = container_of((void *)msta->vif,
+ struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
@@ -1259,8 +1259,17 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
- memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
+ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz,
+ sizeof(eht->mcs_map_bw20));
+ return;
+ }
+
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
@@ -2510,7 +2519,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
+ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
bcn->enable = en;
@@ -2579,8 +2588,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
info->band = band;
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
-
+ len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
discov = (struct bss_inband_discovery_tlv *)tlv;
@@ -3539,7 +3547,7 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
- skb_pull(skb, 64);
+ skb_pull(skb, 48);
memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
}
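
The reworked mt7996_mcu_add_uni_tlv() above writes the little-endian tag/len header straight into the skb tail instead of staging a struct on the stack, and the beacon paths now round the TLV length up with ALIGN(..., 4) so the following TLV stays word aligned. A hedged sketch of that reserve-and-fill pattern over a plain buffer instead of an skb:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)   /* same rounding as ALIGN(x, 4) */

struct tlv {                  /* simplified: the kernel uses __le16 fields */
    uint16_t tag;
    uint16_t len;
    uint8_t  data[];
};

/* Reserve len bytes at the buffer tail and fill the TLV header in place;
 * the consumed size is padded so the next TLV starts 4-byte aligned. */
static struct tlv *add_tlv(uint8_t *buf, size_t *used, uint16_t tag, uint16_t len)
{
    struct tlv *ptlv = (struct tlv *)(buf + *used);

    *used += ALIGN4(len);
    ptlv->tag = tag;          /* kernel: cpu_to_le16(tag) */
    ptlv->len = len;
    return ptlv;
}

int main(void)
{
    uint8_t buf[256] = { 0 };
    size_t used = 0;
    struct tlv *t = add_tlv(buf, &used, 0x0027, sizeof(struct tlv) + 13);

    memcpy(t->data, "beacon-sample", 13);
    printf("len=%u consumed=%zu\n", t->len, used);   /* len=17 consumed=20 */
    return 0;
}

The extra `4 +` added to MT7996_BEACON_UPDATE_SIZE in the mcu.h hunk below accounts for the same worst-case alignment slack.
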
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 36cacc495c75..43468bcaffc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -800,10 +800,10 @@ enum {
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct tlv))
-#define MT7996_MAX_BEACON_SIZE 1342
+#define MT7996_MAX_BEACON_SIZE 1338
#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct bss_bcn_content_tlv) + \
- MT_TXD_SIZE + \
+ 4 + MT_TXD_SIZE + \
sizeof(struct bss_bcn_cntdwn_tlv) + \
sizeof(struct bss_bcn_mbss_tlv))
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 9f2abfa273c9..304e5fd14803 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -140,7 +140,6 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
- dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
MT_HIF_REMAP_L1_MASK,
FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
@@ -155,7 +154,6 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
- dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
MT_HIF_REMAP_L2_MASK,
FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
@@ -165,26 +163,10 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
return MT_HIF_REMAP_BASE_L2 + offset;
}
-static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
-{
- /* remap to ori status */
- if (unlikely(dev->reg_l1_backup)) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
- dev->reg_l1_backup = 0;
- }
-
- if (dev->reg_l2_backup) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
- dev->reg_l2_backup = 0;
- }
-}
-
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
- mt7996_reg_remap_restore(dev);
-
if (addr < 0x100000)
return addr;
@@ -201,6 +183,11 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
return dev->reg.map[i].mapped + ofs;
}
+ return 0;
+}
+
+static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -225,28 +212,60 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7996_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7996_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset), val;
+
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
- return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7996_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
- dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7996_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
+
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7996_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
- return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
+ return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@@ -391,13 +410,13 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.amsdu_max_len = 1536;
wed->wlan.init_buf = mt7996_wed_init_buf;
- wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
- wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_wed_offload_disable;
if (!hif2) {
wed->wlan.reset = mt7996_mmio_wed_reset;
- wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_wed_reset_complete;
}
if (mtk_wed_device_attach(wed))
@@ -421,6 +440,7 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7996_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7990:
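
The accessor rework above splits the address lookup in two: __mt7996_reg_addr() resolves statically mapped ranges without any lock and returns 0 otherwise, and only the fallback path, which must reprogram the shared L1/L2 remap windows, takes the new reg_lock. A condensed userspace sketch of that fast-path/slow-path shape (names and the window math are simplified stand-ins):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_spinlock_t reg_lock;
static uint32_t remap_window;   /* stand-in for the HIF remap register */

/* Fast path: fixed windows resolve locklessly; 0 means "not mapped". */
static uint32_t static_map(uint32_t addr)
{
    return addr < 0x100000 ? addr : 0;
}

/* Slow path: reprogramming the shared window must be serialized. */
static uint32_t remap_addr(uint32_t addr)
{
    remap_window = addr & 0xffff0000;   /* would hit MT_HIF_REMAP_L1/L2 */
    return 0x100000 + (addr & 0xffff);
}

static uint32_t resolve(uint32_t offset)
{
    uint32_t addr = static_map(offset);

    if (addr)
        return addr;   /* the real accessors do the MMIO op here */

    pthread_spin_lock(&reg_lock);
    addr = remap_addr(offset);
    pthread_spin_unlock(&reg_lock);
    return addr;
}

int main(void)
{
    pthread_spin_init(&reg_lock, PTHREAD_PROCESS_PRIVATE);
    printf("0x%x 0x%x\n", resolve(0x1000), resolve(0x70010200));
    return 0;
}

This is apparently also why the reg_l1_backup/reg_l2_backup save-and-restore logic could be deleted: with reg_lock serializing every remapped access, there is no window state left to restore.
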
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index bc73bcb47bf0..36d1f247d55a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -53,6 +53,7 @@
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
+#define MT7996_MIN_TWT_DUR 64
#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
@@ -320,12 +321,11 @@ struct mt7996_dev {
struct rchan *relay_fwlog;
struct {
- u8 table_mask;
+ u16 table_mask;
u8 n_agrt;
} twt;
- u32 reg_l1_backup;
- u32 reg_l2_backup;
+ spinlock_t reg_lock;
u8 wtbl_size_group;
};
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 5a0bcb5071bd..342c3aea549d 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -767,7 +767,7 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
return;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -872,9 +872,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (err < 0)
return err;
- mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
- q->entry[idx].urb, mt76u_complete_tx,
- &q->entry[idx]);
+ mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
+ mt76u_complete_tx, &q->entry[idx]);
q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
@@ -906,9 +905,13 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
-static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+static void
+mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
- if (mt76_chip(dev) == 0x7663) {
+ u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;
+
+ switch (mt76_chip(dev)) {
+ case 0x7663: {
static const u8 lmac_queue_map[] = {
/* ac to lmac mapping */
[IEEE80211_AC_BK] = 0,
@@ -917,33 +920,36 @@ static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
[IEEE80211_AC_VO] = 4,
};
- if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
- return 1; /* BE */
-
- return lmac_queue_map[ac];
+ q->hw_idx = lmac_queue_map[ac];
+ q->ep = q->hw_idx + 1;
+ break;
+ }
+ case 0x7961:
+ case 0x7925:
+ q->hw_idx = mt76_ac_to_hwq(ac);
+ q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
+ break;
+ default:
+ q->hw_idx = mt76_ac_to_hwq(ac);
+ q->ep = q->hw_idx + 1;
+ break;
}
-
- return mt76_ac_to_hwq(ac);
}
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
- struct mt76_queue *q;
- int i, j, err;
+ int i;
for (i = 0; i <= MT_TXQ_PSD; i++) {
- if (i >= IEEE80211_NUM_ACS) {
- dev->phy.q_tx[i] = dev->phy.q_tx[0];
- continue;
- }
+ struct mt76_queue *q;
+ int j, err;
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
- q->hw_idx = mt76u_ac_to_hwq(dev, i);
-
+ mt76u_ac_to_hwq(dev, q, i);
dev->phy.q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
@@ -969,7 +975,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
mt76_worker_teardown(&dev->usb.status_worker);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
struct mt76_queue *q;
int j;
@@ -999,7 +1005,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
dev_err(dev->dev, "timed out waiting for pending tx\n");
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -1013,7 +1019,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
/* On device removal we might queue skbs, but mt76u_tx_kick()
* will fail to submit the urb; clean up those skbs manually.
*/
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
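
The reshaped helper above turns mt76u_ac_to_hwq() into a per-chip switch that fills in both the hardware queue index and the USB endpoint for a queue id, with MT_TXQ_PSD falling back to the best-effort AC. A reduced sketch of that mapping (chip id kept, endpoint numbering illustrative, and the 0x7961/0x7925 PSD-to-HCCA special case omitted):

#include <stdint.h>
#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS, TXQ_PSD = NUM_ACS };

struct queue { uint8_t hw_idx, ep; };

static void ac_to_hwq(uint16_t chip, struct queue *q, uint8_t qid)
{
    uint8_t ac = qid < NUM_ACS ? qid : AC_BE;   /* PSD falls back to BE */

    switch (chip) {
    case 0x7663: {
        /* ac to lmac queue mapping, as in the driver's table */
        static const uint8_t lmac[] = { [AC_BK] = 0, [AC_BE] = 1,
                                        [AC_VI] = 2, [AC_VO] = 4 };
        q->hw_idx = lmac[ac];
        break;
    }
    default:
        q->hw_idx = ac;   /* stand-in for mt76_ac_to_hwq() */
        break;
    }
    q->ep = q->hw_idx + 1;
}

int main(void)
{
    struct queue q;

    ac_to_hwq(0x7663, &q, TXQ_PSD);
    printf("hw_idx=%u ep=%u\n", q.hw_idx, q.ep);   /* hw_idx=1 ep=2 */
    return 0;
}

Allocating a real queue for MT_TXQ_PSD, instead of aliasing q_tx[0], is what lets the flush and stop loops above iterate `i <= MT_TXQ_PSD` uniformly.
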
diff --git a/drivers/net/wireless/mediatek/mt76/wed.c b/drivers/net/wireless/mediatek/mt76/wed.c
new file mode 100644
index 000000000000..f89e4537555c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/wed.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "mt76.h"
+#include "dma.h"
+
+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ int i;
+
+ for (i = 0; i < dev->rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(dev, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->ptr = NULL;
+
+ mt76_put_rxwi(dev, t);
+ }
+
+ mt76_free_pending_rxwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, len = SKB_WITH_OVERHEAD(q->buf_size);
+ struct mt76_txwi_cache *t = NULL;
+
+ for (i = 0; i < size; i++) {
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
+ int token;
+ void *buf;
+
+ t = mt76_get_rxwi(dev);
+ if (!t)
+ goto unmap;
+
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
+ goto unmap;
+
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(dev, buf, t, addr);
+ if (token < 0) {
+ mt76_put_page_pool_buf(buf, false);
+ goto unmap;
+ }
+
+ token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
+#endif
+ desc->token |= cpu_to_le32(token);
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ if (t)
+ mt76_put_rxwi(dev, t);
+ mt76_wed_release_rx_buf(wed);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);
+
+int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->token_lock);
+
+ return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_offload_enable);
+
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
+{
+ int ret = 0, type, ring;
+ u16 flags;
+
+ if (!q || !q->ndesc)
+ return -EINVAL;
+
+ flags = q->flags;
+ if (!q->wed || !mtk_wed_device_active(q->wed))
+ q->flags &= ~MT_QFLAG_WED;
+
+ if (!(q->flags & MT_QFLAG_WED))
+ return 0;
+
+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
+ ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
+
+ switch (type) {
+ case MT76_WED_Q_TX:
+ ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
+ reset);
+ if (!ret)
+ q->wed_regs = q->wed->tx_ring[ring].reg_base;
+ break;
+ case MT76_WED_Q_TXFREE:
+ /* WED txfree queue needs its ring initialized before setup */
+ q->flags = 0;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+
+ ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
+ if (!ret)
+ q->wed_regs = q->wed->txfree_ring.reg_base;
+ break;
+ case MT76_WED_Q_RX:
+ ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
+ reset);
+ if (!ret)
+ q->wed_regs = q->wed->rx_ring[ring].reg_base;
+ break;
+ case MT76_WED_RRO_Q_DATA:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_MSDU_PG:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_IND:
+ q->flags &= ~MT_QFLAG_WED;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ q->flags = flags;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+void mt76_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = dev->drv->token_size;
+ spin_unlock_bh(&dev->token_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);
+
+void mt76_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_reset_complete);
+
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mtk_wed_device *wed = &phy->dev->mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -EOPNOTSUPP;
+
+ return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc);
+
+void mt76_wed_dma_reset(struct mt76_dev *dev)
+{
+ struct mt76_mmio *mmio = &dev->mmio;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
+ return;
+
+ complete(&mmio->wed_reset);
+
+ if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
+ dev_err(dev->dev, "wed reset complete timeout\n");
+}
+EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index c8d332456a6b..a7330576486b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -405,6 +405,10 @@ out:
}
const struct ieee80211_ops mt7601u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7601u_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mt7601u_start,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index ad2509d8c99a..089102ed9ae5 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -356,7 +356,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len);
vif->auth.ssid.ssid_len = sme->ssid_len;
}
- vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]);
+ vif->auth.key_mgmt_suite = sme->crypto.akm_suites[0];
ether_addr_copy(vif->auth.bssid, sme->bssid);
break;
@@ -1518,7 +1518,7 @@ static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
struct wilc_vif *vif;
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->iftype == type)
return vif;
}
@@ -1609,7 +1609,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- wilc_set_operation_mode(vif, 0, 0, 0);
mutex_lock(&wl->vif_mutex);
list_del_rcu(&vif->list);
wl->vif_num--;
@@ -1804,15 +1803,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);
+ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ wiphy_name(wl->wiphy));
+ if (!wl->hif_workqueue) {
+ ret = -ENOMEM;
+ goto free_cfg;
+ }
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto free_cfg;
+ goto free_hq;
}
return 0;
+free_hq:
+ destroy_workqueue(wl->hif_workqueue);
+
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 839f142663e8..f1085ccb7eed 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -107,7 +107,7 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->idx == index)
return vif;
}
@@ -377,38 +377,49 @@ struct wilc_join_bss_param *
wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0, ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -419,7 +430,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -434,11 +445,11 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -462,7 +473,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -477,13 +488,13 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
@@ -517,6 +528,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
@@ -1555,26 +1567,28 @@ int wilc_deinit(struct wilc_vif *vif)
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
+ hif_drv = vif->hif_drv;
if (!hif_drv) {
netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_ntwrk_info, false);
if (IS_ERR(msg))
- return;
+ goto out;
msg->body.net_info.frame_len = get_unaligned_le16(&buffer[6]) - 1;
msg->body.net_info.rssi = buffer[8];
@@ -1583,7 +1597,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
GFP_KERNEL);
if (!msg->body.net_info.mgmt) {
kfree(msg);
- return;
+ goto out;
}
result = wilc_enqueue_work(msg);
@@ -1592,43 +1606,41 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg->body.net_info.mgmt);
kfree(msg);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
- if (!vif) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (!vif)
+ goto out;
hif_drv = vif->hif_drv;
if (!hif_drv) {
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
if (!hif_drv->conn_info.conn_result) {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false);
- if (IS_ERR(msg)) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (IS_ERR(msg))
+ goto out;
msg->body.mac_info.status = buffer[7];
result = wilc_enqueue_work(msg);
@@ -1636,32 +1648,36 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
kfree(msg);
}
-
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
mutex_unlock(&wilc->deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- int id;
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
- if (!hif_drv)
- return;
+ hif_drv = vif->hif_drv;
+ if (!hif_drv)
+ goto out;
if (hif_drv->usr_scan_req.scan_result) {
struct host_if_msg *msg;
msg = wilc_alloc_work(vif, handle_scan_complete, false);
if (IS_ERR(msg))
- return;
+ goto out;
result = wilc_enqueue_work(msg);
if (result) {
@@ -1670,6 +1686,8 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
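
wilc_parse_join_bss_param() above now holds rcu_read_lock() only long enough to kmemdup() the BSS IEs, then parses the private copy, so the lengthy IE walking runs outside the read-side critical section and can never touch data RCU has already freed. A minimal sketch of that snapshot-then-parse pattern, with a mutex standing in for the RCU primitives:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for RCU: the kernel uses rcu_read_lock()/rcu_read_unlock(). */
static pthread_mutex_t ies_lock = PTHREAD_MUTEX_INITIALIZER;

struct bss_ies {
    size_t len;
    uint8_t data[64];
};

static struct bss_ies shared_ies = { 5, { 0x00, 0x03, 'a', 'b', 'c' } };

/* Duplicate the shared IEs under the lock; parse the copy afterwards. */
static uint8_t *snapshot_ies(size_t *len)
{
    uint8_t *copy;

    pthread_mutex_lock(&ies_lock);      /* kernel: rcu_read_lock() */
    copy = malloc(shared_ies.len);      /* kernel: kmemdup(..., GFP_ATOMIC) */
    if (copy) {
        memcpy(copy, shared_ies.data, shared_ies.len);
        *len = shared_ies.len;
    }
    pthread_mutex_unlock(&ies_lock);    /* kernel: rcu_read_unlock() */
    return copy;
}

int main(void)
{
    size_t len;
    uint8_t *ies = snapshot_ies(&len);

    if (!ies)
        return 1;
    /* All the cfg80211_find_ie()-style parsing happens on the copy. */
    printf("ie id=%u len=%u\n", (unsigned)ies[0], (unsigned)ies[1]);
    free(ies);
    return 0;
}

The copy uses GFP_ATOMIC in the real code because the allocation still happens inside the RCU read-side section.
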
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 81e8f25863f5..710e29bea560 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -96,7 +96,7 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct wilc_vif *vif;
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
@@ -132,7 +132,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
@@ -140,6 +140,19 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
+static void wilc_wake_tx_queues(struct wilc *wl)
+{
+ int srcu_idx;
+ struct wilc_vif *ifc;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ wilc_for_each_vif(wl, ifc) {
+ if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
+ netif_wake_queue(ifc->ndev);
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+}
+
static int wilc_txq_task(void *vp)
{
int ret;
@@ -160,17 +173,7 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int srcu_idx;
- struct wilc_vif *ifc;
-
- srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(ifc, &wl->vif_list,
- list) {
- if (ifc->mac_opened &&
- netif_queue_stopped(ifc->ndev))
- netif_wake_queue(ifc->ndev);
- }
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ wilc_wake_tx_queues(wl);
}
if (ret != WILC_VMM_ENTRY_FULL_RETRY)
break;
@@ -284,7 +287,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, &b, 1, 0, 0))
goto fail;
- b = WILC_FW_PREAMBLE_SHORT;
+ b = WILC_FW_PREAMBLE_AUTO;
if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, &b, 1, 0, 0))
goto fail;
@@ -416,7 +419,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
b = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1,
- 1, 1))
+ 1, 0))
goto fail;
return 0;
@@ -665,7 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
@@ -768,7 +771,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
@@ -811,19 +814,21 @@ static int wilc_mac_close(struct net_device *ndev)
void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
u32 pkt_offset)
{
- unsigned int frame_len = 0;
- int stats;
unsigned char *buff_to_send = NULL;
- struct sk_buff *skb;
struct net_device *wilc_netdev;
+ unsigned int frame_len = 0;
struct wilc_vif *vif;
+ struct sk_buff *skb;
+ int srcu_idx;
+ int stats;
if (!wilc)
return;
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
- return;
+ goto out;
buff += pkt_offset;
vif = netdev_priv(wilc_netdev);
@@ -834,7 +839,7 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
skb = dev_alloc_skb(frame_len);
if (!skb)
- return;
+ goto out;
skb->dev = wilc_netdev;
@@ -847,6 +852,8 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
stats = netif_rx(skb);
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
@@ -855,7 +862,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
u32 type_bit = BIT(type >> 4);
@@ -890,8 +897,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- struct wilc_vif *vif;
- int srcu_idx, ifc_cnt = 0;
+ struct wilc_vif *vif, *vif_tmp;
if (!wilc)
return;
@@ -901,32 +907,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
+ mutex_lock(&wilc->vif_mutex);
+ list_del_rcu(&vif->list);
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
destroy_workqueue(wilc->hif_workqueue);
- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
- mutex_lock(&wilc->vif_mutex);
- if (wilc->vif_num <= 0) {
- mutex_unlock(&wilc->vif_mutex);
- break;
- }
- vif = wilc_get_wl_to_vif(wilc);
- if (!IS_ERR(vif))
- list_del_rcu(&vif->list);
-
- wilc->vif_num--;
- mutex_unlock(&wilc->vif_mutex);
- synchronize_srcu(&wilc->srcu);
- ifc_cnt++;
- }
-
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
wiphy_unregister(wilc->wiphy);
@@ -941,7 +934,7 @@ static u8 wilc_get_available_idx(struct wilc *wl)
int srcu_idx;
srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
@@ -989,13 +982,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
goto error;
}
- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
- ndev->name);
- if (!wl->hif_workqueue) {
- ret = -ENOMEM;
- goto unregister_netdev;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
@@ -1008,12 +994,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
-unregister_netdev:
+error:
if (rtnl_locked)
cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
- error:
free_netdev(ndev);
return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index aafe3dc44ac6..5937d6d45695 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -13,6 +13,7 @@
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
+#include <linux/rculist.h>
#include "hif.h"
#include "wlan.h"
@@ -29,6 +30,11 @@
#define TX_BACKOFF_WEIGHT_MS 1
+#define wilc_for_each_vif(w, v) \
+ struct wilc *_w = w; \
+ list_for_each_entry_srcu(v, &_w->vif_list, list, \
+ srcu_read_lock_held(&_w->srcu))
+
struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
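
The wilc_for_each_vif() helper above wraps list_for_each_entry_srcu() so every traversal asserts srcu_read_lock_held(), replacing the bare list_for_each_entry_rcu() walks converted throughout this series; because it declares a local _w variable, it can only be used where a declaration is legal. A toy sketch of hiding a guarded iteration behind such a macro, with a plain flag in place of SRCU:

#include <stdio.h>

struct node { int idx; struct node *next; };
struct owner { struct node *head; int lock_held; };

/* Refuse to walk the list unless the caller claims to hold the guard,
 * loosely mirroring the srcu_read_lock_held() assertion. */
#define for_each_node(o, n)                           \
    for ((n) = ((o)->lock_held ? (o)->head : NULL);   \
         (n); (n) = (n)->next)

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct owner w = { &a, 1 };
    struct node *v;

    for_each_node(&w, v)
        printf("vif idx %d\n", v->idx);
    return 0;
}
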
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 1d8b241ce43c..61c3572ce321 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(enable_crc16,
#define WILC_SPI_RSP_HDR_EXTRA_DATA 8
struct wilc_spi {
- bool isinit; /* true if SPI protocol has been configured */
+ bool isinit; /* true if wilc_spi_init was successful */
bool probing_crc; /* true if we're probing chip's CRC config */
bool crc7_enabled; /* true if crc7 is currently enabled */
bool crc16_enabled; /* true if crc16 is currently enabled */
@@ -55,6 +55,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
static int wilc_spi_reset(struct wilc *wilc);
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc);
+static int wilc_validate_chipid(struct wilc *wilc);
/********************************************
*
@@ -192,11 +194,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* assert RESET: */
- gpiod_set_value(gpios->reset, 1);
- } else {
/* deassert RESET: */
gpiod_set_value(gpios->reset, 0);
+ } else {
+ /* assert RESET: */
+ gpiod_set_value(gpios->reset, 1);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
@@ -232,8 +234,27 @@ static int wilc_bus_probe(struct spi_device *spi)
}
clk_prepare_enable(wilc->rtc_clk);
+ dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
+ enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
+
+ /* we need power to configure the bus protocol and to read the chip id: */
+
+ wilc_wlan_power(wilc, true);
+
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret)
+ goto power_down;
+
+ ret = wilc_validate_chipid(wilc);
+ if (ret)
+ goto power_down;
+
+ wilc_wlan_power(wilc, false);
return 0;
+power_down:
+ clk_disable_unprepare(wilc->rtc_clk);
+ wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
free:
@@ -301,7 +322,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -344,7 +364,6 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -382,8 +401,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
-
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
if (ret < 0)
@@ -477,7 +494,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
********************************************/
static u8 wilc_get_crc7(u8 *buffer, u32 len)
{
- return crc7_be(0xfe, buffer, len);
+ return crc7_be(0xfe, buffer, len) | 0x01;
}
static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
@@ -1106,26 +1123,34 @@ static int wilc_spi_deinit(struct wilc *wilc)
static int wilc_spi_init(struct wilc *wilc, bool resume)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
- u32 reg;
- u32 chipid;
- int ret, i;
+ int ret;
if (spi_priv->isinit) {
/* Confirm we can read chipid register without error: */
- ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
- if (ret == 0)
+ if (wilc_validate_chipid(wilc) == 0)
return 0;
-
- dev_err(&spi->dev, "Fail cmd read chip id...\n");
}
wilc_wlan_power(wilc, true);
- /*
- * configure protocol
- */
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret) {
+ wilc_wlan_power(wilc, false);
+ return ret;
+ }
+
+ spi_priv->isinit = true;
+
+ return 0;
+}
+
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u32 reg;
+ int ret, i;
/*
* Infer the CRC settings that are currently in effect. This
@@ -1177,6 +1202,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->probing_crc = false;
+ return 0;
+}
+
+static int wilc_validate_chipid(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ u32 chipid;
+ int ret;
+
/*
* make sure we can read the chip id without a protocol error
*/
@@ -1185,9 +1219,10 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
return ret;
}
-
- spi_priv->isinit = true;
-
+ if (!is_wilc1000(chipid)) {
+ dev_err(&spi->dev, "Unknown chip id 0x%x\n", chipid);
+ return -ENODEV;
+ }
return 0;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 9eb115c79c90..a9e872a7b2c3 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -12,11 +12,6 @@
#define WAKE_UP_TRIAL_RETRY 10000
-static inline bool is_wilc1000(u32 id)
-{
- return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
-}
-
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
@@ -730,7 +725,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list)
+ wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
srcu_read_unlock(&wilc->srcu, srcu_idx);
@@ -1198,27 +1193,32 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (ret) {
- netdev_err(vif->ndev, "Error while reading reg\n");
+ ret = wilc->hif_func->hif_read_reg(wilc, GLOBAL_MODE_CONTROL, &reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
- (reg | WILC_ABORT_REQ_BIT));
- if (ret) {
- netdev_err(vif->ndev, "Error while writing reg\n");
+ reg &= ~WILC_GLOBAL_MODE_ENABLE_WIFI;
+ ret = wilc->hif_func->hif_write_reg(wilc, GLOBAL_MODE_CONTROL, reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc, PWR_SEQ_MISC_CTRL, &reg);
+ if (ret)
+ goto release;
+
+ reg &= ~WILC_PWR_SEQ_ENABLE_WIFI_SLEEP;
+ ret = wilc->hif_func->hif_write_reg(wilc, PWR_SEQ_MISC_CTRL, reg);
+ if (ret)
+ goto release;
+
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
goto release;
}
- reg = BIT(0);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
+ (reg | WILC_ABORT_REQ_BIT));
if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
goto release;
@@ -1410,7 +1410,7 @@ static int init_chip(struct net_device *dev)
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
chipid = wilc_get_chipid(wilc, true);
@@ -1440,7 +1440,7 @@ static int init_chip(struct net_device *dev)
}
release:
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
return ret;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index a72cd5cac81d..54643d8fef04 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -156,6 +156,12 @@
#define WILC_GP_REG_0 0x149c
#define WILC_GP_REG_1 0x14a0
+#define GLOBAL_MODE_CONTROL 0x1614
+#define PWR_SEQ_MISC_CTRL 0x3008
+
+#define WILC_GLOBAL_MODE_ENABLE_WIFI BIT(0)
+#define WILC_PWR_SEQ_ENABLE_WIFI_SLEEP BIT(28)
+
#define WILC_HAVE_SDIO_IRQ_GPIO BIT(0)
#define WILC_HAVE_USE_PMU BIT(1)
#define WILC_HAVE_SLEEP_CLK_SRC_RTC BIT(2)
@@ -403,6 +409,11 @@ struct wilc_cfg_rsp {
struct wilc_vif;
+static inline bool is_wilc1000(u32 id)
+{
+ return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
+}
+
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
int wilc_wlan_start(struct wilc *wilc);
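The relocated is_wilc1000() helper masks off the chip revision bits before comparing against the base id. With illustrative (assumed) values WILC_CHIP_REV_FIELD = GENMASK(11, 0) and WILC_1000_BASE_ID = 0x100000, it behaves like this:

	/* Assumed register values, for illustration only: */
	is_wilc1000(0x100002);	/* true: only the revision field differs */
	is_wilc1000(0x300000);	/* false: different base id */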
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 506d2f31efb5..641f847d47ab 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
@@ -685,6 +684,10 @@ static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
}
static const struct ieee80211_ops plfxlc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = plfxlc_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = plfxlc_op_start,
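The same four callback assignments recur in every driver touched below that lacks hardware channel-context support: mac80211 now expects the chanctx ops to be wired up explicitly, so such drivers point them at the emulation helpers. The pattern, shown for a hypothetical driver foo:

	static const struct ieee80211_ops foo_ops = {
		.add_chanctx = ieee80211_emulate_add_chanctx,
		.remove_chanctx = ieee80211_emulate_remove_chanctx,
		.change_chanctx = ieee80211_emulate_change_chanctx,
		.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
		/* ...the driver's own callbacks follow... */
	};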
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 3b283e93a13e..76b07db284f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
wiphy_lock(priv_to_wiphy(vif->mac));
- cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
wiphy_unlock(priv_to_wiphy(vif->mac));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 13dd672b825e..42e21e9f303b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1705,6 +1705,10 @@ static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2400pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index ecddda4c471e..36ddc5a69fa4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -2003,6 +2003,10 @@ static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2500pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 13fdcff0ad66..09923765e2db 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -1794,6 +1794,10 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2500usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index aaf31857ae1e..3bb81bcff0ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -10946,13 +10946,13 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
reg = rt2800_register_read_lock(rt2x00dev, efuse_data3_reg);
/* The returned value is in CPU order, but eeprom is le */
- *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data2_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data1_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data0_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
}
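The eeprom array stores little-endian words, so the cpu_to_le32() result must go through a __le32 pointer; the old u32 cast dropped the endianness annotation and made sparse complain. In isolation, with hypothetical names:

	__le16 eeprom[64];		/* on-wire little-endian storage */
	u32 reg = 0x12345678;		/* value in CPU byte order */

	*(__le32 *)&eeprom[0] = cpu_to_le32(reg);	/* typed store, no sparse warning */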
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index dcb56f708a5f..14c45aba836f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -287,6 +287,10 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7118d4f9038d..701ba54bf3e5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -132,6 +132,10 @@ static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
}
static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b2a8e75a901b..160bef79acdb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -629,6 +629,10 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
index ad95f9eba301..1000fbfb94b8 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
@@ -197,10 +197,7 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
transfer += header_length;
} else {
skb_push(skb, iv_len + align);
- if (align < icv_len)
- skb_put(skb, icv_len - align);
- else if (align > icv_len)
- skb_trim(skb, rxdesc->size + iv_len + icv_len);
+ skb_put(skb, icv_len - align);
/* Move ieee80211 header */
memmove(skb->data + transfer,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 483723bf514b..d1cd5694e3c7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2872,6 +2872,10 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt61pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index dfa9d5213898..b79dda952a33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2291,6 +2291,10 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt73usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index f6c25a52b69a..77b6cb7e1f6b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1607,6 +1607,10 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops rtl8180_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8180_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8180_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 04945f905d6d..78d99afa373d 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1377,6 +1377,10 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev,
static const struct ieee80211_ops rtl8187_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8187_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8187_start,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4695fb4e2d2d..fd92d23c43d9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -6,6 +6,7 @@
*/
#include <asm/byteorder.h>
+#include <linux/average.h>
#define RTL8XXXU_DEBUG_REG_WRITE 0x01
#define RTL8XXXU_DEBUG_REG_READ 0x02
@@ -498,6 +499,7 @@ struct rtl8xxxu_txdesc40 {
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
#define TXDESC_NAVUSEHDR BIT(20)
+#define TXDESC_EN_DESC_ID BIT(21)
#define TXDESC_SEC_RC4 0x00400000
#define TXDESC_SEC_AES 0x00c00000
#define TXDESC_PKT_OFFSET_SHIFT 26
@@ -1774,6 +1776,8 @@ struct rtl8xxxu_cfo_tracking {
#define RTL8XXXU_HW_LED_CONTROL 2
#define RTL8XXXU_MAX_MAC_ID_NUM 128
#define RTL8XXXU_BC_MC_MACID 0
+#define RTL8XXXU_BC_MC_MACID1 1
+#define RTL8XXXU_MAX_SEC_CAM_NUM 64
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
@@ -1855,6 +1859,8 @@ struct rtl8xxxu_priv {
int next_mbox;
int nr_out_eps;
+ /* Ensure STAs are not added or deleted while iterating */
+ struct mutex sta_mutex;
struct mutex h2c_mutex;
/* Protect the indirect register accesses of RTL8710BU. */
struct mutex syson_indirect_access_mutex;
@@ -1889,18 +1895,14 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
- u8 rssi_level;
DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
- /*
- * Only one virtual interface permitted because only STA mode
- * is supported and no iface_combinations are provided.
- */
- struct ieee80211_vif *vif;
+
+ struct ieee80211_vif *vifs[2];
struct delayed_work ra_watchdog;
struct work_struct c2hcmd_work;
struct sk_buff_head c2hcmd_queue;
- struct work_struct update_beacon_work;
+ struct delayed_work update_beacon_work;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
@@ -1910,13 +1912,23 @@ struct rtl8xxxu_priv {
char led_name[32];
struct led_classdev led_cdev;
DECLARE_BITMAP(mac_id_map, RTL8XXXU_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(cam_map, RTL8XXXU_MAX_SEC_CAM_NUM);
};
+DECLARE_EWMA(rssi, 10, 16);
+
struct rtl8xxxu_sta_info {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
u8 macid;
+ struct ewma_rssi avg_rssi;
+ u8 rssi_level;
+};
+
+struct rtl8xxxu_vif {
+ int port_num;
+ u8 hw_key_idx;
};
struct rtl8xxxu_rx_urb {
@@ -1986,11 +1998,13 @@ struct rtl8xxxu_fileops {
u8 init_reg_rxfltmap:1;
u8 init_reg_pkt_life_time:1;
u8 init_reg_hmtfr:1;
+ u8 supports_concurrent:1;
u8 ampdu_max_time;
u8 ustime_tsf_edca;
u16 max_aggr_num;
u8 supports_ap:1;
u16 max_macid_num;
+ u16 max_sec_cam_num;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
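DECLARE_EWMA(rssi, 10, 16) in the header above generates struct ewma_rssi plus the ewma_rssi_init/add/read helpers (10 fractional bits, each new sample weighted 1/16). The helpers take unsigned values, which is why the driver feeds them the negated RSSI. A minimal usage sketch:

	struct ewma_rssi avg;

	ewma_rssi_init(&avg);
	ewma_rssi_add(&avg, 40);	/* i.e. an RSSI of -40 dBm */
	ewma_rssi_add(&avg, 48);
	pr_debug("avg rssi: -%lu dBm\n", ewma_rssi_read(&avg));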
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
index 6d0f975f891b..afe9cc1b49dc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -1699,7 +1699,7 @@ void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *s
/* We only use macid 0, so only the first item is relevant.
* AP mode will use more of them if it's ever implemented.
*/
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION)
+ if (!priv->vifs[0] || priv->vifs[0]->type == NL80211_IFTYPE_STATION)
items = 1;
for (macid = 0; macid < items; macid++) {
@@ -1882,6 +1882,7 @@ struct rtl8xxxu_fileops rtl8188eu_fops = {
.has_tx_report = 1,
.init_reg_pkt_life_time = 1,
.gen2_thermal_meter = 1,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
/*
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index 1e1c8fa194cb..464216d007ce 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -1751,6 +1751,8 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 16,
+ .supports_concurrent = 1,
.adda_1t_init = 0x03c00014,
.adda_1t_path_on = 0x03c00014,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index b30a9a513cb8..3ee7d8f87da6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -613,6 +613,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 47bcaec6f2db..63b73ace27ec 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1769,6 +1769,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.needs_full_init = 1,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
index 28e93835e05a..21e4204769d0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
@@ -2014,26 +2014,40 @@ static int rtl8192fu_led_brightness_set(struct led_classdev *led_cdev,
struct rtl8xxxu_priv *priv = container_of(led_cdev,
struct rtl8xxxu_priv,
led_cdev);
- u16 ledcfg;
+ u32 ledcfg;
/* Values obtained by observing the USB traffic from the Windows driver. */
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_0, 0x20080);
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_1, 0x1b0000);
- ledcfg = rtl8xxxu_read16(priv, REG_LEDCFG0);
+ ledcfg = rtl8xxxu_read32(priv, REG_LEDCFG0);
+
+ /* Comfast CF-826F uses LED1. Asus USB-N13 C1 uses LED0. Set both. */
+
+ u32p_replace_bits(&ledcfg, LED_GPIO_ENABLE, LEDCFG0_LED2EN);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED0_IO_MODE);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED1_IO_MODE);
if (brightness == LED_OFF) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
} else if (brightness == LED_ON) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7) | BIT(11);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED1SV);
} else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
- /* Value obtained by brute force. */
- ledcfg = BIT(8) | BIT(9);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
}
- rtl8xxxu_write16(priv, REG_LEDCFG0, ledcfg);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, ledcfg);
return 0;
}
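u32p_replace_bits() (from linux/bitfield.h) overwrites only the bits selected by the mask and leaves the rest of the word intact, which is what lets the function read-modify-write REG_LEDCFG0 instead of storing magic constants. For example:

	u32 val = 0xffffffff;

	/* Place LED_MODE_TX_OR_RX_EVENTS (0x3) into bits 2:0: */
	u32p_replace_bits(&val, LED_MODE_TX_OR_RX_EVENTS, GENMASK(2, 0));
	/* val is now 0xfffffffb */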
@@ -2081,6 +2095,7 @@ struct rtl8xxxu_fileops rtl8192fu_fops = {
.max_aggr_num = 0x1f1f,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.trxff_boundary = 0x3f3f,
.pbp_rx = PBP_PAGE_SIZE_256,
.pbp_tx = PBP_PAGE_SIZE_256,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
index 871b8cca8a18..46d57510e9fc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
@@ -1877,6 +1877,7 @@ struct rtl8xxxu_fileops rtl8710bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x03c00016,
.adda_1t_path_on = 0x03c00016,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 15a30e496221..ad1bb9377ca2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -510,6 +510,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 954369ed6226..9640c841d20a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1744,6 +1744,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 180907319e8c..4a49f8f9d80f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1633,33 +1633,41 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
- enum nl80211_iftype linktype)
+ enum nl80211_iftype linktype, int port_num)
{
- u8 val8;
-
- val8 = rtl8xxxu_read8(priv, REG_MSR);
- val8 &= ~MSR_LINKTYPE_MASK;
+ u8 val8, type;
switch (linktype) {
case NL80211_IFTYPE_UNSPECIFIED:
- val8 |= MSR_LINKTYPE_NONE;
+ type = MSR_LINKTYPE_NONE;
break;
case NL80211_IFTYPE_ADHOC:
- val8 |= MSR_LINKTYPE_ADHOC;
+ type = MSR_LINKTYPE_ADHOC;
break;
case NL80211_IFTYPE_STATION:
- val8 |= MSR_LINKTYPE_STATION;
+ type = MSR_LINKTYPE_STATION;
break;
case NL80211_IFTYPE_AP:
- val8 |= MSR_LINKTYPE_AP;
+ type = MSR_LINKTYPE_AP;
break;
default:
- goto out;
+ return;
+ }
+
+ switch (port_num) {
+ case 0:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x0c;
+ val8 |= type;
+ break;
+ case 1:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x03;
+ val8 |= type << 2;
+ break;
+ default:
+ return;
}
rtl8xxxu_write8(priv, REG_MSR, val8);
-out:
- return;
}
static void
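Judging by the masks above, REG_MSR now carries both ports' link types in one byte: bits 1:0 for port 0 and bits 3:2 for port 1. Updating port 1 alone would look like this sketch:

	val8 = rtl8xxxu_read8(priv, REG_MSR);
	val8 &= 0x03;				/* keep port 0 (bits 1:0) */
	val8 |= MSR_LINKTYPE_AP << 2;		/* set port 1 (bits 3:2) */
	rtl8xxxu_write8(priv, REG_MSR, val8);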
@@ -3572,27 +3580,47 @@ void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
-static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv, int port_num)
{
int i;
u16 reg;
- reg = REG_MACID;
+ switch (port_num) {
+ case 0:
+ reg = REG_MACID;
+ break;
+ case 1:
+ reg = REG_MACID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
- rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+ rtl8xxxu_write8(priv, reg + i, priv->vifs[port_num]->addr[i]);
return 0;
}
-static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid, int port_num)
{
int i;
u16 reg;
dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
- reg = REG_BSSID;
+ switch (port_num) {
+ case 0:
+ reg = REG_BSSID;
+ break;
+ case 1:
+ reg = REG_BSSID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
rtl8xxxu_write8(priv, reg + i, bssid[i]);
@@ -4025,10 +4053,13 @@ static inline u8 rtl8xxxu_get_macid(struct rtl8xxxu_priv *priv,
{
struct rtl8xxxu_sta_info *sta_info;
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION || !sta)
+ if (!sta)
return 0;
sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ if (!sta_info)
+ return 0;
+
return sta_info->macid;
}
@@ -4235,9 +4266,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
}
- rtl8xxxu_set_mac(priv);
- rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
-
/*
* Configure initial WMAC settings
*/
@@ -4511,6 +4539,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8188e_ra_info_init_all(&priv->ra_info);
set_bit(RTL8XXXU_BC_MC_MACID, priv->mac_id_map);
+ set_bit(RTL8XXXU_BC_MC_MACID1, priv->mac_id_map);
exit:
return ret;
@@ -4530,8 +4559,10 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
* This is a bit of a hack - the lower bits of the cipher
* suite selector happen to match the cipher index in the CAM
*/
- addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+ addr = key->hw_key_idx << CAM_CMD_KEY_SHIFT;
ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ctrl |= BIT(6);
for (j = 5; j >= 0; j--) {
switch (j) {
@@ -4574,7 +4605,7 @@ static int rtl8xxxu_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct rtl8xxxu_priv *priv = hw->priv;
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
return 0;
}
@@ -4839,10 +4870,9 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg);
- while (rate_cfg) {
- rate_cfg = (rate_cfg >> 1);
- rate_idx++;
- }
+ if (rate_cfg)
+ rate_idx = __fls(rate_cfg);
+
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
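__fls(x) returns the bit index of the most significant set bit, so the shift-and-count loop collapses into one call. Note that, assuming rate_idx started at zero, the old loop produced the index plus one. For example:

	rate_idx = __fls(0x150);	/* 0x150 = binary 101010000, highest set bit is bit 8 */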
@@ -4888,14 +4918,20 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
u8 aifs, aifsn, sifs;
int i;
- if (priv->vif) {
+ for (i = 0; i < ARRAY_SIZE(priv->vifs); i++) {
struct ieee80211_sta *sta;
+ if (!priv->vifs[i])
+ continue;
+
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ sta = ieee80211_find_sta(priv->vifs[i], priv->vifs[i]->bss_conf.bssid);
if (sta)
wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
rcu_read_unlock();
+
+ if (wireless_mode)
+ break;
}
if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
@@ -4952,19 +4988,21 @@ static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changed)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_sta_info *sta_info;
struct ieee80211_sta *sta;
struct rtl8xxxu_ra_report *rarpt;
+ u8 val8, macid;
u32 val32;
- u8 val8;
rarpt = &priv->ra_report;
if (changed & BSS_CHANGED_ASSOC) {
dev_dbg(dev, "Changed ASSOC: %i!\n", vif->cfg.assoc);
- rtl8xxxu_set_linktype(priv, vif->type);
+ rtl8xxxu_set_linktype(priv, vif->type, rtlvif->port_num);
if (vif->cfg.assoc) {
u32 ramask;
@@ -4980,6 +5018,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rcu_read_unlock();
goto error;
}
+ macid = rtl8xxxu_get_macid(priv, sta);
if (sta->deflink.ht_cap.ht_supported)
dev_info(dev, "%s: HT supported\n", __func__);
@@ -5000,19 +5039,20 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bw = RATE_INFO_BW_40;
else
bw = RATE_INFO_BW_20;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
rcu_read_unlock();
rtl8xxxu_update_ra_report(rarpt, highest_rate, sgi, bw);
- priv->vif = vif;
- priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
-
priv->fops->update_rate_mask(priv, ramask, 0, sgi,
- bw == RATE_INFO_BW_40, 0);
+ bw == RATE_INFO_BW_40, macid);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
- rtl8xxxu_stop_tx_beacon(priv);
+ if (rtlvif->port_num == 0)
+ rtl8xxxu_stop_tx_beacon(priv);
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
@@ -5054,7 +5094,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID) {
dev_dbg(dev, "Changed BSSID!\n");
- rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+ rtl8xxxu_set_bssid(priv, bss_conf->bssid, rtlvif->port_num);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -5070,7 +5110,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changed & BSS_CHANGED_BEACON)
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
error:
return;
@@ -5079,11 +5119,12 @@ error:
static int rtl8xxxu_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "Start AP mode\n");
- rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid);
+ rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid, rtlvif->port_num);
rtl8xxxu_write16(priv, REG_BCN_INTERVAL, vif->bss_conf.beacon_int);
priv->fops->report_connect(priv, RTL8XXXU_BC_MC_MACID, 0, true);
@@ -5509,13 +5550,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
+ struct rtl8xxxu_vif *rtlvif = vif ? (struct rtl8xxxu_vif *)vif->drv_priv : NULL;
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
int tx_desc_size = priv->fops->tx_desc_size;
u8 macid;
int ret;
- bool ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false, bmc = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -5557,10 +5599,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
tx_desc->txdw0 =
TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
- is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+ bmc = true;
+ }
+
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+ macid = rtl8xxxu_get_macid(priv, sta);
if (tx_info->control.hw_key) {
switch (tx_info->control.hw_key->cipher) {
@@ -5575,6 +5621,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
default:
break;
}
+ if (bmc && rtlvif && rtlvif->hw_key_idx != 0xff) {
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_EN_DESC_ID);
+ macid = rtlvif->hw_key_idx;
+ }
}
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
@@ -5618,7 +5668,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- macid = rtl8xxxu_get_macid(priv, sta);
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate, macid);
@@ -5680,18 +5729,44 @@ static void rtl8xxxu_send_beacon_frame(struct ieee80211_hw *hw,
static void rtl8xxxu_update_beacon_work_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv =
- container_of(work, struct rtl8xxxu_priv, update_beacon_work);
+ container_of(work, struct rtl8xxxu_priv, update_beacon_work.work);
struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_vif *vif = priv->vif;
+ struct ieee80211_vif *vif = priv->vifs[0];
if (!vif) {
WARN_ONCE(true, "no vif to update beacon\n");
return;
}
+ if (vif->bss_conf.csa_active) {
+ if (ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_csa_finish(vif, 0);
+ return;
+ }
+ schedule_delayed_work(&priv->update_beacon_work,
+ msecs_to_jiffies(vif->bss_conf.beacon_int));
+ }
rtl8xxxu_send_beacon_frame(hw, vif);
}
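The reworked beacon work now doubles as the channel-switch engine. Per run, in outline:

	/*
	 * csa countdown complete -> ieee80211_csa_finish(vif, 0) and stop;
	 * csa still counting     -> re-arm after one beacon interval,
	 *                           then transmit the updated beacon;
	 * no csa active          -> just transmit the beacon.
	 */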
+static inline bool rtl8xxxu_is_packet_match_bssid(struct rtl8xxxu_priv *priv,
+ struct ieee80211_hdr *hdr,
+ int port_num)
+{
+ return priv->vifs[port_num] &&
+ priv->vifs[port_num]->type == NL80211_IFTYPE_STATION &&
+ priv->vifs[port_num]->cfg.assoc &&
+ ether_addr_equal(priv->vifs[port_num]->bss_conf.bssid, hdr->addr2);
+}
+
+static inline bool rtl8xxxu_is_sta_sta(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc &&
+ priv->vifs[0]->type == NL80211_IFTYPE_STATION) &&
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc &&
+ priv->vifs[1]->type == NL80211_IFTYPE_STATION);
+}
+
void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
struct rtl8723au_phy_stats *phy_stats,
@@ -5708,12 +5783,11 @@ void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
rx_status->signal = priv->fops->cck_rssi(priv, phy_stats);
} else {
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
if (parse_cfo) {
priv->cfo_tracking.cfo_tail[0] = phy_stats->path_cfotail[0];
@@ -5748,12 +5822,11 @@ static void jaguar2_rx_parse_phystats_type1(struct rtl8xxxu_priv *priv,
bool crc_icv_err)
{
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
u8 pwdb_max = 0;
int rx_path;
@@ -6029,18 +6102,20 @@ void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
btcoex->bt_busy = false;
}
+static inline bool rtl8xxxu_is_assoc(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc) ||
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc);
+}
+
static
void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (!wifi_connected) {
+ if (!rtl8xxxu_is_assoc(priv)) {
rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
rtl8723bu_set_coex_with_type(priv, 0);
} else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
@@ -6058,15 +6133,11 @@ void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
static
void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (wifi_connected) {
+ if (rtl8xxxu_is_assoc(priv)) {
u32 val32 = 0;
u32 high_prio_tx = 0, high_prio_rx = 0;
@@ -6249,6 +6320,76 @@ static void rtl8188e_c2hcmd_callback(struct work_struct *work)
}
}
+#define rtl8xxxu_iterate_vifs_atomic(priv, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic((priv)->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+
+struct rtl8xxxu_rx_update_rssi_data {
+ struct rtl8xxxu_priv *priv;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ u8 *bssid;
+};
+
+static void rtl8xxxu_rx_update_rssi_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_rx_update_rssi_data *iter_data = data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = iter_data->hdr;
+ struct rtl8xxxu_priv *priv = iter_data->priv;
+ struct rtl8xxxu_sta_info *sta_info;
+ struct ieee80211_rx_status *rx_status = iter_data->rx_status;
+ u8 *bssid = iter_data->bssid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+ return;
+
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ sta = ieee80211_find_sta_by_ifaddr(priv->hw, hdr->addr2,
+ vif->addr);
+ if (!sta)
+ return;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ ewma_rssi_add(&sta_info->avg_rssi, -rx_status->signal);
+}
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+ u8 *bssid;
+
+ if (ieee80211_has_tods(fc))
+ bssid = hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ bssid = hdr->addr2;
+ else
+ bssid = hdr->addr3;
+
+ return bssid;
+}
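get_hdr_bssid() applies the standard 802.11 DS-bit addressing rules:

	/*
	 * ToDS = 1          -> frame towards the AP, BSSID in addr1
	 * FromDS = 1        -> frame from the AP,    BSSID in addr2
	 * ToDS = FromDS = 0 -> IBSS/management,      BSSID in addr3
	 */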
+
+static void rtl8xxxu_rx_update_rssi(struct rtl8xxxu_priv *priv,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtl8xxxu_rx_update_rssi_data data = {};
+
+ if (ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.priv = priv;
+ data.hdr = hdr;
+ data.rx_status = rx_status;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtl8xxxu_iterate_vifs_atomic(priv, rtl8xxxu_rx_update_rssi_iter, &data);
+}
+
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
@@ -6308,18 +6449,26 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb_queue_tail(&priv->c2hcmd_queue, skb);
schedule_work(&priv->c2hcmd_work);
} else {
+ struct ieee80211_hdr *hdr;
+
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
skb_pull(skb, drvinfo_sz + desc_shift);
skb_trim(skb, pkt_len);
- if (rx_desc->phy_stats)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(
priv, rx_status, phy_stats,
rx_desc->rxmcs,
- (struct ieee80211_hdr *)skb->data,
+ hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6416,10 +6565,15 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (rx_desc->phy_stats)
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs, hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6563,29 +6717,123 @@ error:
return ret;
}
+static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
+{
+ u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
+ u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
+ struct rtl8xxxu_vif *rtlvif;
+ struct ieee80211_vif *vif;
+ u8 tsftr[8], tsftr_1[8];
+ int i;
+
+ msr = rtl8xxxu_read8(priv, REG_MSR);
+ bcn_ctrl = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ bcn_ctrl_1 = rtl8xxxu_read8(priv, REG_BEACON_CTRL_1);
+
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ atimwnd[i] = rtl8xxxu_read8(priv, REG_ATIMWND + i);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ atimwnd_1[i] = rtl8xxxu_read8(priv, REG_ATIMWND_1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr[i] = rtl8xxxu_read8(priv, REG_TSFTR + i);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr_1[i] = rtl8xxxu_read8(priv, REG_TSFTR1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ macid[i] = rtl8xxxu_read8(priv, REG_MACID + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ bssid[i] = rtl8xxxu_read8(priv, REG_BSSID + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ macid_1[i] = rtl8xxxu_read8(priv, REG_MACID1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ bssid_1[i] = rtl8xxxu_read8(priv, REG_BSSID1 + i);
+
+ /* disable the beacon function and TSF updates */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, (bcn_ctrl &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, (bcn_ctrl_1 &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+
+ /* switch msr */
+ msr = (msr & 0xf0) | ((msr & 0x03) << 2) | ((msr & 0x0c) >> 2);
+ rtl8xxxu_write8(priv, REG_MSR, msr);
+
+ /* write port0 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1 & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND + i, atimwnd_1[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr_1); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR + i, tsftr_1[i]);
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ rtl8xxxu_write8(priv, REG_MACID + i, macid_1[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ rtl8xxxu_write8(priv, REG_BSSID + i, bssid_1[i]);
+
+ /* write port1 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND_1 + i, atimwnd[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR1 + i, tsftr[i]);
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ rtl8xxxu_write8(priv, REG_MACID1 + i, macid[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ rtl8xxxu_write8(priv, REG_BSSID1 + i, bssid[i]);
+
+ /* write bcn ctl */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
+
+ vif = priv->vifs[0];
+ priv->vifs[0] = priv->vifs[1];
+ priv->vifs[1] = vif;
+
+ /* priv->vifs[0] is NULL here, based on how this function is currently
+ * called from rtl8xxxu_add_interface().
+ * If this function is used for a different scenario in the future,
+ * check whether vifs[0] or vifs[1] can be NULL and, if necessary,
+ * add code to set port_num = 1.
+ */
+ rtlvif = (struct rtl8xxxu_vif *)priv->vifs[1]->drv_priv;
+ rtlvif->port_num = 1;
+}
+
static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
- int ret;
+ int port_num;
u8 val8;
- if (!priv->vif)
- priv->vif = vif;
+ if (!priv->vifs[0])
+ port_num = 0;
+ else if (!priv->vifs[1])
+ port_num = 1;
else
return -EOPNOTSUPP;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- rtl8xxxu_stop_tx_beacon(priv);
+ if (port_num == 0) {
+ rtl8xxxu_stop_tx_beacon(priv);
- val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
- val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
- BEACON_DISABLE_TSF_UPDATE;
- rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
- ret = 0;
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+ BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+ }
break;
case NL80211_IFTYPE_AP:
+ if (port_num == 1) {
+ rtl8xxxu_switch_ports(priv);
+ port_num = 0;
+ }
+
rtl8xxxu_write8(priv, REG_BEACON_CTRL,
BEACON_DISABLE_TSF_UPDATE | BEACON_CTRL_MBSSID);
rtl8xxxu_write8(priv, REG_ATIMWND, 0x0c); /* 12ms */
@@ -6602,29 +6850,31 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
val8 = rtl8xxxu_read8(priv, REG_CCK_CHECK);
val8 &= ~BIT_BCN_PORT_SEL;
rtl8xxxu_write8(priv, REG_CCK_CHECK, val8);
-
- ret = 0;
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- rtl8xxxu_set_linktype(priv, vif->type);
+ priv->vifs[port_num] = vif;
+ rtlvif->port_num = port_num;
+ rtlvif->hw_key_idx = 0xff;
+
+ rtl8xxxu_set_linktype(priv, vif->type, port_num);
ether_addr_copy(priv->mac_addr, vif->addr);
- rtl8xxxu_set_mac(priv);
+ rtl8xxxu_set_mac(priv, port_num);
- return ret;
+ return 0;
}
static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
- if (priv->vif)
- priv->vif = NULL;
+ priv->vifs[rtlvif->port_num] = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -6746,8 +6996,8 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
else
rcr |= RCR_CHECK_BSSID_BEACON | RCR_CHECK_BSSID_MATCH;
- if (priv->vif && priv->vif->type == NL80211_IFTYPE_AP)
- rcr &= ~RCR_CHECK_BSSID_MATCH;
+ if (priv->vifs[0] && priv->vifs[0]->type == NL80211_IFTYPE_AP)
+ rcr &= ~(RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON);
if (*total_flags & FIF_CONTROL)
rcr |= RCR_ACCEPT_CTRL_FRAME;
@@ -6784,11 +7034,19 @@ static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
return 0;
}
+static int rtl8xxxu_get_free_sec_cam(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ return find_first_zero_bit(priv->cam_map, priv->fops->max_sec_cam_num);
+}
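cam_map tracks the hardware key-CAM slots: SET_KEY claims the first free index and marks it with set_bit(), DISABLE_KEY releases it with clear_bit(). One caveat: find_first_zero_bit() returns the bitmap size, not a negative number, when every bit is set, so a full CAM is better detected with an explicit bound check, as in this defensive sketch:

	idx = find_first_zero_bit(priv->cam_map, priv->fops->max_sec_cam_num);
	if (idx >= priv->fops->max_sec_cam_num)
		return -ENOSPC;		/* all CAM slots in use */
	set_bit(idx, priv->cam_map);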
+
static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 mac_addr[ETH_ALEN];
@@ -6800,9 +7058,6 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
__func__, cmd, key->cipher, key->keyidx);
- if (vif->type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
if (key->keyidx > 3)
return -EOPNOTSUPP;
@@ -6826,7 +7081,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ether_addr_copy(mac_addr, sta->addr);
} else {
dev_dbg(dev, "%s: group key\n", __func__);
- eth_broadcast_addr(mac_addr);
+ ether_addr_copy(mac_addr, vif->bss_conf.bssid);
}
val16 = rtl8xxxu_read16(priv, REG_CR);
@@ -6840,16 +7095,28 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- key->hw_key_idx = key->keyidx;
+
+ retval = rtl8xxxu_get_free_sec_cam(hw);
+ if (retval < 0)
+ return -EOPNOTSUPP;
+
+ key->hw_key_idx = retval;
+
+ if (vif->type == NL80211_IFTYPE_AP && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ rtlvif->hw_key_idx = key->hw_key_idx;
+
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
rtl8xxxu_cam_write(priv, key, mac_addr);
+ set_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
case DISABLE_KEY:
rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
- key->keyidx << CAM_CMD_KEY_SHIFT;
+ key->hw_key_idx << CAM_CMD_KEY_SHIFT;
rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+ rtlvif->hw_key_idx = 0xff;
+ clear_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
default:
@@ -6930,6 +7197,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
int signal, struct ieee80211_sta *sta,
bool force)
{
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct ieee80211_hw *hw = priv->hw;
u16 wireless_mode;
u8 rssi_level, ratr_idx;
@@ -6938,7 +7206,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u8 go_up_gap = 5;
u8 macid = rtl8xxxu_get_macid(priv, sta);
- rssi_level = priv->rssi_level;
+ rssi_level = sta_info->rssi_level;
snr = rtl8xxxu_signal_to_snr(signal);
snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
@@ -6963,18 +7231,16 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
else
rssi_level = RTL8XXXU_RATR_STA_LOW;
- if (rssi_level != priv->rssi_level || force) {
+ if (rssi_level != sta_info->rssi_level || force) {
int sgi = 0;
u32 rate_bitmap = 0;
- rcu_read_lock();
rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12) |
(sta->deflink.ht_cap.mcs.rx_mask[1] << 20);
if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
- rcu_read_unlock();
wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
switch (wireless_mode) {
@@ -7055,7 +7321,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
break;
}
- priv->rssi_level = rssi_level;
+ sta_info->rssi_level = rssi_level;
priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz, macid);
}
}
@@ -7085,7 +7351,7 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
int cfo_khz_a, cfo_khz_b, cfo_average;
int crystal_cap;
- if (!priv->vif || !priv->vif->cfg.assoc) {
+ if (!rtl8xxxu_is_assoc(priv)) {
/* Reset */
cfo->adjust = true;
@@ -7148,41 +7414,64 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
rtl8xxxu_set_atc_status(priv, abs(cfo_average) >= CFO_TH_ATC);
}
-static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+static void rtl8xxxu_ra_iter(void *data, struct ieee80211_sta *sta)
{
- struct ieee80211_vif *vif;
- struct rtl8xxxu_priv *priv;
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_priv *priv = data;
+ int signal = -ewma_rssi_read(&sta_info->avg_rssi);
- priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
- vif = priv->vif;
+ priv->fops->report_rssi(priv, rtl8xxxu_get_macid(priv, sta),
+ rtl8xxxu_signal_to_snr(signal));
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+}
- if (vif && vif->type == NL80211_IFTYPE_STATION) {
- int signal;
- struct ieee80211_sta *sta;
+struct rtl8xxxu_stas_entry {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+};
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- struct device *dev = &priv->udev->dev;
+struct rtl8xxxu_iter_stas_data {
+ struct rtl8xxxu_priv *priv;
+ struct list_head list;
+};
- dev_dbg(dev, "%s: no sta found\n", __func__);
- rcu_read_unlock();
- goto out;
- }
- rcu_read_unlock();
+static void rtl8xxxu_collect_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_iter_stas_data *iter_stas = data;
+ struct rtl8xxxu_stas_entry *stas_entry;
- signal = ieee80211_ave_rssi(vif);
+ stas_entry = kmalloc(sizeof(*stas_entry), GFP_ATOMIC);
+ if (!stas_entry)
+ return;
- priv->fops->report_rssi(priv, 0,
- rtl8xxxu_signal_to_snr(signal));
+ stas_entry->sta = sta;
+ list_add_tail(&stas_entry->list, &iter_stas->list);
+}
- if (priv->fops->set_crystal_cap)
- rtl8xxxu_track_cfo(priv);
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
- rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+ struct rtl8xxxu_iter_stas_data iter_data;
+ struct rtl8xxxu_stas_entry *sta_entry, *tmp;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ iter_data.priv = priv;
+ INIT_LIST_HEAD(&iter_data.list);
+
+ mutex_lock(&priv->sta_mutex);
+ ieee80211_iterate_stations_atomic(priv->hw, rtl8xxxu_collect_sta_iter,
+ &iter_data);
+ list_for_each_entry_safe(sta_entry, tmp, &iter_data.list, list) {
+ list_del_init(&sta_entry->list);
+ rtl8xxxu_ra_iter(priv, sta_entry->sta);
+ kfree(sta_entry);
}
+ mutex_unlock(&priv->sta_mutex);
+
+ if (priv->fops->set_crystal_cap)
+ rtl8xxxu_track_cfo(priv);
-out:
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
}
@@ -7304,7 +7593,9 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_work_sync(&priv->c2hcmd_work);
cancel_delayed_work_sync(&priv->ra_watchdog);
+ cancel_delayed_work_sync(&priv->update_beacon_work);
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
@@ -7315,16 +7606,34 @@ static int rtl8xxxu_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
+ ewma_rssi_init(&sta_info->avg_rssi);
if (vif->type == NL80211_IFTYPE_AP) {
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
sta_info->macid = rtl8xxxu_acquire_macid(priv);
- if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM)
+ if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM) {
+ mutex_unlock(&priv->sta_mutex);
return -ENOSPC;
+ }
rtl8xxxu_refresh_rate_mask(priv, 0, sta, true);
priv->fops->report_connect(priv, sta_info->macid, H2C_MACID_ROLE_STA, true);
+ } else {
+ switch (rtlvif->port_num) {
+ case 0:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID;
+ break;
+ case 1:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID1;
+ break;
+ default:
+ break;
+ }
}
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
@@ -7336,13 +7645,19 @@ static int rtl8xxxu_sta_remove(struct ieee80211_hw *hw,
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
if (vif->type == NL80211_IFTYPE_AP)
rtl8xxxu_release_macid(priv, sta_info->macid);
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
static const struct ieee80211_ops rtl8xxxu_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8xxxu_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = rtl8xxxu_add_interface,
@@ -7476,6 +7791,20 @@ static void rtl8xxxu_deinit_led(struct rtl8xxxu_priv *priv)
led_classdev_unregister(led);
}
+static const struct ieee80211_iface_limit rtl8xxxu_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+};
+
+static const struct ieee80211_iface_combination rtl8xxxu_combinations[] = {
+ {
+ .limits = rtl8xxxu_limits,
+ .n_limits = ARRAY_SIZE(rtl8xxxu_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+ },
+};
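Read as a whole, this combination allows at most two interfaces on a single channel, drawn from up to two stations and at most one AP:

	/* Allowed sets: {STA}, {AP}, {STA, STA}, {AP, STA}. */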
+
static int rtl8xxxu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -7522,7 +7851,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
untested = 0;
break;
case 0x2357:
- if (id->idProduct == 0x0109)
+ if (id->idProduct == 0x0109 || id->idProduct == 0x0135)
untested = 0;
break;
case 0x0b05:
@@ -7555,13 +7884,14 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
mutex_init(&priv->usb_buf_mutex);
mutex_init(&priv->syson_indirect_access_mutex);
mutex_init(&priv->h2c_mutex);
+ mutex_init(&priv->sta_mutex);
INIT_LIST_HEAD(&priv->tx_urb_free_list);
spin_lock_init(&priv->tx_urb_lock);
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- INIT_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
+ INIT_DELAYED_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -7611,6 +7941,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret)
goto err_set_intfdata;
+ hw->vif_data_size = sizeof(struct rtl8xxxu_vif);
+
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
if (priv->fops->max_macid_num)
@@ -7620,6 +7952,13 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
hw->queues = 4;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ if (priv->fops->supports_concurrent) {
+ hw->wiphy->iface_combinations = rtl8xxxu_combinations;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtl8xxxu_combinations);
+ }
+
sband = &rtl8xxxu_supported_band;
sband->ht_cap.ht_supported = true;
sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
@@ -7806,6 +8145,9 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192fu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x318b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192fu_fops},
+/* TP-Link TL-WN823N V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0135, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192fu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 920ee50e2115..61c0c0ec07b3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -146,6 +146,21 @@
#define GPIO_INTM_EDGE_TRIG_IRQ BIT(9)
#define REG_LEDCFG0 0x004c
+#define LEDCFG0_LED0CM GENMASK(2, 0)
+#define LEDCFG0_LED1CM GENMASK(10, 8)
+#define LED_MODE_SW_CTRL 0x0
+#define LED_MODE_TX_OR_RX_EVENTS 0x3
+#define LEDCFG0_LED0SV BIT(3)
+#define LEDCFG0_LED1SV BIT(11)
+#define LED_SW_OFF 0x0
+#define LED_SW_ON 0x1
+#define LEDCFG0_LED0_IO_MODE BIT(7)
+#define LEDCFG0_LED1_IO_MODE BIT(15)
+#define LED_IO_MODE_OUTPUT 0x0
+#define LED_IO_MODE_INPUT 0x1
+#define LEDCFG0_LED2EN BIT(21)
+#define LED_GPIO_DISABLE 0x0
+#define LED_GPIO_ENABLE 0x1
#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
#define LEDCFG1_HW_LED_CONTROL BIT(1)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 69e97647e3d6..2e60a6991ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1903,6 +1903,10 @@ void rtl_init_sw_leds(struct ieee80211_hw *hw)
EXPORT_SYMBOL(rtl_init_sw_leds);
const struct ieee80211_ops rtl_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = rtl_op_start,
.stop = rtl_op_stop,
.tx = rtl_op_tx,
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 2e945554ed6d..c1fbc29d5ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1287,18 +1287,44 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
}
EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
+static void _rtl_fw_block_write_usb(struct ieee80211_hw *hw, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 start = START_ADDRESS;
+ u32 n;
+
+ while (size > 0) {
+ if (size >= 64)
+ n = 64;
+ else if (size >= 8)
+ n = 8;
+ else
+ n = 1;
+
+ rtl_write_chunk(rtlpriv, start, n, buffer);
+
+ start += n;
+ buffer += n;
+ size -= n;
+ }
+}
+
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *pu4byteptr = (u8 *)buffer;
u32 i;
- for (i = 0; i < size; i++)
- rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
+ if (rtlpriv->rtlhal.interface == INTF_PCI) {
+ for (i = 0; i < size; i++)
+ rtl_write_byte(rtlpriv, (START_ADDRESS + i),
+ *(buffer + i));
+ } else if (rtlpriv->rtlhal.interface == INTF_USB) {
+ _rtl_fw_block_write_usb(hw, buffer, size);
+ }
}
EXPORT_SYMBOL_GPL(rtl_fw_block_write);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
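
The USB block write above steps down through chunk sizes of 64, 8 and 1 bytes, so every iteration makes progress and the loop terminates; an 83-byte buffer, for example, goes out as 64 + 8 + 8 + 1 + 1 + 1. The chunk-size choice, modeled in isolation:

	/* standalone model of the loop's chunk selection */
	static u32 fw_chunk_len(u32 size)
	{
		if (size >= 64)
			return 64;
		if (size >= 8)
			return 8;
		return 1;
	}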
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 1ec59f439382..4821625ad1e5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -91,8 +91,8 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
int max_size, u8 *hwinfo, int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size);
void rtl_efuse_ops_init(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 96ce05bcf0b3..11709b6c83f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -378,13 +378,13 @@ static void _rtl_pci_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
- rtlpriv->io.write8_async = pci_write8_async;
- rtlpriv->io.write16_async = pci_write16_async;
- rtlpriv->io.write32_async = pci_write32_async;
+ rtlpriv->io.write8 = pci_write8_async;
+ rtlpriv->io.write16 = pci_write16_async;
+ rtlpriv->io.write32 = pci_write32_async;
- rtlpriv->io.read8_sync = pci_read8_sync;
- rtlpriv->io.read16_sync = pci_read16_sync;
- rtlpriv->io.read32_sync = pci_read32_sync;
+ rtlpriv->io.read8 = pci_read8_sync;
+ rtlpriv->io.read16 = pci_read16_sync;
+ rtlpriv->io.read32 = pci_read32_sync;
}
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
@@ -2374,7 +2374,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */
const struct rtl_intf_ops rtl_pci_ops = {
- .read_efuse_byte = read_efuse_byte,
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
.check_buddy_priv = rtl_pci_check_buddy_priv,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 50e139186a93..ed151754fc6e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -350,7 +350,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
__le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
@@ -503,9 +502,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(pdesc, 1);
set_tx_desc_pkt_id(pdesc, 8);
-
- if (!defaultadapter)
- set_tx_desc_qos(pdesc, 1);
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
index 91e4427ab022..4757f93b84e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
@@ -11,7 +11,7 @@
#define CHIP_VENDOR_UMC_B_CUT BIT(6)
#define IS_92C_1T2R(version) \
- (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+ (((version) & CHIP_92C_1T2R) == CHIP_92C_1T2R)
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
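
The IS_92C_1T2R fix matters because the 1T2R encoding is a multi-bit mask: "(v & m) == m" requires every bit of the mask to be set, while the old pair of "&" tests also matched versions carrying only one of the bits. With hypothetical mask values:

	u32 ver = BIT(0);					/* only one bit of the mask */
	bool old_test = (ver & BIT(0)) && (ver & (BIT(0) | BIT(1)));	/* true */
	bool new_test = (ver & (BIT(0) | BIT(1))) == (BIT(0) | BIT(1));	/* false */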
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 5ec0eb8773a5..4217c9a08d01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -622,6 +622,9 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
u16 valuelow;
switch (queue_sel) {
+ default:
+ WARN_ON(1);
+ fallthrough;
case (TX_SELE_HQ | TX_SELE_LQ):
valuehi = QUEUE_HIGH;
valuelow = QUEUE_LOW;
@@ -634,9 +637,6 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
valuehi = QUEUE_HIGH;
valuelow = QUEUE_NORMAL;
break;
- default:
- WARN_ON(1);
- break;
}
if (!wmm_enable) {
beq = valuelow;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 4ff0d4118193..a76f2dc8a977 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -101,7 +101,8 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
rtlphy->rf_type = RF_1T1R;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" :
+ rtlphy->rf_type == RF_1T2R ? "RF_1T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 20b4aac69642..48be7e346efc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -40,7 +40,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.thermalvalue = 0;
/* for firmware buf */
- rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+ rtlpriv->rtlhal.pfirmware = kmalloc(0x4000, GFP_KERNEL);
if (!rtlpriv->rtlhal.pfirmware) {
pr_err("Can't alloc buffer for fw\n");
return 1;
@@ -61,7 +61,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
if (err) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
return err;
@@ -72,7 +72,7 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->rtlhal.pfirmware) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
}
@@ -145,7 +145,6 @@ MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
- .in_ep_num = RTL92C_USB_BULK_IN_NUM,
.rx_urb_num = RTL92C_NUM_RX_URBS,
.rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
.usb_rx_hdl = rtl8192cu_rx_hdl,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 2f44c8aa6066..aa702ba7c9f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -79,68 +79,75 @@ static int configvernoutep(struct ieee80211_hw *hw)
static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
bool bwificfg, struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB Chip-B & WMM Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB typical Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 3;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for WMM.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for typical.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 5;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
{
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 2;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
static int _out_ep_mapping(struct ieee80211_hw *hw)
@@ -475,9 +482,9 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ struct rtl_sta_info *sta_entry;
+ u8 agg_state = RTL_AGG_STOP;
+ u8 ampdu_density = 0;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 rate_flag = info->control.rates[0].flags;
@@ -486,6 +493,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
skb_get_queue_mapping(skb));
u8 *txdesc8;
__le32 *txdesc;
+ u8 tid;
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
@@ -499,10 +507,21 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate);
if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
set_tx_desc_data_shortgi(txdesc, 1);
- if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
- info->flags & IEEE80211_TX_CTL_AMPDU) {
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+ agg_state = sta_entry->tids[tid].agg.agg_state;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (agg_state == RTL_AGG_OPERATIONAL &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(txdesc, 1);
set_tx_desc_max_agg_num(txdesc, 0x14);
+ set_tx_desc_ampdu_density(txdesc, ampdu_density);
+ tcb_desc->rts_enable = 1;
+ tcb_desc->rts_rate = DESC_RATE24M;
} else {
set_tx_desc_agg_break(txdesc, 1);
}
@@ -537,14 +556,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_data_bw(txdesc, 0);
set_tx_desc_data_sc(txdesc, 0);
}
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
- if (sta) {
- u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
-
- set_tx_desc_ampdu_density(txdesc, ampdu_density);
- }
- rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -587,8 +598,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(txdesc, 1);
set_tx_desc_pkt_id(txdesc, 8);
- if (!defaultadapter)
- set_tx_desc_qos(txdesc, 1);
}
if (ieee80211_has_morefrags(fc))
set_tx_desc_more_frag(txdesc, 1);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index 5f81cab205cc..09e61dc0f317 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -4,15 +4,12 @@
#ifndef __RTL92CU_TRX_H__
#define __RTL92CU_TRX_H__
-#define RTL92C_USB_BULK_IN_NUM 1
#define RTL92C_NUM_RX_URBS 8
#define RTL92C_NUM_TX_URBS 32
#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
#define RX_DRV_INFO_SIZE_UNIT 8
-#define RTL_AGG_ON 1
-
enum usb_rx_agg_mode {
USB_RX_AGG_DISABLE,
USB_RX_AGG_DMA,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 743ac6871bf4..4ba42f6be3f2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1669,10 +1669,8 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
u8 cutvalue[2];
u16 chipvalue;
- rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_H,
- &cutvalue[1]);
- rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_L,
- &cutvalue[0]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
chipvalue = (cutvalue[1] << 8) | cutvalue[0];
switch (chipvalue) {
case 0xAA55:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 02ac69c08ed3..192982ec8152 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -42,6 +42,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
struct phy_sts_cck_8192d *cck_buf;
s8 rx_pwr_all, rx_pwr[4];
@@ -62,9 +63,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
- cck_highpwr = (u8) rtl_get_bbreg(hw,
- RFPGA0_XA_HSSIPARAMETER2,
- BIT(9));
+ cck_highpwr = rtlphy->cck_high_power;
else
cck_highpwr = false;
if (!cck_highpwr) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index d9823ddab7be..65bfc14702f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -349,7 +349,6 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool b_defaultadapter = true;
/* bool b_trigger_ac = false; */
u8 *pdesc8 = (u8 *)pdesc_tx;
__le32 *pdesc = (__le32 *)pdesc8;
@@ -503,10 +502,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_hwseq_en_8723(pdesc, 1);
/* set_tx_desc_hwseq_en(pdesc, 1); */
/* set_tx_desc_pkt_id(pdesc, 8); */
-
- if (!b_defaultadapter)
- set_tx_desc_hwseq_sel_8723(pdesc, 1);
- /* set_tx_desc_qos(pdesc, 1); */
+ /* set_tx_desc_qos(pdesc, 1); */
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 30bf2775a335..6e8c87a2fae4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -23,86 +23,23 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
-static void usbctrl_async_callback(struct urb *urb)
-{
- if (urb) {
- /* free dr */
- kfree(urb->setup_packet);
- /* free databuf */
- kfree(urb->transfer_buffer);
- }
-}
-
-static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
-{
- int rc;
- unsigned int pipe;
- u8 reqtype;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
- u8 *databuf;
-
- if (WARN_ON_ONCE(len > databuf_maxlen))
- len = databuf_maxlen;
-
- pipe = usb_sndctrlpipe(udev, 0); /* write_out */
- reqtype = REALTEK_USB_VENQT_WRITE;
-
- dr = kzalloc(sizeof(*dr), GFP_ATOMIC);
- if (!dr)
- return -ENOMEM;
-
- databuf = kzalloc(databuf_maxlen, GFP_ATOMIC);
- if (!databuf) {
- kfree(dr);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- kfree(databuf);
- kfree(dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = reqtype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16(value);
- dr->wIndex = cpu_to_le16(index);
- dr->wLength = cpu_to_le16(len);
- /* data are already in little-endian order */
- memcpy(databuf, pdata, len);
- usb_fill_control_urb(urb, udev, pipe,
- (unsigned char *)dr, databuf, len,
- usbctrl_async_callback, NULL);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc < 0) {
- kfree(databuf);
- kfree(dr);
- }
- usb_free_urb(urb);
- return rc;
-}
-
-static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
+static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype,
+ u16 value, void *pdata, u16 len)
{
unsigned int pipe;
int status;
- u8 reqtype;
int vendorreq_times = 0;
static int count;
- pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
- reqtype = REALTEK_USB_VENQT_READ;
+ if (reqtype == REALTEK_USB_VENQT_READ)
+ pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+ else
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
do {
- status = usb_control_msg(udev, pipe, request, reqtype, value,
- index, pdata, len, 1000);
+ status = usb_control_msg(udev, pipe, REALTEK_USB_VENQT_CMD_REQ,
+ reqtype, value, REALTEK_USB_VENQT_CMD_IDX,
+ pdata, len, 1000);
if (status < 0) {
/* firmware download is checksummed, don't retry */
if ((value >= FW_8192C_START_ADDRESS &&
@@ -114,18 +51,15 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
} while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
if (status < 0 && count++ < 4)
- pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
- return status;
+ dev_err(&udev->dev, "reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x reqtype=0x%x\n",
+ value, status, *(u32 *)pdata, reqtype);
}
static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
{
struct device *dev = rtlpriv->io.dev;
struct usb_device *udev = to_usb_device(dev);
- u8 request;
u16 wvalue;
- u16 index;
__le32 *data;
unsigned long flags;
@@ -134,14 +68,33 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
rtlpriv->usb_data_index = 0;
data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
- _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_READ, wvalue, data, len);
return le32_to_cpu(*data);
}
+
+static void _usb_write_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val, u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ unsigned long flags;
+ __le32 *data;
+ u16 wvalue;
+
+ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+
+ wvalue = (u16)(addr & 0x0000ffff);
+ *data = cpu_to_le32(val);
+
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, wvalue, data, len);
+}
+
static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
return (u8)_usb_read_sync(rtlpriv, addr, 1);
@@ -157,45 +110,27 @@ static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
return _usb_read_sync(rtlpriv, addr, 4);
}
-static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
- u16 len)
+static void _usb_write8_sync(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
- u8 request;
- u16 wvalue;
- u16 index;
- __le32 data;
- int ret;
-
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
- wvalue = (u16)(addr&0x0000ffff);
- data = cpu_to_le32(val);
-
- ret = _usbctrl_vendorreq_async_write(udev, request, wvalue,
- index, &data, len);
- if (ret < 0)
- dev_err(&udev->dev, "error %d writing at 0x%x\n", ret, addr);
+ _usb_write_sync(rtlpriv, addr, val, 1);
}
-static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+static void _usb_write16_sync(struct rtl_priv *rtlpriv, u32 addr, u16 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 1);
+ _usb_write_sync(rtlpriv, addr, val, 2);
}
-static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+static void _usb_write32_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 2);
+ _usb_write_sync(rtlpriv, addr, val, 4);
}
-static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+static void _usb_write_chunk_sync(struct rtl_priv *rtlpriv, u32 addr,
+ u32 length, u8 *data)
{
- struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(rtlpriv->io.dev);
- _usb_write_async(to_usb_device(dev), addr, val, 4);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, addr, data, length);
}
static void _rtl_usb_io_handler_init(struct device *dev,
@@ -205,12 +140,13 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
mutex_init(&rtlpriv->io.bb_mutex);
- rtlpriv->io.write8_async = _usb_write8_async;
- rtlpriv->io.write16_async = _usb_write16_async;
- rtlpriv->io.write32_async = _usb_write32_async;
- rtlpriv->io.read8_sync = _usb_read8_sync;
- rtlpriv->io.read16_sync = _usb_read16_sync;
- rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.write8 = _usb_write8_sync;
+ rtlpriv->io.write16 = _usb_write16_sync;
+ rtlpriv->io.write32 = _usb_write32_sync;
+ rtlpriv->io.write_chunk = _usb_write_chunk_sync;
+ rtlpriv->io.read8 = _usb_read8_sync;
+ rtlpriv->io.read16 = _usb_read16_sync;
+ rtlpriv->io.read32 = _usb_read32_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
@@ -280,7 +216,6 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
- rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
rtlusb->usb_rx_segregate_hdl =
rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
@@ -312,20 +247,38 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
- if (usb_endpoint_dir_in(pep_desc))
+ if (usb_endpoint_dir_in(pep_desc)) {
+ if (usb_endpoint_xfer_bulk(pep_desc)) {
+ /* The vendor drivers assume there is only one
+ * bulk in ep and that it's the first in ep.
+ */
+ if (rtlusb->in_ep_nums == 0)
+ rtlusb->in_ep = usb_endpoint_num(pep_desc);
+ else
+ pr_warn("%s: bulk in endpoint is not the first in endpoint\n",
+ __func__);
+ }
+
rtlusb->in_ep_nums++;
- else if (usb_endpoint_dir_out(pep_desc))
+ } else if (usb_endpoint_dir_out(pep_desc)) {
+ if (rtlusb->out_ep_nums < RTL_USB_MAX_BULKOUT_NUM) {
+ if (usb_endpoint_xfer_bulk(pep_desc))
+ rtlusb->out_eps[rtlusb->out_ep_nums] =
+ usb_endpoint_num(pep_desc);
+ } else {
+ pr_warn("%s: found more bulk out endpoints than the expected %d\n",
+ __func__, RTL_USB_MAX_BULKOUT_NUM);
+ }
+
rtlusb->out_ep_nums++;
+ }
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
pep_desc->bInterval);
}
- if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
- pr_err("Too few input end points found\n");
- return -EINVAL;
- }
+
if (rtlusb->out_ep_nums == 0) {
pr_err("No output end points found\n");
return -EINVAL;
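
With the endpoint discovery above, the queue maps earlier in this patch index into out_eps[] instead of hardcoding endpoint numbers 2/3/5. A device whose bulk-out endpoints enumerate as 0x02 and 0x03 reproduces the old mapping; a sketch for the typical (non-WMM) two-EP case:

	u8 out_eps[RTL_USB_MAX_BULKOUT_NUM] = { 0x02, 0x03 };	/* as enumerated */

	u8 vo_ep = out_eps[0];	/* 0x02, the old hardcoded VO/MGT/BCN/HI value */
	u8 be_ep = out_eps[1];	/* 0x03, the old hardcoded BE/BK value */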
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 3bf85b23eec1..12529afc0510 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -19,6 +19,7 @@
#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
+#define RTL_USB_MAX_BULKOUT_NUM 4
#define RTL_USB_MAX_TX_URBS_NUM 8
enum rtl_txq {
@@ -94,6 +95,7 @@ struct rtl_usb {
/* Tx */
u8 out_ep_nums;
+ u8 out_eps[RTL_USB_MAX_BULKOUT_NUM];
u8 out_queue_sel;
struct rtl_ep_map ep_map;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index d87cd2252eac..9fabf597cfd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1397,8 +1397,6 @@ struct rtl_phy {
#define RTL_AGG_PROGRESS 1
#define RTL_AGG_START 2
#define RTL_AGG_OPERATIONAL 3
-#define RTL_AGG_OFF 0
-#define RTL_AGG_ON 1
#define RTL_RX_AGG_START 1
#define RTL_RX_AGG_STOP 0
#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
@@ -1447,13 +1445,15 @@ struct rtl_io {
/*PCI IO map */
unsigned long pci_base_addr; /*device I/O address */
- void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write8)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write_chunk)(struct rtl_priv *rtlpriv, u32 addr, u32 length,
+ u8 *data);
- u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u8 (*read8)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32)(struct rtl_priv *rtlpriv, u32 addr);
};
@@ -1471,7 +1471,6 @@ struct rtl_mac {
enum nl80211_iftype opmode;
/*Probe Beacon management */
- struct rtl_tid_data tids[MAX_TID_COUNT];
enum rtl_link_state link_state;
int n_channels;
@@ -2290,7 +2289,6 @@ struct rtl_hal_ops {
struct rtl_intf_ops {
/*com */
- void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
bool (*check_buddy_priv)(struct ieee80211_hw *hw,
@@ -2354,7 +2352,6 @@ struct rtl_mod_params {
struct rtl_hal_usbint_cfg {
/* data - rx */
- u32 in_ep_num;
u32 rx_urb_num;
u32 rx_max_size;
@@ -2916,25 +2913,25 @@ extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read8_sync(rtlpriv, addr);
+ return rtlpriv->io.read8(rtlpriv, addr);
}
static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read16_sync(rtlpriv, addr);
+ return rtlpriv->io.read16(rtlpriv, addr);
}
static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read32_sync(rtlpriv, addr);
+ return rtlpriv->io.read32(rtlpriv, addr);
}
static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
{
- rtlpriv->io.write8_async(rtlpriv, addr, val8);
+ rtlpriv->io.write8(rtlpriv, addr, val8);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read8_sync(rtlpriv, addr);
+ rtlpriv->io.read8(rtlpriv, addr);
}
static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
@@ -2947,19 +2944,25 @@ static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
{
- rtlpriv->io.write16_async(rtlpriv, addr, val16);
+ rtlpriv->io.write16(rtlpriv, addr, val16);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read16_sync(rtlpriv, addr);
+ rtlpriv->io.read16(rtlpriv, addr);
}
static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
u32 addr, u32 val32)
{
- rtlpriv->io.write32_async(rtlpriv, addr, val32);
+ rtlpriv->io.write32(rtlpriv, addr, val32);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read32_sync(rtlpriv, addr);
+ rtlpriv->io.read32(rtlpriv, addr);
+}
+
+static inline void rtl_write_chunk(struct rtl_priv *rtlpriv,
+ u32 addr, u32 length, u8 *data)
+{
+ rtlpriv->io.write_chunk(rtlpriv, addr, length, data);
}
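
Note that write_chunk is only installed by the USB io handler in this series (the PCI path leaves it NULL), which is why rtl_fw_block_write checks the interface type before using it. A minimal caller sketch, assuming a USB device and a hypothetical scratch buffer:

	u8 blob[16] = {};	/* payload contents hypothetical */

	rtl_write_chunk(rtlpriv, START_ADDRESS, sizeof(blob), blob);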
static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 1b2ad81838be..5b2036798159 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -316,23 +316,13 @@ static ssize_t rtw_debugfs_set_single_input(struct file *filp,
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
- struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
u32 input;
- int num;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou32_from_user(buffer, count, 0, &input);
if (ret)
return ret;
- num = kstrtoint(tmp, 0, &input);
-
- if (num) {
- rtw_warn(rtwdev, "kstrtoint failed\n");
- return num;
- }
-
debugfs_priv->cb_data = input;
return count;
@@ -485,19 +475,12 @@ static ssize_t rtw_debugfs_set_fix_rate(struct file *filp,
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 fix_rate;
- char tmp[32 + 1];
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou8_from_user(buffer, count, 0, &fix_rate);
if (ret)
return ret;
- ret = kstrtou8(tmp, 0, &fix_rate);
- if (ret) {
- rtw_warn(rtwdev, "invalid args, [rate]\n");
- return ret;
- }
-
dm_info->fix_rate = fix_rate;
return count;
@@ -879,20 +862,13 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_coex *coex = &rtwdev->coex;
- char tmp[32 + 1];
bool enable;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &enable);
if (ret)
return ret;
- ret = kstrtobool(tmp, &enable);
- if (ret) {
- rtw_warn(rtwdev, "invalid arguments\n");
- return ret;
- }
-
mutex_lock(&rtwdev->mutex);
coex->manual_control = !enable;
mutex_unlock(&rtwdev->mutex);
@@ -951,18 +927,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
bool input;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &input);
if (ret)
return ret;
- ret = kstrtobool(tmp, &input);
- if (ret)
- return -EINVAL;
-
if (!input)
return -EINVAL;
@@ -1030,11 +1001,12 @@ static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- int bit;
+ int ret, bit;
bool en;
- if (kstrtoint_from_user(buffer, count, 10, &bit))
- return -EINVAL;
+ ret = kstrtoint_from_user(buffer, count, 10, &bit);
+ if (ret)
+ return ret;
en = bit > 0;
bit = abs(bit);
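
The debugfs hunks above all converge on one idiom: parse the value straight from user memory with kstrtox_from_user() rather than bouncing through a local buffer plus kstrtox(). A minimal sketch of the resulting handler shape (names hypothetical):

	static ssize_t demo_set_u32(struct file *filp, const char __user *buffer,
				    size_t count, loff_t *loff)
	{
		u32 input;
		int ret;

		ret = kstrtou32_from_user(buffer, count, 0, &input);
		if (ret)
			return ret;

		/* ... act on input ... */
		return count;
	}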
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 298663b03580..0c1c1ff31085 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -309,6 +309,13 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
+ if (chip->id == RTW_CHIP_TYPE_8822C ||
+ chip->id == RTW_CHIP_TYPE_8822B ||
+ chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
+ }
+
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
rtw_write32(rtwdev, REG_SDIO_HIMR, imr);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index d8d68f16014e..7af5bf7fe5b6 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -927,6 +927,10 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
}
const struct ieee80211_ops rtw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6d22628129d0..ffba6b88f392 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2032,8 +2032,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
rtw_phy_setup_phy_cond(rtwdev, hal->pkg_type);
rtw_phy_init_tx_power(rtwdev);
- if (rfe_def->agc_btg_tbl)
- rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 2bfc0e822b8d..9986a4cb37eb 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1450,6 +1450,7 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
@@ -1498,6 +1499,9 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
ret);
}
+
+ if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
+ rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}
static int __maybe_unused rtw_pci_suspend(struct device *dev)
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 128e75a81bf3..37ef80c9091d 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
rtw_load_table(rtwdev, chip->bb_tbl);
rtw_load_table(rtwdev, chip->agc_tbl);
+ if (rfe_def->agc_btg_tbl)
+ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_rfk_table(rtwdev);
for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 1634f03784f1..b122f226924b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -557,6 +557,9 @@
#define REG_RFE_INV16 0x0cbe
#define BIT_RFE_BUF_EN BIT(3)
+#define REG_ANAPARSW_MAC_0 0x1010
+#define BIT_CF_L_V2 GENMASK(29, 28)
+
#define REG_ANAPAR_XTAL_0 0x1040
#define BIT_XCAP_0 GENMASK(23, 10)
#define REG_CPU_DMEM_CON 0x1080
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 429bb420b056..fe5d8e188350 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -773,9 +773,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
if (cck_enable)
dm_info->total_fa_cnt += cck_fa_cnt;
- dm_info->total_fa_cnt = ofdm_fa_cnt;
crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index 7a5cbdc31ef7..e2c7d9f87683 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -9,24 +9,36 @@
#include "usb.h"
static const struct usb_device_id rtw_8821cu_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index e6ab1ac6d709..a0188511099a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -33,6 +33,36 @@ static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
}
+static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ bool reg_on_section = false;
+ u16 t_reg = 0x4e0;
+ u8 t_len = 1;
+ int status;
+
+ /* There are three sections:
+ * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
+ * 2. off (< 0xFE00, excluding "on" section): this section could be
+ * powered off
+ * 3. local (>= 0xFE00): usb specific registers section
+ */
+ if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
+ reg_on_section = true;
+
+ if (!reg_on_section)
+ return;
+
+ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+ t_reg, 0, data, t_len, 500);
+
+ if (status != t_len && status != -ENODEV)
+ rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
+ __func__, t_reg, t_len, status);
+}
+
static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
@@ -58,6 +88,11 @@ static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
rtw_err(rtwdev, "read register 0x%x failed with %d\n",
addr, ret);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
+
return le32_to_cpu(*data);
}
@@ -102,6 +137,11 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
if (ret < 0 && ret != -ENODEV && count++ < 4)
rtw_err(rtwdev, "write register 0x%x failed with %d\n",
addr, ret);
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
}
static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
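
The address test in rtw_usb_reg_sec() in isolation: only the always-powered "on" section gets the extra dummy write; the possibly powered-off section below 0xFE00 and the USB-local registers at 0xFE00 and above are skipped:

	static bool addr_in_on_section(u32 addr)
	{
		return addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff);
	}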
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 914c94988b2f..11fbdd142162 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -777,3 +777,64 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
+{
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
+ DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V2_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0],
+ DCTLINFO_V2_W5_SEC_ENT_VALID_V1) |
+ le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_SEC_ENT_VALID_V1 |
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V2_W6_SEC_ENT1_V1) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V2_W6_SEC_ENT2_V1) |
+ le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V2_W6_SEC_ENT3_V1) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_SEC_ENT1_V1 |
+ DCTLINFO_V2_W6_SEC_ENT2_V1 |
+ DCTLINFO_V2_W6_SEC_ENT3_V1 |
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+
+ h2c->w7 = le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V2_W7_SEC_ENT5_V1) |
+ le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 |
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 83c160a614e6..fa09d11c345c 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -352,6 +352,111 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
}
+struct rtw89_h2c_dctlinfo_ud_v2 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define DCTLINFO_V2_C0_MACID GENMASK(6, 0)
+#define DCTLINFO_V2_C0_OP BIT(7)
+
+#define DCTLINFO_V2_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V2_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V2_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V2_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V2_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V2_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V2_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V2_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V2_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V2_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V2_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V2_W2_NAT25_EN BIT(30)
+#define DCTLINFO_V2_W2_IS_MLD BIT(31)
+#define DCTLINFO_V2_W2_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V2_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V2_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V2_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V2_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V2_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W4_VLAN_TAG_SEL GENMASK(7, 5)
+#define DCTLINFO_V2_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V2_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V2_W4_VLAN_RX_DYNAMIC_PCP_EN BIT(11)
+#define DCTLINFO_V2_W4_VLAN_RX_PKT_DROP BIT(12)
+#define DCTLINFO_V2_W4_VLAN_RX_VALID BIT(13)
+#define DCTLINFO_V2_W4_VLAN_TX_VALID BIT(14)
+#define DCTLINFO_V2_W4_WAPI BIT(15)
+#define DCTLINFO_V2_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V2_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V2_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V2_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V2_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V2_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V2_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V2_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V2_W4_ALL GENMASK(31, 5)
+#define DCTLINFO_V2_W5_SEC_ENT7_KEYID GENMASK(1, 0)
+#define DCTLINFO_V2_W5_SEC_ENT8_KEYID GENMASK(3, 2)
+#define DCTLINFO_V2_W5_SEC_ENT_VALID_V1 GENMASK(23, 8)
+#define DCTLINFO_V2_W5_SEC_ENT0_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W5_ALL (GENMASK(31, 8) | GENMASK(3, 0))
+#define DCTLINFO_V2_W6_SEC_ENT1_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W6_SEC_ENT2_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W6_SEC_ENT3_V1 GENMASK(23, 16)
+#define DCTLINFO_V2_W6_SEC_ENT4_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W6_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W7_SEC_ENT5_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W7_SEC_ENT6_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W7_SEC_ENT7 GENMASK(23, 16)
+#define DCTLINFO_V2_W7_SEC_ENT8 GENMASK(31, 24)
+#define DCTLINFO_V2_W7_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W8_MLD_SMA_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W8_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W9_MLD_SMA_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W9_MLD_TMA_L_V1 GENMASK(31, 16)
+#define DCTLINFO_V2_W9_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W10_MLD_TMA_H_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W10_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W11_MLD_TA_BSSID_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W11_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W12_MLD_TA_BSSID_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W12_ALL GENMASK(15, 0)
+
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
@@ -373,6 +478,10 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
u8 *cmd);
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, u8 *cmd);
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index cbf6821af6b8..051a3cad6101 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -212,33 +212,68 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
rtw89_config_default_chandef(rtwdev);
}
+static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ struct rtw89_entity_weight *w)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chanctx_cfg *cfg;
+ struct rtw89_vif *rtwvif;
+ int idx;
+
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ cfg = hal->sub[idx].cfg;
+ if (!cfg) {
+ /* doesn't run with chanctx ops; one channel at most */
+ w->active_chanctxs = 1;
+ break;
+ }
+
+ if (cfg->ref_count > 0)
+ w->active_chanctxs++;
+ }
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (rtwvif->chanctx_assigned)
+ w->active_roles++;
+ }
+}
+
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
{
+ DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {};
struct rtw89_hal *hal = &rtwdev->hal;
const struct cfg80211_chan_def *chandef;
+ struct rtw89_entity_weight w = {};
enum rtw89_entity_mode mode;
struct rtw89_chan chan;
- u8 weight;
- u8 last;
u8 idx;
lockdep_assert_held(&rtwdev->mutex);
- weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
- switch (weight) {
+ bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ switch (w.active_chanctxs) {
default:
- rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
- bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n",
+ w.active_chanctxs);
+ bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY);
fallthrough;
case 0:
rtw89_config_default_chandef(rtwdev);
+ set_bit(RTW89_SUB_ENTITY_0, recalc_map);
fallthrough;
case 1:
- last = RTW89_SUB_ENTITY_0;
mode = RTW89_ENTITY_MODE_SCC;
break;
- case 2:
- last = RTW89_SUB_ENTITY_1;
+ case 2 ... NUM_OF_RTW89_SUB_ENTITY:
+ if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "unhandled ent: %d chanctxs %d roles\n",
+ w.active_chanctxs, w.active_roles);
+ return RTW89_ENTITY_MODE_UNHANDLED;
+ }
+
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
break;
@@ -247,7 +282,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
break;
}
- for (idx = 0; idx <= last; idx++) {
+ for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) {
chandef = rtw89_chandef_get(rtwdev, idx);
rtw89_get_channel_params(chandef, &chan);
if (chan.channel == 0) {
@@ -287,6 +322,13 @@ static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev,
}
}
+static bool rtw89_concurrent_via_mrc(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+
+ return chip_gen == RTW89_CHIP_BE;
+}
+
/* This function centrally manages how MCC roles are sorted and iterated.
* And, it guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES.
* So, if data needs to pass an array for ordered_idx, the array can declare
@@ -320,19 +362,12 @@ int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev,
return 0;
}
-/* For now, IEEE80211_HW_TIMING_BEACON_ONLY can make things simple to ensure
- * correctness of MCC calculation logic below. We have noticed that once driver
- * declares WIPHY_FLAG_SUPPORTS_MLO, the use of IEEE80211_HW_TIMING_BEACON_ONLY
- * will be restricted. We will make an alternative in driver when it is ready
- * for MLO.
- */
static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *role, u64 tsf)
{
struct rtw89_vif *rtwvif = role->rtwvif;
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval);
- u64 sync_tsf = vif->bss_conf.sync_tsf;
+ u64 sync_tsf = READ_ONCE(rtwvif->sync_bcn_tsf);
u32 remainder;
if (tsf < sync_tsf) {
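
The TBTT offset above is (tsf - sync_tsf) modulo the beacon interval (with the tsf < sync_tsf case handled separately). A worked example with a 100 TU interval (102400 us): for sync_tsf = 1,000,000 and tsf = 1,250,000 the offset is 250,000 mod 102,400 = 45,200 us into the current beacon period:

	u32 bcn_intvl_us = ieee80211_tu_to_usec(100);	/* 102400 us */
	u32 remainder;

	div_u64_rem(1250000ULL - 1000000ULL, bcn_intvl_us, &remainder);	/* 45200 */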
@@ -346,16 +381,13 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
return remainder;
}
-static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+static int __mcc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mac_mcc_tsf_rpt rpt = {};
struct rtw89_fw_mcc_tsf_req req = {};
- u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
- u32 tbtt_ofst_ref, tbtt_ofst_aux;
- u64 tsf_ref, tsf_aux;
int ret;
req.group = mcc->group;
@@ -365,11 +397,63 @@ static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC h2c failed to request tsf: %d\n", ret);
- return RTW89_MCC_DFLT_BCN_OFST_TIME;
+ return ret;
+ }
+
+ *tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
+ *tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+
+ return 0;
+}
+
+static int __mrc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_req_tsf_arg arg = {};
+ struct rtw89_mac_mrc_tsf_rpt rpt = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_REQ_TSF_NUM < NUM_OF_RTW89_MCC_ROLES);
+
+ arg.num = 2;
+ arg.infos[0].band = ref->rtwvif->mac_idx;
+ arg.infos[0].port = ref->rtwvif->port;
+ arg.infos[1].band = aux->rtwvif->mac_idx;
+ arg.infos[1].port = aux->rtwvif->port;
+
+ ret = rtw89_fw_h2c_mrc_req_tsf(rtwdev, &arg, &rpt);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to request tsf: %d\n", ret);
+ return ret;
}
- tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
- tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+ *tsf_ref = rpt.tsfs[0];
+ *tsf_aux = rpt.tsfs[1];
+
+ return 0;
+}
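
Both request paths end the same way: the firmware report carries each TSF as two 32-bit halves, and the driver stitches them into one 64-bit value. A standalone sketch of that assembly; the input values here are made up:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t tsf_from_halves(uint32_t hi, uint32_t lo)
	{
		/* high half shifted into bits 63..32, low half in 31..0 */
		return (uint64_t)hi << 32 | lo;
	}

	int main(void)
	{
		uint32_t tsf_x_high = 0x12, tsf_x_low = 0x89abcdef;

		printf("tsf=0x%llx\n",
		       (unsigned long long)tsf_from_halves(tsf_x_high, tsf_x_low));
		return 0;
	}
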
+
+static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
+ u32 tbtt_ofst_ref, tbtt_ofst_aux;
+ u64 tsf_ref, tsf_aux;
+ int ret;
+
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+ else
+ ret = __mcc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+
+ if (ret)
+ return RTW89_MCC_DFLT_BCN_OFST_TIME;
+
tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref);
tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux);
@@ -392,6 +476,28 @@ void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role,
mcc_role->macid_bitmap[idx] |= BIT(pos);
}
+static
+u32 rtw89_mcc_role_fw_macid_bitmap_to_u32(struct rtw89_mcc_role *mcc_role)
+{
+ unsigned int macid;
+ unsigned int i, j;
+ u32 bitmap = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mcc_role->macid_bitmap); i++) {
+ for (j = 0; j < 8; j++) {
+ macid = i * 8 + j;
+ if (macid >= 32)
+ goto out;
+
+ if (mcc_role->macid_bitmap[i] & BIT(j))
+ bitmap |= BIT(macid);
+ }
+ }
+
+out:
+ return bitmap;
+}
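
The conversion above flattens the per-role byte-array bitmap into the single u32 the MRC H2C expects; macids 0..31 map into the 32-bit field and anything higher is dropped by the early exit. A hedged, self-contained rendition:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t bitmap_to_u32(const uint8_t *bytes, unsigned int nbytes)
	{
		uint32_t bitmap = 0;
		unsigned int i, j;

		for (i = 0; i < nbytes; i++) {
			for (j = 0; j < 8; j++) {
				unsigned int macid = i * 8 + j;

				if (macid >= 32)
					return bitmap; /* field is full */
				if (bytes[i] & (1u << j))
					bitmap |= 1u << macid;
			}
		}
		return bitmap;
	}

	int main(void)
	{
		/* byte 4 (macids 32..39) is intentionally lost */
		uint8_t macid_bitmap[8] = { 0x05, 0x80, 0, 0, 0xff, 0, 0, 0 };

		printf("0x%08x\n", bitmap_to_u32(macid_bitmap, 8)); /* 0x00008005 */
		return 0;
	}
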
+
static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -588,6 +694,9 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
int ret;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+
if (sel.bind_vif[rtwvif->sub_entity_idx]) {
rtw89_warn(rtwdev,
"MCC skip extra vif <macid %d> on chanctx[%d]\n",
@@ -1150,7 +1259,11 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
tsf_ofst_tgt = bcn_intvl_src_us - remainder;
config->sync.macid_tgt = tgt->rtwvif->mac_id;
+ config->sync.band_tgt = tgt->rtwvif->mac_idx;
+ config->sync.port_tgt = tgt->rtwvif->port;
config->sync.macid_src = src->rtwvif->mac_id;
+ config->sync.band_src = src->rtwvif->mac_idx;
+ config->sync.port_src = src->rtwvif->port;
config->sync.offset = tsf_ofst_tgt / 1024;
config->sync.enable = true;
@@ -1297,6 +1410,37 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
return 0;
}
+static
+void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_policy *policy = &role->policy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg;
+ const struct rtw89_chan *chan;
+
+ slot_arg = &arg->slots[slot_idx];
+ role->slot_idx = slot_idx;
+
+ slot_arg->duration = role->duration;
+ slot_arg->role_num = 1;
+
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
+ slot_arg->roles[0].is_master = role == ref;
+ slot_arg->roles[0].band = chan->band_type;
+ slot_arg->roles[0].bw = chan->band_width;
+ slot_arg->roles[0].central_ch = chan->channel;
+ slot_arg->roles[0].primary_ch = chan->primary_channel;
+ slot_arg->roles[0].en_tx_null = !policy->dis_tx_null;
+ slot_arg->roles[0].null_early = policy->tx_null_early;
+ slot_arg->roles[0].macid = role->rtwvif->mac_id;
+ slot_arg->roles[0].macid_main_bitmap =
+ rtw89_mcc_role_fw_macid_bitmap_to_u32(role);
+}
+
static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1318,6 +1462,20 @@ static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
return 0;
}
+static
+void __mrc_fw_add_bt_role(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg = &arg->slots[slot_idx];
+
+ slot_arg->duration = bt_role->duration;
+ slot_arg->role_num = 1;
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_BT;
+}
+
static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1403,6 +1561,130 @@ static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
return 0;
}
+static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg_src;
+ u8 slot_idx_tgt;
+
+ if (!courtesy->enable)
+ return;
+
+ if (courtesy->macid_src == ref->rtwvif->mac_id) {
+ slot_arg_src = &arg->slots[ref->slot_idx];
+ slot_idx_tgt = aux->slot_idx;
+ } else {
+ slot_arg_src = &arg->slots[aux->slot_idx];
+ slot_idx_tgt = ref->slot_idx;
+ }
+
+ slot_arg_src->courtesy_target = slot_idx_tgt;
+ slot_arg_src->courtesy_period = courtesy->slot_num;
+ slot_arg_src->courtesy_en = true;
+}
+
+static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_fw_mrc_start_arg start_arg = {};
+ struct rtw89_fw_mrc_add_arg add_arg = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_ADD_SLOT_NUM <
+ NUM_OF_RTW89_MCC_ROLES + 1 /* bt role */);
+
+ if (replace) {
+ start_arg.old_sch_idx = mcc->group;
+ start_arg.action = RTW89_H2C_MRC_START_ACTION_REPLACE_OLD;
+ mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group);
+ }
+
+ add_arg.sch_idx = mcc->group;
+ add_arg.sch_type = RTW89_H2C_MRC_SCH_BAND0_ONLY;
+
+ switch (pattern->plan) {
+ case RTW89_MCC_PLAN_TAIL_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_MID_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 1);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_NO_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+
+ add_arg.slot_num = 2;
+ add_arg.btc_in_sch = false;
+ break;
+ default:
+ rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan);
+ return -EFAULT;
+ }
+
+ __mrc_fw_add_courtesy(rtwdev, &add_arg);
+
+ ret = rtw89_fw_h2c_mrc_add(rtwdev, &add_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger add: %d\n", ret);
+ return ret;
+ }
+
+ if (sync->enable) {
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+ };
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+ }
+
+ start_arg.sch_idx = mcc->group;
+ start_arg.start_tsf = config->start_tsf;
+
+ ret = rtw89_fw_h2c_mrc_start(rtwdev, &start_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger start: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
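
The switch over pattern->plan is at heart a slot-ordering decision: where the BT slot sits relative to the two wifi roles in the firmware schedule. A toy sketch of the three layouts; "ref", "aux", and "bt" are shorthand for the reference role, the auxiliary role, and the BT slot:

	#include <stdio.h>

	int main(void)
	{
		const char *tail_bt[] = { "ref", "aux", "bt" }; /* PLAN_TAIL_BT */
		const char *mid_bt[]  = { "ref", "bt", "aux" }; /* PLAN_MID_BT */
		const char *no_bt[]   = { "ref", "aux" };       /* PLAN_NO_BT */
		unsigned int i;

		for (i = 0; i < 3; i++)
			printf("tail_bt slot %u: %s\n", i, tail_bt[i]);
		for (i = 0; i < 3; i++)
			printf("mid_bt slot %u: %s\n", i, mid_bt[i]);
		for (i = 0; i < 2; i++)
			printf("no_bt slot %u: %s\n", i, no_bt[i]);
		return 0;
	}
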
+
static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1444,6 +1726,60 @@ static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_chang
return 0;
}
+static int __mrc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_upd_duration_arg dur_arg = {
+ .sch_idx = mcc->group,
+ .start_tsf = config->start_tsf,
+ .slot_num = 2,
+ .slots[0] = {
+ .slot_idx = ref->slot_idx,
+ .duration = ref->duration,
+ },
+ .slots[1] = {
+ .slot_idx = aux->slot_idx,
+ .duration = aux->duration,
+ },
+ };
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+ };
+ int ret;
+
+ ret = rtw89_fw_h2c_mrc_upd_duration(rtwdev, &dur_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update duration: %d\n", ret);
+ return ret;
+ }
+
+ if (!sync->enable || !sync_changed)
+ return 0;
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1494,7 +1830,7 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
if (!rtwvif_go->chanctx_assigned)
return;
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_go);
}
static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
@@ -1562,7 +1898,11 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- ret = __mcc_fw_start(rtwdev, false);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, false);
+ else
+ ret = __mcc_fw_start(rtwdev, false);
+
if (ret)
return ret;
@@ -1580,16 +1920,23 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
- ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
- ref->rtwvif->mac_id, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to trigger stop: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev)) {
+ ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger del: %d\n", ret);
+ } else {
+ ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
+ ref->rtwvif->mac_id, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to trigger stop: %d\n", ret);
- ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to delete group: %d\n", ret);
+ ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to delete group: %d\n", ret);
+ }
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
@@ -1615,7 +1962,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
- ret = __mcc_fw_start(rtwdev, true);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, true);
+ else
+ ret = __mcc_fw_start(rtwdev, true);
+
if (ret)
return ret;
} else {
@@ -1624,7 +1975,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
else
sync_changed = true;
- ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ else
+ ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+
if (ret)
return ret;
}
@@ -1666,12 +2021,75 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE);
}
+static int __mcc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ int ret;
+
+ ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+ upd->rtwvif->mac_id,
+ upd->macid_bitmap);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to update macid bitmap: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __mrc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *cur,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_fw_mrc_upd_bitmap_arg arg = {};
+ u32 old = rtw89_mcc_role_fw_macid_bitmap_to_u32(cur);
+ u32 new = rtw89_mcc_role_fw_macid_bitmap_to_u32(upd);
+ u32 add = new & ~old;
+ u32 del = old & ~new;
+ int ret;
+ int i;
+
+ arg.sch_idx = mcc->group;
+ arg.macid = upd->rtwvif->mac_id;
+
+ for (i = 0; i < 32; i++) {
+ if (add & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (del & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update bitmap: %d\n", ret);
+ return ret;
+}
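
Where the old MCC command replaced the whole macid bitmap in one shot, the MRC variant diffs the old bitmap against the new one and issues one ADD or DEL action per changed client. The set algebra in isolation, with made-up bitmaps:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t old = 0x0000f00f;
		uint32_t cur = 0x0000ff00;
		uint32_t add = cur & ~old;   /* 0x00000f00: clients to add */
		uint32_t del = old & ~cur;   /* 0x0000000f: clients to delete */
		int i;

		for (i = 0; i < 32; i++) {
			if (add & (1u << i))
				printf("add client_macid %d\n", i);
			if (del & (1u << i))
				printf("del client_macid %d\n", i);
		}
		return 0;
	}
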
+
static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *mcc_role,
unsigned int ordered_idx,
void *data)
{
- struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role upd = {
.rtwvif = mcc_role->rtwvif,
};
@@ -1685,14 +2103,13 @@ static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
sizeof(mcc_role->macid_bitmap)) == 0)
return 0;
- ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
- upd.rtwvif->mac_id,
- upd.macid_bitmap);
- if (ret) {
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to update macid bitmap: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_upd_macid_bitmap(rtwdev, mcc_role, &upd);
+ else
+ ret = __mcc_fw_upd_macid_bitmap(rtwdev, &upd);
+
+ if (ret)
return ret;
- }
memcpy(mcc_role->macid_bitmap, upd.macid_bitmap,
sizeof(mcc_role->macid_bitmap));
@@ -1900,6 +2317,41 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_work(rtwdev);
}
+static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx1,
+ enum rtw89_sub_entity_idx idx2)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_sub_entity tmp;
+ struct rtw89_vif *rtwvif;
+ u8 cur;
+
+ if (idx1 == idx2)
+ return;
+
+ hal->sub[idx1].cfg->idx = idx2;
+ hal->sub[idx2].cfg->idx = idx1;
+
+ tmp = hal->sub[idx1];
+ hal->sub[idx1] = hal->sub[idx2];
+ hal->sub[idx2] = tmp;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+ if (rtwvif->sub_entity_idx == idx1)
+ rtwvif->sub_entity_idx = idx2;
+ else if (rtwvif->sub_entity_idx == idx2)
+ rtwvif->sub_entity_idx = idx1;
+ }
+
+ cur = atomic_read(&hal->roc_entity_idx);
+ if (cur == idx1)
+ atomic_set(&hal->roc_entity_idx, idx2);
+ else if (cur == idx2)
+ atomic_set(&hal->roc_entity_idx, idx1);
+}
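
Swapping sub-entities is more than exchanging the two array slots: every stored index that pointed at either slot must be remapped too, which is what the vif loop and the roc_entity_idx fixup above take care of. A stripped-down sketch with invented types:

	#include <stdio.h>

	struct sub {
		int chan;
	};

	int main(void)
	{
		struct sub subs[2] = { { 36 }, { 1 } };
		int vif_idx[3] = { 0, 1, 1 };   /* which slot each vif uses */
		int i, idx1 = 0, idx2 = 1;
		struct sub tmp;

		/* exchange the two slots */
		tmp = subs[idx1];
		subs[idx1] = subs[idx2];
		subs[idx2] = tmp;

		/* remap every index that referred to either slot */
		for (i = 0; i < 3; i++) {
			if (vif_idx[i] == idx1)
				vif_idx[i] = idx2;
			else if (vif_idx[i] == idx2)
				vif_idx[i] = idx1;
		}

		printf("slot0 chan=%d, vif0 now on slot %d\n",
		       subs[0].chan, vif_idx[0]);
		return 0;
	}
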
+
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
@@ -1913,8 +2365,8 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
return -ENOENT;
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
- rtw89_set_channel(rtwdev);
cfg->idx = idx;
+ cfg->ref_count = 0;
hal->sub[idx].cfg = cfg;
return 0;
}
@@ -1924,47 +2376,8 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
- enum rtw89_entity_mode mode;
- struct rtw89_vif *rtwvif;
- u8 drop, roll;
-
- drop = cfg->idx;
- if (drop != RTW89_SUB_ENTITY_0)
- goto out;
- roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, drop + 1);
-
- /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
- if (roll == NUM_OF_RTW89_SUB_ENTITY)
- goto out;
-
- /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
- * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
- */
- hal->sub[roll].cfg->idx = RTW89_SUB_ENTITY_0;
- hal->sub[RTW89_SUB_ENTITY_0] = hal->sub[roll];
-
- rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- if (rtwvif->sub_entity_idx == roll)
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
- }
-
- atomic_cmpxchg(&hal->roc_entity_idx, roll, RTW89_SUB_ENTITY_0);
-
- drop = roll;
-
-out:
- mode = rtw89_get_entity_mode(rtwdev);
- switch (mode) {
- case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
- break;
- default:
- break;
- }
-
- clear_bit(drop, hal->entity_map);
- rtw89_set_channel(rtwdev);
+ clear_bit(cfg->idx, hal->entity_map);
}
void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
@@ -1985,16 +2398,73 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_entity_weight w = {};
rtwvif->sub_entity_idx = cfg->idx;
rtwvif->chanctx_assigned = true;
- return 0;
+ cfg->ref_count++;
+
+ if (cfg->idx == RTW89_SUB_ENTITY_0)
+ goto out;
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ if (w.active_chanctxs != 1)
+ goto out;
+
+ /* put the first active chanctx at RTW89_SUB_ENTITY_0 */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0);
+
+out:
+ return rtw89_set_channel(rtwdev);
}
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct ieee80211_chanctx_conf *ctx)
{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_entity_weight w = {};
+ enum rtw89_sub_entity_idx roll;
+ enum rtw89_entity_mode cur;
+
rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->chanctx_assigned = false;
+ cfg->ref_count--;
+
+ if (cfg->ref_count != 0)
+ goto out;
+
+ if (cfg->idx != RTW89_SUB_ENTITY_0)
+ goto out;
+
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY,
+ cfg->idx + 1);
+ /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
+ if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ goto out;
+
+ /* RTW89_SUB_ENTITY_0 is going to be released, and another one exists.
+ * Make that one roll down to RTW89_SUB_ENTITY_0 to replace it.
+ */

+ rtw89_swap_sub_entity(rtwdev, cfg->idx, roll);
+
+out:
+ rtw89_entity_calculate_weight(rtwdev, &w);
+
+ cur = rtw89_get_entity_mode(rtwdev);
+ switch (cur) {
+ case RTW89_ENTITY_MODE_MCC:
+ /* If multiple roles are still active, re-plan MCC for the chanctx
+ * changes. Otherwise, just stop MCC.
+ */
+ rtw89_mcc_stop(rtwdev);
+ if (w.active_roles == NUM_OF_RTW89_MCC_ROLES)
+ rtw89_mcc_start(rtwdev);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_set_channel(rtwdev);
}
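
A hedged model of that unassign flow, with invented names: MCC is always torn down once the reference drops, and re-planned only if two roles remain assigned:

	#include <stdio.h>

	#define MCC_ROLES 2

	static void mcc_stop(void)  { puts("mcc stop"); }
	static void mcc_start(void) { puts("mcc start"); }

	int main(void)
	{
		unsigned int active_roles = 2;

		active_roles--;   /* one vif just left its chanctx */

		mcc_stop();
		if (active_roles == MCC_ROLES)
			mcc_start();   /* still multi-role: re-plan */

		return 0;
	}
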
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 9b98d8f4ee9d..ffa412f281f3 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -38,6 +38,11 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_entity_weight {
+ unsigned int active_chanctxs;
+ unsigned int active_roles;
+};
+
static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index f37afb4cbb63..d9b66d43f32e 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,68 +129,75 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 2, .frptmap = 7, .fcxctrl = 7, .fcxinit = 7,
+ .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
.fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 1, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
- .info_buf = 1024, .max_role_num = 5,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
/* keep it to be the last as default entry */
@@ -198,8 +205,8 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
- .info_buf = 1024, .max_role_num = 5,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
};
@@ -351,17 +358,26 @@ enum btc_cx_poicy_type {
/* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
BTC_CXP_OFF_EQ3 = (BTC_CXP_OFF << 8) | 5,
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ4 = (BTC_CXP_OFF << 8) | 6,
+
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ5 = (BTC_CXP_OFF << 8) | 7,
+
/* TDMA off + pri: BT_Hi > WL > BT_Lo */
- BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 6,
+ BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 8,
/* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
- BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+ BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 9,
/* TDMA off + pri: WL_Hi-Tx > BT, BT_Hi > other-WL > BT_Lo */
- BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 8,
+ BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 10,
/* TDMA off + pri: WL_Hi-Tx = BT */
- BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 9,
+ BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 11,
+
+ /* TDMA off + pri: WL > BT, Block-BT */
+ BTC_CXP_OFF_WL2 = (BTC_CXP_OFF << 8) | 12,
/* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/
BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
@@ -676,20 +692,25 @@ static void _run_coex(struct rtw89_dev *rtwdev,
static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state);
static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update);
-static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
- void *param, u16 len)
+static int _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
+ void *param, u16 len)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_dm *dm = &btc->dm;
int ret;
- if (!wl->status.map.init_ok) {
+ if (len > BTC_H2C_MAXLEN || len == 0) {
+ btc->fwinfo.cnt_h2c_fail++;
+ dm->error.map.h2c_buffer_over = true;
+ return -EINVAL;
+ } else if (!wl->status.map.init_ok) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by btc not init!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
- return;
+ return -EINVAL;
} else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
(wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
@@ -697,20 +718,23 @@ static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by wl off!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
- return;
+ return -EINVAL;
}
- pfwinfo->cnt_h2c++;
-
ret = rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len,
false, true);
- if (ret != 0)
+ if (ret)
pfwinfo->cnt_h2c_fail++;
+ else
+ pfwinfo->cnt_h2c++;
+
+ return ret;
}
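
Two behavioral changes land in _send_fw_cmd: an up-front length guard against BTC_H2C_MAXLEN, and counters that are bumped only once the send result is known (previously cnt_h2c was incremented before sending, counting failures as successes). A hedged user-space model of the new flow; constants and names are stand-ins:

	#include <stdio.h>

	#define H2C_MAXLEN 2020

	struct fwinfo {
		unsigned int cnt_h2c;
		unsigned int cnt_h2c_fail;
	};

	static int transport_send(const void *buf, unsigned int len)
	{
		(void)buf;
		(void)len;
		return 0;   /* pretend the transport succeeded */
	}

	static int send_fw_cmd(struct fwinfo *fi, const void *buf, unsigned int len)
	{
		int ret;

		if (len == 0 || len > H2C_MAXLEN) {
			fi->cnt_h2c_fail++;
			return -22;   /* -EINVAL */
		}

		ret = transport_send(buf, len);
		if (ret)
			fi->cnt_h2c_fail++;
		else
			fi->cnt_h2c++;   /* count only confirmed sends */
		return ret;
	}

	int main(void)
	{
		struct fwinfo fi = { 0, 0 };
		char cmd[4] = "abc";

		send_fw_cmd(&fi, cmd, sizeof(cmd));
		printf("ok=%u fail=%u\n", fi.cnt_h2c, fi.cnt_h2c_fail);
		return 0;
	}
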
static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -728,7 +752,9 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_CTRL) {
memset(&btc->ctrl, 0, sizeof(btc->ctrl));
- btc->ctrl.trace_step = FCXDEF_STEP;
+ btc->manual_ctrl = false;
+ if (ver->fcxctrl != 7)
+ btc->ctrl.ctrl.trace_step = FCXDEF_STEP;
}
/* Init Coex variables that are not zero */
@@ -777,22 +803,27 @@ static void _get_reg_status(struct rtw89_dev *rtwdev, u8 type, u8 *val)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_module *md = &btc->mdinfo;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
union rtw89_btc_fbtc_mreg_val *pmreg;
u32 pre_agc_addr = R_BTC_BB_PRE_AGC_S1;
u32 reg_val;
- u8 idx;
+ u8 idx, switch_type;
- if (md->ant.btg_pos == RF_PATH_A)
+ if (ver->fcxinit == 7)
+ switch_type = md->md_v7.switch_type;
+ else
+ switch_type = md->md.switch_type;
+
+ if (btc->btg_pos == RF_PATH_A)
pre_agc_addr = R_BTC_BB_PRE_AGC_S0;
switch (type) {
case BTC_CSTATUS_TXDIV_POS:
- if (md->switch_type == BTC_SWITCH_INTERNAL)
+ if (switch_type == BTC_SWITCH_INTERNAL)
*val = BTC_ANT_DIV_MAIN;
break;
case BTC_CSTATUS_RXDIV_POS:
- if (md->switch_type == BTC_SWITCH_INTERNAL)
+ if (switch_type == BTC_SWITCH_INTERNAL)
*val = BTC_ANT_DIV_MAIN;
break;
case BTC_CSTATUS_BB_GNT_MUX:
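
A recurring shape throughout these coex changes: fields that used to live in one fixed struct (ctrl.trace_step, mdinfo.ant, init_info.module) now sit behind a union, and the firmware's reported version (fcxctrl or fcxinit equal to 7) selects which layout to read. A minimal model of that version-gated access, with made-up layouts:

	#include <stdint.h>
	#include <stdio.h>

	struct ctrl_legacy {
		uint32_t manual;
		uint32_t igno_bt;
		uint32_t trace_step;
	};

	struct ctrl_v7 {
		uint8_t igno_bt;
		uint8_t always_freerun;
	};

	union ctrl {
		struct ctrl_legacy ctrl;
		struct ctrl_v7 ctrl_v7;
	};

	static uint8_t get_igno_bt(const union ctrl *c, int fcxctrl)
	{
		/* same logical field, different layout per fw version */
		if (fcxctrl == 7)
			return c->ctrl_v7.igno_bt;
		return (uint8_t)c->ctrl.igno_bt;
	}

	int main(void)
	{
		union ctrl c = { .ctrl_v7 = { .igno_bt = 1 } };

		printf("igno_bt=%u\n", get_igno_bt(&c, 7));
		return 0;
	}
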
@@ -1117,7 +1148,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
u16 wl_slot_set = 0, wl_slot_real = 0;
- u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t = 0;
+ u32 trace_step = 0, rpt_len = 0, diff_t = 0;
u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr;
u8 i, val = 0;
@@ -1207,6 +1238,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ if (ver->fcxctrl != 7)
+ trace_step = btc->ctrl.ctrl.trace_step;
+
if (ver->fcxstep == 2) {
pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v2.step[0]) *
@@ -1920,6 +1954,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
struct rtw89_btc_btf_set_report r = {0};
u32 val, bit_map;
+ int ret;
if ((wl_smap->rf_off || wl_smap->lps != BTC_LPS_OFF) && rpt_state != 0)
return;
@@ -1938,13 +1973,13 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- fwinfo->rpt_en_map = val;
-
r.fver = BTF_SET_REPORT_VER;
r.enable = cpu_to_le32(val);
r.para = cpu_to_le32(rpt_state);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+ if (!ret)
+ fwinfo->rpt_en_map = val;
}
static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
@@ -2032,6 +2067,7 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ int ret;
dm->run_action = action;
@@ -2060,11 +2096,12 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
if (btc->lps == 1)
rtw89_set_coex_ctrl_lps(rtwdev, btc->lps);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
- btc->policy, btc->policy_len);
-
- memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
- memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
+ btc->policy, btc->policy_len);
+ if (!ret) {
+ memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
+ memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ }
if (btc->update_policy_force)
btc->update_policy_force = false;
@@ -2083,20 +2120,32 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
switch (type) {
case CXDRVINFO_INIT:
- rtw89_fw_h2c_cxdrv_init(rtwdev);
+ if (ver->fcxinit == 7)
+ rtw89_fw_h2c_cxdrv_init_v7(rtwdev, type);
+ else
+ rtw89_fw_h2c_cxdrv_init(rtwdev, type);
break;
case CXDRVINFO_ROLE:
if (ver->fwlrole == 0)
- rtw89_fw_h2c_cxdrv_role(rtwdev);
+ rtw89_fw_h2c_cxdrv_role(rtwdev, type);
else if (ver->fwlrole == 1)
- rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
+ rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
- rtw89_fw_h2c_cxdrv_role_v2(rtwdev);
+ rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
break;
case CXDRVINFO_CTRL:
- rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
+ if (ver->drvinfo_type == 1)
+ type = 2;
+
+ if (ver->fcxctrl == 7)
+ rtw89_fw_h2c_cxdrv_ctrl_v7(rtwdev, type);
+ else
+ rtw89_fw_h2c_cxdrv_ctrl(rtwdev, type);
break;
case CXDRVINFO_TRX:
+ if (ver->drvinfo_type == 1)
+ type = 3;
+
dm->trx_info.tx_power = u32_get_bits(rf_para.wl_tx_power,
RTW89_BTC_WL_DEF_TX_PWR);
dm->trx_info.rx_gain = u32_get_bits(rf_para.wl_rx_gain,
@@ -2107,11 +2156,18 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
RTW89_BTC_WL_DEF_TX_PWR);
dm->trx_info.cn = wl->cn_report;
dm->trx_info.nhm = wl->nhm.pwr;
- rtw89_fw_h2c_cxdrv_trx(rtwdev);
+ rtw89_fw_h2c_cxdrv_trx(rtwdev, type);
break;
case CXDRVINFO_RFK:
- rtw89_fw_h2c_cxdrv_rfk(rtwdev);
+ if (ver->drvinfo_type == 1)
+ return;
+
+ rtw89_fw_h2c_cxdrv_rfk(rtwdev, type);
break;
+ case CXDRVINFO_TXPWR:
+ case CXDRVINFO_FDDT:
+ case CXDRVINFO_MLO:
+ case CXDRVINFO_OSI:
default:
break;
}
@@ -2261,20 +2317,25 @@ static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ int ret;
u8 buf;
- if (bt->rf_para.tx_pwr_freerun == level)
+ if (btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE] == 0)
return;
- bt->rf_para.tx_pwr_freerun = level;
- btc->dm.rf_trx_para.bt_tx_power = level;
+ if (bt->rf_para.tx_pwr_freerun == level)
+ return;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): level = %d\n",
__func__, level);
buf = (s8)(-level);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+ if (!ret) {
+ bt->rf_para.tx_pwr_freerun = level;
+ btc->dm.rf_trx_para.bt_tx_power = level;
+ }
}
#define BTC_BT_RX_NORMAL_LVL 7
@@ -2284,6 +2345,9 @@ static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ if (btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE] == 0)
+ return;
+
if ((bt->rf_para.rx_gain_freerun == level ||
level > BTC_BT_RX_NORMAL_LVL) &&
(!rtwdev->chip->scbd || bt->lna_constrain == level))
@@ -2333,7 +2397,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
}
/* decide trx_para_level */
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
/* fix LNA2 + TIA gain not change by GNT_BT */
if ((btc->dm.wl_btg_rx && b->profile_cnt.now != 0) ||
dm->bt_only == 1)
@@ -2435,7 +2499,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
- if (btc->ctrl.manual || wl->status.map.scan)
+ if (btc->manual_ctrl || wl->status.map.scan)
return;
if (ver->fwlrole == 0) {
@@ -2560,8 +2624,16 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u8 isolation;
+
+ if (ver->fcxinit == 7)
+ isolation = md->md_v7.ant.isolation;
+ else
+ isolation = md->md.ant.isolation;
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
btc->dm.trx_para_level = 0;
return false;
}
@@ -2584,7 +2656,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
/* TODO get isolation by BT psd */
- if (btc->mdinfo.ant.isolation >= BTC_FREERUN_ANTISO_MIN) {
+ if (isolation >= BTC_FREERUN_ANTISO_MIN) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -2712,7 +2784,7 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
u8 type;
u32 tbl_w1, tbl_b1, tbl_b4;
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way)
tbl_w1 = cxtbl[1];
else
@@ -3023,12 +3095,13 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u8 type, null_role;
u32 tbl_w1, tbl_b1, tbl_b4;
type = FIELD_GET(BTC_CXP_MASK, policy_type);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way)
tbl_w1 = cxtbl[1];
else if (hid->exist && hid->type == BTC_HID_218)
@@ -3048,9 +3121,16 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
tbl_b4 = cxtbl[2];
}
} else {
- tbl_w1 = cxtbl[16];
tbl_b1 = cxtbl[17];
tbl_b4 = cxtbl[17];
+
+ if (wl->bg_mode)
+ tbl_w1 = cxtbl[8];
+ else if ((wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) &&
+ hid->exist)
+ tbl_w1 = cxtbl[19];
+ else
+ tbl_w1 = cxtbl[16];
}
btc->bt_req_en = false;
@@ -3615,7 +3695,7 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /*wl-busy + bt idle*/
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */
@@ -3654,7 +3734,7 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way) {
_set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
@@ -3664,7 +3744,12 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
}
} else {
- _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
+ if (wl->bg_mode)
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ5, BTC_ACT_BT_HFP);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
}
}
@@ -3679,7 +3764,7 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (wl->status.map._4way) {
policy_type = BTC_CXP_OFF_WL;
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
@@ -3697,7 +3782,12 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
policy_type = BTC_CXP_OFF_BWB1;
}
} else { /* dedicated-antenna */
- policy_type = BTC_CXP_OFF_EQ3;
+ if (wl->bg_mode)
+ policy_type = BTC_CXP_OFF_BWB1;
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ policy_type = BTC_CXP_OFF_EQ4;
+ else
+ policy_type = BTC_CXP_OFF_EQ3;
}
_set_policy(rtwdev, policy_type, BTC_ACT_BT_HID);
@@ -3947,7 +4037,7 @@ static void _action_wl_other(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ if (btc->ant_type == BTC_ANT_SHARED)
_set_policy(rtwdev, BTC_CXP_OFFB_BWB0, BTC_ACT_WL_OTHER);
else
_set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_OTHER);
@@ -3991,7 +4081,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
u32 is_btg;
u8 i, val;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (ver->fwlrole == 0)
@@ -4063,7 +4153,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_dm *dm = &btc->dm;
u8 is_preagc, val;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
@@ -4083,7 +4173,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ else if (btc->ant_type == BTC_ANT_SHARED)
is_preagc = BTC_PREAGC_DISABLE;
else
is_preagc = BTC_PREAGC_ENABLE;
@@ -4187,13 +4277,12 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
- u8 mode;
- u8 tx_retry;
+ u8 mode, igno_bt, tx_retry;
u32 tx_time;
u16 enable;
bool reenable = false;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (ver->fwlrole == 0)
@@ -4205,7 +4294,12 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
else
return;
- if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
+ if (ver->fcxctrl == 7)
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ else
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+
+ if (btc->dm.freerun || igno_bt || b->profile_cnt.now == 0 ||
mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
enable = 0;
tx_time = BTC_MAX_TX_TIME_DEF;
@@ -4402,7 +4496,7 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ if (btc->ant_type == BTC_ANT_SHARED)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF,
BTC_RSN_NTFY_SCAN_START);
else
@@ -4430,7 +4524,7 @@ static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_25G_MCC);
@@ -4447,7 +4541,7 @@ static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_2G_MCC);
@@ -4465,7 +4559,7 @@ static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_SCC);
@@ -4487,7 +4581,7 @@ static void _action_wl_2g_scc_v1(struct rtw89_dev *rtwdev)
u16 policy_type = BTC_CXP_OFF_BT;
u32 dur;
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
@@ -4549,7 +4643,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
u16 policy_type = BTC_CXP_OFF_BT;
u32 dur;
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
@@ -4607,7 +4701,7 @@ static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_2G_AP);
@@ -4624,7 +4718,7 @@ static void _action_wl_2g_go(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_GO);
@@ -4642,7 +4736,7 @@ static void _action_wl_2g_gc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
_action_by_bt(rtwdev);
} else {/* dedicated-antenna */
_set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GC);
@@ -4655,7 +4749,7 @@ static void _action_wl_2g_nan(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_NAN);
@@ -5351,7 +5445,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
- u8 mode;
+ u8 mode, igno_bt, always_freerun;
lockdep_assert_held(&rtwdev->mutex);
@@ -5368,20 +5462,28 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
else
return;
+ if (ver->fcxctrl == 7) {
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ always_freerun = btc->ctrl.ctrl_v7.always_freerun;
+ } else {
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+ always_freerun = btc->ctrl.ctrl.always_freerun;
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
__func__, reason, mode);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
__func__, dm->wl_only, dm->bt_only);
/* Be careful to change the following function sequence!! */
- if (btc->ctrl.manual) {
+ if (btc->manual_ctrl) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return for Manual CTRL!!\n",
__func__);
return;
}
- if (btc->ctrl.igno_bt &&
+ if (igno_bt &&
(reason == BTC_RSN_UPDATE_BT_INFO ||
reason == BTC_RSN_UPDATE_BT_SCBD)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -5418,24 +5520,24 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
dm->freerun = false;
dm->cnt_dm[BTC_DCNT_RUN]++;
dm->fddt_train = BTC_FDDT_DISABLE;
- btc->ctrl.igno_bt = false;
bt->scan_rx_low_pri = false;
+ igno_bt = false;
- if (btc->ctrl.always_freerun) {
+ if (always_freerun) {
_action_freerun(rtwdev);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
if (dm->wl_only) {
_action_wl_only(rtwdev);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) {
_action_wl_off(rtwdev, mode);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
@@ -5525,6 +5627,10 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
exit:
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): exit\n", __func__);
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.igno_bt = igno_bt;
+ else
+ btc->ctrl.ctrl.igno_bt = igno_bt;
_action_common(rtwdev);
}
@@ -5560,16 +5666,26 @@ static void _set_init_info(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- dm->init_info.wl_only = (u8)dm->wl_only;
- dm->init_info.bt_only = (u8)dm->bt_only;
- dm->init_info.wl_init_ok = (u8)wl->status.map.init_ok;
- dm->init_info.dbcc_en = rtwdev->dbcc_en;
- dm->init_info.cx_other = btc->cx.other.type;
- dm->init_info.wl_guard_ch = chip->afh_guard_ch;
- dm->init_info.module = btc->mdinfo;
+ if (ver->fcxinit == 7) {
+ dm->init_info.init_v7.wl_only = (u8)dm->wl_only;
+ dm->init_info.init_v7.bt_only = (u8)dm->bt_only;
+ dm->init_info.init_v7.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.init_v7.cx_other = btc->cx.other.type;
+ dm->init_info.init_v7.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.init_v7.module = btc->mdinfo.md_v7;
+ } else {
+ dm->init_info.init.wl_only = (u8)dm->wl_only;
+ dm->init_info.init.bt_only = (u8)dm->bt_only;
+ dm->init_info.init.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.init.dbcc_en = rtwdev->dbcc_en;
+ dm->init_info.init.cx_other = btc->cx.other.type;
+ dm->init_info.init.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.init.module = btc->mdinfo.md;
+ }
}
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
@@ -5578,11 +5694,15 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = btc->ver;
_reset_btc_var(rtwdev, BTC_RESET_ALL);
btc->dm.run_reason = BTC_RSN_NONE;
btc->dm.run_action = BTC_ACT_NONE;
- btc->ctrl.igno_bt = true;
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.igno_bt = true;
+ else
+ btc->ctrl.ctrl.igno_bt = true;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): mode=%d\n", __func__, mode);
@@ -6298,7 +6418,7 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
if (BTC_RSSI_LOW(link_info->rssi_state[i]))
rssi_map |= BIT(i);
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED &&
+ if (btc->ant_type == BTC_ANT_DEDICATED &&
BTC_RSSI_CHANGE(link_info->rssi_state[i]))
is_sta_change = true;
}
@@ -6489,13 +6609,16 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0;
+ u8 cv, rfe, iso, ant_num, ant_single_pos;
if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
return;
@@ -6545,11 +6668,24 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_main, ver_sub, ver_hotfix, id_branch,
bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
+ if (ver->fcxinit == 7) {
+ cv = md->md_v7.kt_ver;
+ rfe = md->md_v7.rfe_type;
+ iso = md->md_v7.ant.isolation;
+ ant_num = md->md_v7.ant.num;
+ ant_single_pos = md->md_v7.ant.single_pos;
+ } else {
+ cv = md->md.cv;
+ rfe = md->md.rfe_type;
+ iso = md->md.ant.isolation;
+ ant_num = md->md.ant.num;
+ ant_single_pos = md->md.ant.single_pos;
+ }
+
seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
- "[hw_info]", btc->mdinfo.cv, btc->mdinfo.rfe_type,
- btc->mdinfo.ant.isolation, btc->mdinfo.ant.num,
- (btc->mdinfo.ant.num > 1 ? "" : (btc->mdinfo.ant.single_pos ?
- "1Ant_Pos:S1, " : "1Ant_Pos:S0, ")));
+ "[hw_info]", cv, rfe, iso, ant_num,
+ ant_num > 1 ? "" :
+ ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
@@ -6722,20 +6858,26 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_info *wl = &cx->wl;
- struct rtw89_btc_module *module = &btc->mdinfo;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
u8 *afh = bt_linfo->afh_map;
u8 *afh_le = bt_linfo->afh_map_le;
+ u8 bt_pos;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
return;
+ if (ver->fcxinit == 7)
+ bt_pos = md->md_v7.bt_pos;
+ else
+ bt_pos = md->md.bt_pos;
+
seq_puts(m, "========== [BT Status] ==========\n");
seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ",
"[status]", bt->enable.now ? "Y" : "N",
bt->btg_type ? "Y" : "N",
- (bt->enable.now && (bt->btg_type != module->bt_pos) ?
+ (bt->enable.now && (bt->btg_type != bt_pos) ?
"(efuse-mismatch!!)" : ""),
(bt_linfo->status.map.connect ? "Y" : "N"));
@@ -6934,10 +7076,13 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(OFF_EQ1);
CASE_BTC_POLICY_STR(OFF_EQ2);
CASE_BTC_POLICY_STR(OFF_EQ3);
+ CASE_BTC_POLICY_STR(OFF_EQ4);
+ CASE_BTC_POLICY_STR(OFF_EQ5);
CASE_BTC_POLICY_STR(OFF_BWB0);
CASE_BTC_POLICY_STR(OFF_BWB1);
CASE_BTC_POLICY_STR(OFF_BWB2);
CASE_BTC_POLICY_STR(OFF_BWB3);
+ CASE_BTC_POLICY_STR(OFF_WL2);
CASE_BTC_POLICY_STR(OFFB_BWB0);
CASE_BTC_POLICY_STR(OFFE_DEF);
CASE_BTC_POLICY_STR(OFFE_DEF2);
@@ -7123,21 +7268,22 @@ static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ u8 igno_bt;
if (!(dm->coex_info_map & BTC_COEX_INFO_DM))
return;
seq_printf(m, "========== [Mechanism Status %s] ==========\n",
- (btc->ctrl.manual ? "(Manual)" : "(Auto)"));
+ (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
seq_printf(m,
" %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
"[status]",
- module->ant.type == BTC_ANT_SHARED ? "shared" : "dedicated",
+ btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
steps_to_str(dm->run_reason),
steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
@@ -7146,8 +7292,13 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_step(rtwdev, m);
+ if (ver->fcxctrl == 7)
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ else
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+
seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
- "[dm_flag]", dm->wl_only, dm->bt_only, btc->ctrl.igno_bt,
+ "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
dm->freerun, btc->lps, dm->wl_mimo_ps);
seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
@@ -7888,10 +8039,11 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
+ const struct rtw89_btc_ver *ver = btc->ver;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
- u16 pos_old, pos_new;
+ u16 pos_old, pos_new, trace_step;
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
@@ -7908,11 +8060,16 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
do {
switch (state) {
case 0:
+ if (ver->fcxctrl == 7 || ver->fcxctrl == 1)
+ trace_step = 50;
+ else
+ trace_step = btc->ctrl.ctrl.trace_step;
+
n_start = pos_old;
if (pos_new >= pos_old)
n_stop = pos_new;
else
- n_stop = btc->ctrl.trace_step - 1;
+ n_stop = trace_step - 1;
state = 1;
break;
@@ -8742,7 +8899,7 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n",
fw_suit->major_ver, fw_suit->minor_ver,
fw_suit->sub_ver, fw_suit->sub_idex);
- seq_printf(m, "manual %d\n", btc->ctrl.manual);
+ seq_printf(m, "manual %d\n", btc->manual_ctrl);
seq_puts(m, "=========================================\n");
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 46e25c6f88a6..13303830684e 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -7,6 +7,8 @@
#include "core.h"
+#define BTC_H2C_MAXLEN 2020
+
enum btc_mode {
BTC_MODE_NORMAL,
BTC_MODE_WL,
@@ -23,6 +25,7 @@ enum btc_wl_rfk_type {
BTC_WRFKT_DACK = 4,
BTC_WRFKT_RXDCK = 5,
BTC_WRFKT_TSSI = 6,
+ BTC_WRFKT_CHLK = 7,
};
#define NM_EXEC false
@@ -152,6 +155,10 @@ enum btc_lps_state {
#define BTC_REG_NOTFOUND 0xff
+#define R_BTC_ZB_COEX_TBL_0 0xE328
+#define R_BTC_ZB_COEX_TBL_1 0xE32c
+#define R_BTC_ZB_BREAK_TBL 0xE350
+
enum btc_ant_div_pos {
BTC_ANT_DIV_MAIN = 0,
BTC_ANT_DIV_AUX = 1,
@@ -180,6 +187,20 @@ enum btc_btgctrl_type {
BTC_BTGCTRL_BB_GNT_NOTFOUND,
};
+enum btc_wa_type {
+ BTC_WA_5G_HI_CH_RX = BIT(0),
+ BTC_WA_NULL_AP = BIT(1),
+ BTC_WA_HFP_ZB = BIT(2), /* HFP PTA req bit4 define issue */
+};
+
+enum btc_3cx_type {
+ BTC_3CX_NONE = 0,
+ BTC_3CX_BT2 = BIT(0),
+ BTC_3CX_ZB = BIT(1),
+ BTC_3CX_LTE = BIT(2),
+ BTC_3CX_MAX,
+};
+
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index fd527a249996..d474b8d5df3d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -372,7 +372,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
-void rtw89_set_channel(struct rtw89_dev *rtwdev)
+int rtw89_set_channel(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -399,7 +399,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
- return;
+ return -EINVAL;
}
roc_idx = atomic_read(&hal->roc_entity_idx);
@@ -426,6 +426,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
}
rtw89_set_entity_state(rtwdev, true);
+ return 0;
}
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
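[annotation] rtw89_set_channel() now reports failure instead of silently returning, so callers can abort a channel switch when the entity mode is invalid. A hedged caller-side sketch (the caller name is hypothetical; the only error this hunk introduces is -EINVAL):

	/* Hypothetical caller: propagate the new error instead of ignoring it. */
	static int example_switch_channel(struct rtw89_dev *rtwdev)
	{
		int ret = rtw89_set_channel(rtwdev);

		if (ret) /* -EINVAL on an invalid entity mode */
			return ret;

		return 0;
	}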
@@ -1176,7 +1177,8 @@ static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
@@ -1329,7 +1331,8 @@ static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
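[annotation] Both TX descriptor builders (AX and BE) now key RTS on the frame type: broadcast/multicast frames have no single receiver to answer with CTS, so rts_en is cleared for them while hardware RTS handling stays enabled. A runnable illustration of the resulting dword (bit positions are illustrative only, not the real RTW89_TXWD_INFO4_* / BE_TXD_INFO4_* masks):

	#include <stdint.h>
	#include <stdio.h>

	#define INFO4_RTS_EN    (1u << 0) /* illustrative positions only */
	#define INFO4_HW_RTS_EN (1u << 1)

	static uint32_t build_info4(int is_bmc)
	{
		uint32_t rts_en = !is_bmc; /* mirrors the change above */

		return (rts_en ? INFO4_RTS_EN : 0) | INFO4_HW_RTS_EN;
	}

	int main(void)
	{
		printf("unicast=0x%x bmc=0x%x\n", build_info4(0), build_info4(1));
		return 0;
	}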
@@ -1866,6 +1869,17 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
}
+static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif *rtwvif,
+ struct ieee80211_hdr *hdr, size_t len)
+{
+ struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr;
+
+ if (len < offsetof(typeof(*mgmt), u.beacon.variable))
+ return;
+
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
+}
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -1896,8 +1910,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
return;
if (ieee80211_is_beacon(hdr->frame_control)) {
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
+ }
pkt_stat->beacon_nr++;
}
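[annotation] rtw89_vif_sync_bcn_tsf() validates that the frame is long enough to contain the fixed beacon fields before publishing the 64-bit TSF with WRITE_ONCE(). Any consumer on another context would pair that with READ_ONCE(); a hedged sketch (the consumer function is hypothetical):

	/* Hypothetical consumer: tear-free read of the last synced beacon TSF. */
	static u64 example_get_sync_bcn_tsf(struct rtw89_vif *rtwvif)
	{
		return READ_ONCE(rtwvif->sync_bcn_tsf);
	}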
@@ -3345,6 +3361,14 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
return ret;
}
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
}
@@ -3393,7 +3417,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3442,7 +3466,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
}
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3485,6 +3509,8 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "failed to send h2c general packet\n");
return ret;
}
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
}
return ret;
@@ -3611,7 +3637,8 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
};
const struct rtw89_chip_info *chip = rtwdev->chip;
- const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
+ const __le16 *highest = chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160) ?
+ highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -3640,34 +3667,34 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (chip->support_bw160)
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
-}
-#define RTW89_SBAND_IFTYPES_NR 2
+ if (ieee80211_hw_check(rtwdev->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+}
static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
enum nl80211_band band,
- struct ieee80211_supported_band *sband)
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
- struct ieee80211_sband_iftype_data *iftype_data;
bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
(chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
+ struct ieee80211_sta_he_cap *he_cap;
+ int nss = hal->rx_nss;
+ u8 *mac_cap_info;
+ u8 *phy_cap_info;
u16 mcs_map = 0;
int i;
- int nss = hal->rx_nss;
- int idx = 0;
-
- iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
- if (!iftype_data)
- return;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -3676,12 +3703,196 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
}
- for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
- struct ieee80211_sta_he_cap *he_cap;
- u8 *mac_cap_info;
- u8 *phy_cap_info;
+ he_cap = &iftype_data->he_cap;
+ mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
+ phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
+
+ he_cap->has_he = true;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+ mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
+ IEEE80211_HE_MAC_CAP2_BSR;
+ mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
+ mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
+ phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
+ phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
+ phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ phy_cap_info[5] = no_ng16 ? 0 :
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
+ phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
- switch (i) {
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data->he_6ghz_capa.capa = capa;
+ }
+}
+
+static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct ieee80211_eht_cap_elem_fixed *eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss;
+ struct ieee80211_sta_eht_cap *eht_cap;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool support_320mhz = false;
+ int sts = 8;
+ u8 val;
+
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ return;
+
+ if (band == NL80211_BAND_6GHZ &&
+ chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
+ support_320mhz = true;
+
+ eht_cap = &iftype_data->eht_cap;
+ eht_cap_elem = &eht_cap->eht_cap_elem;
+ eht_nss = &eht_cap->eht_mcs_nss_supp;
+
+ eht_cap->has_eht = true;
+
+ eht_cap_elem->mac_cap_info[0] =
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ eht_cap_elem->mac_cap_info[1] = 0;
+
+ eht_cap_elem->phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[0] |=
+ u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+ eht_cap_elem->phy_cap_info[1] =
+ u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] = 0;
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ u8_encode_bits(1, IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ eht_cap_elem->phy_cap_info[6] = 0;
+ eht_cap_elem->phy_cap_info[7] = 0;
+ eht_cap_elem->phy_cap_info[8] = 0;
+
+ val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
+ if (support_320mhz) {
+ eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs13_max_nss = val;
+ }
+}
+
+#define RTW89_SBAND_IFTYPES_NR 2
+
+static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_sband_iftype_data *iftype_data;
+ enum nl80211_iftype iftype;
+ int idx = 0;
+
+ iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ if (!iftype_data)
+ return;
+
+ for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+ switch (iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
break;
@@ -3694,92 +3905,10 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
break;
}
- iftype_data[idx].types_mask = BIT(i);
- he_cap = &iftype_data[idx].he_cap;
- mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
- phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
-
- he_cap->has_he = true;
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
- mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
- IEEE80211_HE_MAC_CAP2_BSR;
- mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
- mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- if (band == NL80211_BAND_2GHZ) {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- } else {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
- if (chip->support_bw160)
- phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- }
- phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
- phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
- phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
- if (i == NL80211_IFTYPE_AP)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
- phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
- if (chip->support_bw160)
- phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
- phy_cap_info[5] = no_ng16 ? 0 :
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
- phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
- phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1;
- phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
- if (chip->support_bw160)
- phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
- phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
- he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
- if (chip->support_bw160) {
- he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
- }
-
- if (band == NL80211_BAND_6GHZ) {
- __le16 capa;
+ iftype_data[idx].types_mask = BIT(iftype);
- capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
- le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
- IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
- iftype_data[idx].he_6ghz_capa.capa = capa;
- }
+ rtw89_init_he_cap(rtwdev, band, iftype, &iftype_data[idx]);
+ rtw89_init_eht_cap(rtwdev, band, iftype, &iftype_data[idx]);
idx++;
}
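[annotation] This refactor splits the old per-iftype loop body into rtw89_init_he_cap() and a new rtw89_init_eht_cap(), each filling one iftype_data slot, and replaces the support_bw160 bool with a support_bandwidths bitmap indexed by enum nl80211_chan_width so 320 MHz chips fit the same test. A standalone check of the bitmap idiom (the two width values mirror nl80211.h):

	#include <stdint.h>
	#include <stdio.h>

	enum { CHAN_WIDTH_160 = 5, CHAN_WIDTH_320 = 13 }; /* as in nl80211.h */

	int main(void)
	{
		/* Reduced: only the bit the 160 MHz tests above inspect. */
		uint16_t support_bandwidths = 1u << CHAN_WIDTH_160;

		printf("160: %d\n", !!(support_bandwidths & (1u << CHAN_WIDTH_160)));
		printf("320: %d\n", !!(support_bandwidths & (1u << CHAN_WIDTH_320)));
		return 0;
	}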
@@ -3800,7 +3929,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
if (!sband_2ghz)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
}
@@ -3810,7 +3939,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
}
@@ -3818,7 +3947,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
if (!sband_6ghz)
goto err;
- rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
}
@@ -3879,7 +4008,7 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
rtwdev = rtwvif->rtwdev;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
mutex_unlock(&rtwdev->mutex);
}
@@ -3944,7 +4073,6 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
- rtwdev->mac.qta_mode = RTW89_QTA_SCC;
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -3961,6 +4089,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
return ret;
rtw89_phy_init_bb_reg(rtwdev);
+ rtw89_chip_bb_postinit(rtwdev);
rtw89_phy_init_rf_reg(rtwdev, false);
rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
@@ -3983,6 +4112,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+ rtw89_chip_rfk_init_late(rtwdev);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
@@ -4078,6 +4208,15 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
+ rtwdev->dbcc_en = false;
+ rtwdev->mlo_dbcc_mode = MLO_DBCC_NOT_SUPPORT;
+ rtwdev->mac.qta_mode = RTW89_QTA_SCC;
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ rtwdev->dbcc_en = true;
+ rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
+ rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
+ }
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
@@ -4085,6 +4224,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
init_completion(&rtwdev->fw.req.completion);
+ init_completion(&rtwdev->rfk_wait.completion);
schedule_work(&rtwdev->load_firmware_work);
@@ -4290,6 +4430,7 @@ EXPORT_SYMBOL(rtw89_chip_info_setup);
static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
@@ -4324,8 +4465,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- /* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */
- ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -4362,6 +4503,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->max_remain_on_channel_duration = 1000;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
ret = rtw89_core_set_supported_band(rtwdev);
if (ret) {
@@ -4453,9 +4596,10 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);
if (no_chanctx) {
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->remain_on_channel = NULL;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index ea6df859ba15..2e854c9af709 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,6 +17,7 @@ struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
struct rtw89_efuse_block_cfg;
+struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
@@ -32,6 +33,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define MASKDWORD 0xffffffff
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
+#define BYPASS_CR_DATA 0xbabecafe
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
@@ -878,7 +880,7 @@ enum rtw89_ps_mode {
#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
-#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
@@ -956,6 +958,9 @@ struct rtw89_port_reg {
u32 mbssid;
u32 mbssid_drop;
u32 tsf_sync;
+ u32 ptcl_dbg;
+ u32 ptcl_dbg_info;
+ u32 bcn_drop_all;
u32 hiq_win[RTW89_PORT_NUM];
};
@@ -1146,9 +1151,15 @@ struct rtw89_mac_ax_gnt {
u8 gnt_wl;
} __packed;
+struct rtw89_mac_ax_wl_act {
+ u8 wlan_act_en;
+ u8 wlan_act;
+};
+
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
struct rtw89_mac_ax_gnt band[RTW89_MAC_AX_COEX_GNT_NR];
+ struct rtw89_mac_ax_wl_act bt[RTW89_MAC_AX_COEX_GNT_NR];
};
enum rtw89_btc_ncnt {
@@ -1266,6 +1277,18 @@ struct rtw89_btc_ant_info {
u8 stream_cnt: 4;
};
+struct rtw89_btc_ant_info_v7 {
+ u8 type; /* shared, dedicated(non-shared) */
+ u8 num; /* antenna count */
+ u8 isolation;
+ u8 single_pos; /* wifi 1ss-1ant at 0:S0 or 1:S1 */
+
+ u8 diversity; /* only for wifi use 1-antenna */
+ u8 btg_pos; /* btg-circuit at 0:S0/1:S1/others:all */
+ u8 stream_cnt; /* spatial_stream count */
+ u8 rsvd;
+} __packed;
+
enum rtw89_tfc_dir {
RTW89_TFC_UL,
RTW89_TFC_DL,
@@ -1660,6 +1683,16 @@ struct rtw89_btc_dm_emap {
u32 wl_e2g_hang: 1;
u32 wl_ver_mismatch: 1;
u32 bt_ver_mismatch: 1;
+ u32 rfe_type0: 1;
+ u32 h2c_buffer_over: 1;
+ u32 bt_tx_hang: 1; /* for SNR-too-low bug, BT has no Tx req */
+ u32 wl_no_sta_ntfy: 1;
+
+ u32 h2c_bmap_mismatch: 1;
+ u32 c2h_bmap_mismatch: 1;
+ u32 h2c_struct_invalid: 1;
+ u32 c2h_struct_invalid: 1;
+ u32 h2c_c2h_buffer_mismatch: 1;
};
union rtw89_btc_dm_error_map {
@@ -1708,6 +1741,7 @@ struct rtw89_btc_wl_info {
u8 cn_report;
u8 coex_mode;
+ bool bg_mode;
bool scbd_change;
u32 scbd;
};
@@ -1725,6 +1759,25 @@ struct rtw89_btc_module {
u8 kt_ver_adie;
};
+struct rtw89_btc_module_v7 {
+ u8 rfe_type;
+ u8 kt_ver;
+ u8 bt_solo;
+ u8 bt_pos; /* wl-end view: get from efuse, must compare bt.btg_type */
+
+ u8 switch_type; /* WL/BT switch type: 0: internal, 1: external */
+ u8 wa_type; /* WA type: 0:none, 1: 51B 5G_Hi-Ch_Rx */
+ u8 kt_ver_adie;
+ u8 rsvd;
+
+ struct rtw89_btc_ant_info_v7 ant;
+} __packed;
+
+union rtw89_btc_module_info {
+ struct rtw89_btc_module md;
+ struct rtw89_btc_module_v7 md_v7;
+};
+
#define RTW89_BTC_DM_MAXSTEP 30
#define RTW89_BTC_DM_CNT_MAX (RTW89_BTC_DM_MAXSTEP * 8)
@@ -1747,6 +1800,25 @@ struct rtw89_btc_init_info {
u16 rsvd;
};
+struct rtw89_btc_init_info_v7 {
+ u8 wl_guard_ch;
+ u8 wl_only;
+ u8 wl_init_ok;
+ u8 rsvd3;
+
+ u8 cx_other;
+ u8 bt_only;
+ u8 pta_mode;
+ u8 pta_direction;
+
+ struct rtw89_btc_module_v7 module;
+} __packed;
+
+union rtw89_btc_init_info_u {
+ struct rtw89_btc_init_info init;
+ struct rtw89_btc_init_info_v7 init_v7;
+};
+
struct rtw89_btc_wl_tx_limit_para {
u16 enable;
u32 tx_time; /* unit: us */
@@ -2485,7 +2557,7 @@ struct rtw89_btc_dm {
struct rtw89_btc_fbtc_tdma tdma;
struct rtw89_btc_fbtc_tdma tdma_now;
struct rtw89_mac_ax_coex_gnt gnt;
- struct rtw89_btc_init_info init_info; /* pass to wl_fw if offload */
+ union rtw89_btc_init_info_u init_info; /* pass to wl_fw if offload */
struct rtw89_btc_rf_trx_para rf_trx_para;
struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
struct rtw89_btc_dm_step dm_step;
@@ -2534,6 +2606,18 @@ struct rtw89_btc_ctrl {
u32 rsvd: 12;
};
+struct rtw89_btc_ctrl_v7 {
+ u8 manual;
+ u8 igno_bt;
+ u8 always_freerun;
+ u8 rsvd;
+} __packed;
+
+union rtw89_btc_ctrl_list {
+ struct rtw89_btc_ctrl ctrl;
+ struct rtw89_btc_ctrl_v7 ctrl_v7;
+};
+
struct rtw89_btc_dbg {
/* cmd "rb" */
bool rb_done;
@@ -2706,7 +2790,9 @@ struct rtw89_btc_ver {
u8 fwlrole;
u8 frptmap;
u8 fcxctrl;
+ u8 fcxinit;
+ u8 drvinfo_type;
u16 info_buf;
u8 max_role_num;
};
@@ -2718,8 +2804,8 @@ struct rtw89_btc {
struct rtw89_btc_cx cx;
struct rtw89_btc_dm dm;
- struct rtw89_btc_ctrl ctrl;
- struct rtw89_btc_module mdinfo;
+ union rtw89_btc_ctrl_list ctrl;
+ union rtw89_btc_module_info mdinfo;
struct rtw89_btc_btf_fwinfo fwinfo;
struct rtw89_btc_dbg dbg;
@@ -2731,11 +2817,14 @@ struct rtw89_btc {
u32 bt_req_len;
u8 policy[RTW89_BTC_POLICY_MAXLEN];
+ u8 ant_type;
+ u8 btg_pos;
u16 policy_len;
u16 policy_type;
bool bt_req_en;
bool update_policy_force;
bool lps;
+ bool manual_ctrl;
};
enum rtw89_btc_hmsg {
@@ -2875,7 +2964,7 @@ struct rtw89_ba_cam_entry {
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
-#define RTW89_MAX_BA_CAM_NUM 8
+#define RTW89_MAX_BA_CAM_NUM 24
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -2932,6 +3021,7 @@ struct rtw89_sta {
struct ewma_evm evm_min[RF_PATH_MAX];
struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
@@ -3041,6 +3131,7 @@ struct rtw89_vif {
u8 bcn_hit_cond;
u8 hit_rule;
u8 last_noa_nr;
+ u64 sync_bcn_tsf;
bool offchan;
bool trigger;
bool lsig_txop;
@@ -3111,7 +3202,7 @@ struct rtw89_hci_ops {
void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
void (*ctrl_trxhci)(struct rtw89_dev *rtwdev, bool enable);
- int (*poll_txdma_ch)(struct rtw89_dev *rtwdev);
+ int (*poll_txdma_ch_idle)(struct rtw89_dev *rtwdev);
void (*clr_idx_all)(struct rtw89_dev *rtwdev);
void (*clear)(struct rtw89_dev *rtwdev, struct pci_dev *pdev);
void (*disable_intr)(struct rtw89_dev *rtwdev);
@@ -3131,6 +3222,7 @@ struct rtw89_chip_ops {
int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_preinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*bb_postinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -3152,7 +3244,9 @@ struct rtw89_chip_ops {
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfe_gpio)(struct rtw89_dev *rtwdev);
+ void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
+ void (*rfk_init_late)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
@@ -3196,6 +3290,22 @@ struct rtw89_chip_ops {
int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+ int (*h2c_default_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_assoc_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_update_beacon)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
+ int (*h2c_ba_cam)(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -3225,8 +3335,62 @@ enum rtw89_dma_ch {
RTW89_DMA_CH_NUM = 13
};
+#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))
+
+enum rtw89_mlo_dbcc_mode {
+ MLO_DBCC_NOT_SUPPORT = 1,
+ MLO_0_PLUS_2_1RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 1),
+ MLO_0_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 2),
+ MLO_1_PLUS_1_1RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 1),
+ MLO_1_PLUS_1_2RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2),
+ MLO_2_PLUS_0_1RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1),
+ MLO_2_PLUS_0_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 2),
+ MLO_2_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 2, 2),
+ DBCC_LEGACY = 0xffffffff,
+};
+
+enum rtw89_scan_be_operation {
+ RTW89_SCAN_OP_STOP,
+ RTW89_SCAN_OP_START,
+ RTW89_SCAN_OP_SETPARM,
+ RTW89_SCAN_OP_GETRPT,
+ RTW89_SCAN_OP_NUM
+};
+
+enum rtw89_scan_be_mode {
+ RTW89_SCAN_MODE_SA,
+ RTW89_SCAN_MODE_MACC,
+ RTW89_SCAN_MODE_NUM
+};
+
+enum rtw89_scan_be_opmode {
+ RTW89_SCAN_OPMODE_NONE,
+ RTW89_SCAN_OPMODE_TBTT,
+ RTW89_SCAN_OPMODE_INTV,
+ RTW89_SCAN_OPMODE_CNT,
+ RTW89_SCAN_OPMODE_NUM,
+};
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+ u8 num_macc_role;
+ u8 num_opch;
+ u8 repeat;
+ u16 norm_pd;
+ u16 slow_pd;
+ u16 norm_cy;
+ u8 opch_end;
+ u64 prohib_chan;
+ enum rtw89_phy_idx band;
+ enum rtw89_scan_be_operation operation;
+ enum rtw89_scan_be_mode scan_mode;
+ enum rtw89_mlo_dbcc_mode mlo_mode;
+};
+
enum rtw89_qta_mode {
RTW89_QTA_SCC,
+ RTW89_QTA_DBCC,
RTW89_QTA_DLFW,
RTW89_QTA_WOW,
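[annotation] MLO_MODE_FOR_BB0_BB1_RF() packs the RF count and the two per-BB link counts into separate nibbles, so each mode name doubles as a readable hex code (read it as <rf>0<bb1><bb0>). Verifying the encoding standalone:

	#include <stdio.h>

	#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))

	int main(void)
	{
		/* 2 links on BB0, none on BB1, one RF -> 0x1002 */
		printf("MLO_2_PLUS_0_1RF = 0x%04x\n", MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1));
		/* 1+1 links across both BBs on two RFs -> 0x2011 */
		printf("MLO_1_PLUS_1_2RF = 0x%04x\n", MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2));
		return 0;
	}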
@@ -3713,7 +3877,7 @@ struct rtw89_chip_info {
u32 rf_base_addr[2];
u8 support_chanctx_num;
u8 support_bands;
- bool support_bw160;
+ u16 support_bandwidths;
bool support_unii4;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
@@ -3790,6 +3954,7 @@ struct rtw89_chip_info {
const u32 *c2h_regs;
struct rtw89_reg_def c2h_counter_reg;
const struct rtw89_page_regs *page_regs;
+ u32 wow_reason_reg;
bool cfo_src_fd;
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
@@ -3838,7 +4003,7 @@ enum rtw89_host_rpr_mode {
RTW89_RPR_MODE_STF
};
-#define RTW89_COMPLETION_BUF_SIZE 24
+#define RTW89_COMPLETION_BUF_SIZE 40
#define RTW89_WAIT_COND_IDLE UINT_MAX
struct rtw89_completion_data {
@@ -3897,6 +4062,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_DEEP_PS,
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
+ RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
};
struct rtw89_fw_suit {
@@ -3957,6 +4123,19 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
};
+enum rtw89_fw_mss_dev_type {
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_INV = 0xFF,
+};
+
+struct rtw89_fw_secure {
+ bool secure_boot;
+ u32 sb_sel_mgn;
+ u8 mss_dev_type;
+ u8 mss_cust_idx;
+ u8 mss_key_num;
+};
+
struct rtw89_fw_info {
struct rtw89_fw_req_info req;
int fw_format;
@@ -3971,6 +4150,7 @@ struct rtw89_fw_info {
struct rtw89_fw_log log;
u32 feature_map;
struct rtw89_fw_elm_info elm_info;
+ struct rtw89_fw_secure sec;
};
#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
@@ -4045,6 +4225,7 @@ struct rtw89_tas_info {
struct rtw89_chanctx_cfg {
enum rtw89_sub_entity_idx idx;
+ int ref_count;
};
enum rtw89_chanctx_changes {
@@ -4064,13 +4245,16 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_MCC,
NUM_OF_RTW89_ENTITY_MODE,
- RTW89_ENTITY_MODE_INVALID = NUM_OF_RTW89_ENTITY_MODE,
+ RTW89_ENTITY_MODE_INVALID = -EINVAL,
+ RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
};
struct rtw89_sub_entity {
struct cfg80211_chan_def chandef;
struct rtw89_chan chan;
struct rtw89_chan_rcd rcd;
+
+ /* only assigned when running with chanctx_ops */
struct rtw89_chanctx_cfg *cfg;
};
@@ -4123,6 +4307,7 @@ enum rtw89_flags {
RTW89_FLAG_CMAC1_FUNC,
RTW89_FLAG_FW_RDY,
RTW89_FLAG_RUNNING,
+ RTW89_FLAG_PROBE_DONE,
RTW89_FLAG_BFEE_MON,
RTW89_FLAG_BFEE_EN,
RTW89_FLAG_BFEE_TIMER_KEEP,
@@ -4179,6 +4364,21 @@ struct rtw89_phy_stat {
struct rtw89_pkt_stat last_pkt_stat;
};
+enum rtw89_rfk_report_state {
+ RTW89_RFK_STATE_START = 0x0,
+ RTW89_RFK_STATE_OK = 0x1,
+ RTW89_RFK_STATE_FAIL = 0x2,
+ RTW89_RFK_STATE_TIMEOUT = 0x3,
+ RTW89_RFK_STATE_H2C_CMD_ERR = 0x4,
+};
+
+struct rtw89_rfk_wait_info {
+ struct completion completion;
+ ktime_t start_time;
+ enum rtw89_rfk_report_state state;
+ u8 version;
+};
+
#define RTW89_DACK_PATH_NR 2
#define RTW89_DACK_IDX_NR 2
#define RTW89_DACK_MSBK_NR 16
@@ -4194,15 +4394,18 @@ struct rtw89_dack_info {
bool msbk_timeout[RTW89_DACK_PATH_NR];
};
-#define RTW89_IQK_CHS_NR 2
-#define RTW89_IQK_PATH_NR 4
+#define RTW89_RFK_CHS_NR 3
struct rtw89_rfk_mcc_info {
- u8 ch[RTW89_IQK_CHS_NR];
- u8 band[RTW89_IQK_CHS_NR];
+ u8 ch[RTW89_RFK_CHS_NR];
+ u8 band[RTW89_RFK_CHS_NR];
+ u8 bw[RTW89_RFK_CHS_NR];
u8 table_idx;
};
+#define RTW89_IQK_CHS_NR 2
+#define RTW89_IQK_PATH_NR 4
+
struct rtw89_lck_info {
u8 thermal[RF_PATH_MAX];
};
@@ -4380,6 +4583,11 @@ struct rtw89_cfo_tracking_info {
u8 lock_cnt;
};
+enum rtw89_tssi_mode {
+ RTW89_TSSI_NORMAL = 0,
+ RTW89_TSSI_SCAN = 1,
+};
+
enum rtw89_tssi_alimk_band {
TSSI_ALIMK_2G = 0,
TSSI_ALIMK_5GL,
@@ -4589,6 +4797,7 @@ struct rtw89_hw_scan_info {
struct ieee80211_vif *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
struct rtw89_chan op_chan;
+ bool abort;
u32 last_chan_idx;
};
@@ -4605,6 +4814,48 @@ enum rtw89_phy_bb_gain_band {
RTW89_BB_GAIN_BAND_NR,
};
+enum rtw89_phy_gain_band_be {
+ RTW89_BB_GAIN_BAND_2G_BE = 0,
+ RTW89_BB_GAIN_BAND_5G_L_BE = 1,
+ RTW89_BB_GAIN_BAND_5G_M_BE = 2,
+ RTW89_BB_GAIN_BAND_5G_H_BE = 3,
+ RTW89_BB_GAIN_BAND_6G_L0_BE = 4,
+ RTW89_BB_GAIN_BAND_6G_L1_BE = 5,
+ RTW89_BB_GAIN_BAND_6G_M0_BE = 6,
+ RTW89_BB_GAIN_BAND_6G_M1_BE = 7,
+ RTW89_BB_GAIN_BAND_6G_H0_BE = 8,
+ RTW89_BB_GAIN_BAND_6G_H1_BE = 9,
+ RTW89_BB_GAIN_BAND_6G_UH0_BE = 10,
+ RTW89_BB_GAIN_BAND_6G_UH1_BE = 11,
+
+ RTW89_BB_GAIN_BAND_NR_BE,
+};
+
+enum rtw89_phy_bb_bw_be {
+ RTW89_BB_BW_20_40 = 0,
+ RTW89_BB_BW_80_160_320 = 1,
+
+ RTW89_BB_BW_NR_BE,
+};
+
+enum rtw89_bw20_sc {
+ RTW89_BW20_SC_20M = 1,
+ RTW89_BW20_SC_40M = 2,
+ RTW89_BW20_SC_80M = 4,
+ RTW89_BW20_SC_160M = 8,
+ RTW89_BW20_SC_320M = 16,
+};
+
+enum rtw89_cmac_table_bw {
+ RTW89_CMAC_BW_20M = 0,
+ RTW89_CMAC_BW_40M = 1,
+ RTW89_CMAC_BW_80M = 2,
+ RTW89_CMAC_BW_160M = 3,
+ RTW89_CMAC_BW_320M = 4,
+
+ RTW89_CMAC_BW_NR,
+};
+
enum rtw89_phy_bb_rxsc_num {
RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */
RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */
@@ -4627,6 +4878,27 @@ struct rtw89_phy_bb_gain_info {
[RTW89_BB_RXSC_NUM_160];
};
+struct rtw89_phy_bb_gain_info_be {
+ s8 lna_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [LNA_GAIN_NUM];
+ s8 tia_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [TIA_GAIN_NUM];
+ s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM + 1];
+ s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_20M];
+ s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_40M];
+ s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_80M];
+ s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_160M];
+};
+
struct rtw89_phy_efuse_gain {
bool offset_valid;
bool comp_valid;
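[annotation] Note the RTW89_BW20_SC_* values above are counts, not flags: each says how many 20 MHz subchannels a given bandwidth spans, which lets them size the per-subchannel rpl_ofst_* tables in rtw89_phy_bb_gain_info_be directly (8 entries for 160 MHz, 4 for 80 MHz, and so on). A minimal illustration:

	#include <assert.h>

	enum rtw89_bw20_sc { RTW89_BW20_SC_20M = 1, RTW89_BW20_SC_40M = 2,
			     RTW89_BW20_SC_80M = 4, RTW89_BW20_SC_160M = 8,
			     RTW89_BW20_SC_320M = 16 };

	/* One report-power-level offset per 20 MHz slice of the channel. */
	static signed char rpl_ofst_160[RTW89_BW20_SC_160M];

	int main(void) { assert(sizeof(rpl_ofst_160) == 8); return 0; }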
@@ -4681,6 +4953,9 @@ struct rtw89_mcc_role {
struct rtw89_mcc_policy policy;
struct rtw89_mcc_limit limit;
+ /* only valid when running with FW MRC mechanism */
+ u8 slot_idx;
+
/* byte-array in LE order for FW */
u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)];
@@ -4724,7 +4999,11 @@ struct rtw89_mcc_sync {
bool enable;
u16 offset; /* TU */
u8 macid_src;
+ u8 band_src;
+ u8 port_src;
u8 macid_tgt;
+ u8 band_tgt;
+ u8 port_tgt;
};
struct rtw89_mcc_config {
@@ -4757,6 +5036,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
const struct rtw89_pci_info *pci_info;
@@ -4806,6 +5086,7 @@ struct rtw89_dev {
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
+ struct rtw89_rfk_wait_info rfk_wait;
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
@@ -4824,7 +5105,10 @@ struct rtw89_dev {
struct rtw89_env_monitor_info env_monitor;
struct rtw89_dig_info dig;
struct rtw89_phy_ch_info ch_info;
- struct rtw89_phy_bb_gain_info bb_gain;
+ union {
+ struct rtw89_phy_bb_gain_info ax;
+ struct rtw89_phy_bb_gain_info_be be;
+ } bb_gain;
struct rtw89_phy_efuse_gain efuse_gain;
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
@@ -4969,12 +5253,12 @@ static inline void rtw89_hci_ctrl_trxhci(struct rtw89_dev *rtwdev, bool enable)
rtwdev->hci.ops->ctrl_trxhci(rtwdev, enable);
}
-static inline int rtw89_hci_poll_txdma_ch(struct rtw89_dev *rtwdev)
+static inline int rtw89_hci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
{
int ret = 0;
- if (rtwdev->hci.ops->poll_txdma_ch)
- ret = rtwdev->hci.ops->poll_txdma_ch(rtwdev);
+ if (rtwdev->hci.ops->poll_txdma_ch_idle)
+ ret = rtwdev->hci.ops->poll_txdma_ch_idle(rtwdev);
return ret;
}
@@ -5437,6 +5721,14 @@ static inline void rtw89_chip_rfe_gpio(struct rtw89_dev *rtwdev)
chip->ops->rfe_gpio(rtwdev);
}
+static inline void rtw89_chip_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_hw_init)
+ chip->ops->rfk_hw_init(rtwdev);
+}
+
static inline
void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -5446,6 +5738,20 @@ void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
chip->ops->bb_preinit(rtwdev, phy_idx);
}
+static inline
+void rtw89_chip_bb_postinit(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->bb_postinit)
+ return;
+
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_0);
+
+ if (rtwdev->dbcc_en)
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_1);
+}
+
static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5462,6 +5768,14 @@ static inline void rtw89_chip_rfk_init(struct rtw89_dev *rtwdev)
chip->ops->rfk_init(rtwdev);
}
+static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_init_late)
+ chip->ops->rfk_init_late(rtwdev);
+}
+
static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5750,6 +6064,18 @@ out:
rcu_read_unlock();
}
+static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
+{
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case DBCC_LEGACY:
+ return true;
+ default:
+ return false;
+ }
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -5815,7 +6141,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
-void rtw89_set_channel(struct rtw89_dev *rtwdev);
+int rtw89_set_channel(struct rtw89_dev *rtwdev);
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_chan *chan);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 44829a148185..affffc4092ba 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -3427,14 +3427,17 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_btc *btc = &rtwdev->btc;
- bool btc_manual;
+ const struct rtw89_btc_ver *ver = btc->ver;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &btc_manual);
+ ret = kstrtobool_from_user(user_buf, count, &btc->manual_ctrl);
if (ret)
return ret;
- btc->ctrl.manual = btc_manual;
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.manual = btc->manual_ctrl;
+ else
+ btc->ctrl.ctrl.manual = btc->manual_ctrl;
return count;
}
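[annotation] The debugfs write now parses straight into the driver-canonical btc->manual_ctrl and then mirrors it into whichever control layout the firmware branch consumes, keeping both views consistent. The same mirroring, factored as a sketch (the helper name is hypothetical):

	/* Hypothetical helper: one canonical flag, mirrored into the
	 * FW-version-specific layout. */
	static void btc_set_manual(struct rtw89_btc *btc, bool manual)
	{
		const struct rtw89_btc_ver *ver = btc->ver;

		btc->manual_ctrl = manual;
		if (ver->fcxctrl == 7)
			btc->ctrl.ctrl_v7.manual = manual;
		else
			btc->ctrl.ctrl.manual = manual;
	}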
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
index 5c6787179bad..72416f56a071 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.h
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -23,5 +23,6 @@ int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev);
int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev);
int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle);
int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv);
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c
index 8e8b7cd315f7..0be26d5fdf7c 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse_be.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c
@@ -7,6 +7,31 @@
#include "mac.h"
#include "reg.h"
+#define EFUSE_EXTERNALPN_ADDR_BE 0x1580
+#define EFUSE_B1_MSSDEVTYPE_MASK GENMASK(3, 0)
+#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4)
+#define EFUSE_SERIALNUM_ADDR_BE 0x1581
+#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0)
+#define EFUSE_B2_MSSCUSTIDX1_MASK BIT(6)
+#define EFUSE_SB_CRYP_SEL_ADDR 0x1582
+#define EFUSE_SB_CRYP_SEL_SIZE 2
+#define EFUSE_SB_CRYP_SEL_DEFAULT 0xFFFF
+#define SB_SEL_MGN_MAX_SIZE 2
+#define EFUSE_SEC_BE_START 0x1580
+#define EFUSE_SEC_BE_SIZE 4
+
+enum rtw89_efuse_mss_dev_type {
+ MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ MSS_DEV_TYPE_FWSEC_WINLIN_INBOX = 0xC,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB = 0xA,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB = 0x9,
+ MSS_DEV_TYPE_FWSEC_NONWIN_INBOX = 0x6,
+};
+
+static const u32 sb_sel_mgn[SB_SEL_MGN_MAX_SIZE] = {
+ 0x8000100, 0xC000180
+};
+
static void rtw89_enable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -418,3 +443,120 @@ out_free:
return ret;
}
+
+static u16 get_sb_cryp_sel_idx(u16 sb_cryp_sel)
+{
+ u8 low_bit, high_bit, cnt_zero = 0;
+ u8 idx, sel_form_v, sel_idx_v;
+ u16 sb_cryp_sel_v = 0x0;
+
+ sel_form_v = u16_get_bits(sb_cryp_sel, MASKBYTE0);
+ sel_idx_v = u16_get_bits(sb_cryp_sel, MASKBYTE1);
+
+ for (idx = 0; idx < 4; idx++) {
+ low_bit = !!(sel_form_v & BIT(idx));
+ high_bit = !!(sel_form_v & BIT(7 - idx));
+ if (low_bit != high_bit)
+ return U16_MAX;
+ if (low_bit)
+ continue;
+
+ cnt_zero++;
+ if (cnt_zero == 1)
+ sb_cryp_sel_v = idx * 16;
+ else if (cnt_zero > 1)
+ return U16_MAX;
+ }
+
+ low_bit = u8_get_bits(sel_idx_v, 0x0F);
+ high_bit = u8_get_bits(sel_idx_v, 0xF0);
+
+ if ((low_bit ^ high_bit) != 0xF)
+ return U16_MAX;
+
+ return sb_cryp_sel_v + low_bit;
+}
+
+static u8 get_mss_dev_type_idx(struct rtw89_dev *rtwdev, u8 mss_dev_type)
+{
+ switch (mss_dev_type) {
+ case MSS_DEV_TYPE_FWSEC_WINLIN_INBOX:
+ mss_dev_type = 0x0;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB:
+ mss_dev_type = 0x1;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB:
+ mss_dev_type = 0x2;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONWIN_INBOX:
+ mss_dev_type = 0x3;
+ break;
+ case MSS_DEV_TYPE_FWSEC_DEF:
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF;
+ break;
+ default:
+ rtw89_warn(rtwdev, "unknown mss_dev_type %d", mss_dev_type);
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_INV;
+ break;
+ }
+
+ return mss_dev_type;
+}
+
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sec_addr = EFUSE_SEC_BE_START;
+ u32 sec_size = EFUSE_SEC_BE_SIZE;
+ u16 sb_cryp_sel, sb_cryp_sel_idx;
+ u8 sec_map[EFUSE_SEC_BE_SIZE];
+ u8 mss_dev_type;
+ u8 b1, b2;
+ int ret;
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, sec_map,
+ sec_addr, sec_size, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump secsel map\n");
+ return ret;
+ }
+
+ sb_cryp_sel = sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr] |
+ sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr + 1] << 8;
+ if (sb_cryp_sel == EFUSE_SB_CRYP_SEL_DEFAULT)
+ goto out;
+
+ sb_cryp_sel_idx = get_sb_cryp_sel_idx(sb_cryp_sel);
+ if (sb_cryp_sel_idx >= SB_SEL_MGN_MAX_SIZE) {
+ rtw89_warn(rtwdev, "invalid SB cryp sel idx %d\n", sb_cryp_sel_idx);
+ goto out;
+ }
+
+ sec->sb_sel_mgn = sb_sel_mgn[sb_cryp_sel_idx];
+
+ b1 = sec_map[EFUSE_EXTERNALPN_ADDR_BE - sec_addr];
+ b2 = sec_map[EFUSE_SERIALNUM_ADDR_BE - sec_addr];
+
+ mss_dev_type = u8_get_bits(b1, EFUSE_B1_MSSDEVTYPE_MASK);
+ sec->mss_cust_idx = 0x1F - (u8_get_bits(b1, EFUSE_B1_MSSCUSTIDX0_MASK) |
+ u8_get_bits(b2, EFUSE_B2_MSSCUSTIDX1_MASK) << 4);
+ sec->mss_key_num = 0xF - u8_get_bits(b2, EFUSE_B2_MSSKEYNUM_MASK);
+
+ sec->mss_dev_type = get_mss_dev_type_idx(rtwdev, mss_dev_type);
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_INV) {
+ rtw89_warn(rtwdev, "invalid mss_dev_type %d\n", mss_dev_type);
+ goto out;
+ }
+
+ sec->secure_boot = true;
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MSS secure_boot=%d dev_type=%d cust_idx=%d key_num=%d\n",
+ sec->secure_boot, sec->mss_dev_type, sec->mss_cust_idx,
+ sec->mss_key_num);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_efuse_read_fw_secure_be);
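[annotation] In get_sb_cryp_sel_idx() above, the low byte of sb_cryp_sel must be a mirrored pattern (bit i equals bit 7-i) whose single cleared pair selects a group of 16, and the high byte carries the in-group offset as two complementary nibbles; anything else decodes to U16_MAX. A standalone re-implementation to check a value by hand:

	#include <stdint.h>
	#include <stdio.h>

	/* Re-implementation of get_sb_cryp_sel_idx(), for illustration. */
	static uint16_t sb_cryp_sel_idx(uint16_t sel)
	{
		uint8_t form = sel & 0xff, idxb = sel >> 8;
		uint8_t zero_pairs = 0, group = 0;

		for (int i = 0; i < 4; i++) {
			int lo = !!(form & (1u << i));
			int hi = !!(form & (1u << (7 - i)));

			if (lo != hi)         /* low byte must mirror */
				return UINT16_MAX;
			if (lo)
				continue;
			if (++zero_pairs > 1) /* only one cleared pair allowed */
				return UINT16_MAX;
			group = i * 16;
		}
		if (((idxb & 0xf) ^ (idxb >> 4)) != 0xf) /* complement nibbles */
			return UINT16_MAX;

		return group + (idxb & 0xf);
	}

	int main(void)
	{
		/* 0x7e = 0b01111110: pair 0 cleared -> group 0; 0xe1 carries
		 * offset 1 (nibble 0x1, complement 0xe) -> sb_sel_mgn[1]. */
		printf("0x%04x -> %u\n", 0xe17e, sb_cryp_sel_idx(0xe17e));
		return 0;
	}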
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 09684cea9731..185cd339c085 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -13,6 +13,8 @@
#include "reg.h"
#include "util.h"
+static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -163,6 +165,161 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
return 0;
}
+static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr,
+ u32 rmp_tbl_size, u32 *key_idx)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sel_byte_idx;
+ u32 mss_sel_idx;
+ u8 sel_bit_idx;
+ int i;
+
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
+ if (!mss_hdr->defen)
+ return -ENOENT;
+
+ mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ } else {
+ if (mss_hdr->defen)
+ mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
+ else
+ mss_sel_idx = 0;
+ mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) +
+ sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ }
+
+ sel_byte_idx = mss_sel_idx >> 3;
+ sel_bit_idx = mss_sel_idx & 0x7;
+
+ if (sel_byte_idx >= rmp_tbl_size)
+ return -EFAULT;
+
+ if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
+ return -ENOENT;
+
+ *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
+
+ for (i = 0; i < sel_byte_idx; i++)
+ *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
+
+ return 0;
+}
+
+static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 rmp_tbl_size;
+ u32 key_sign_len;
+ u32 real_key_idx;
+ u32 sb_sel_ver;
+ int ret;
+
+ if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
+ rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
+ return -ENOENT;
+ }
+
+ if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
+ rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) *
+ mss_hdr->mssdev_max) >> 3;
+ if (mss_hdr->defen)
+ rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
+ } else {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
+ mss_hdr->rmpfmt);
+ return -EINVAL;
+ }
+
+ if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
+ rmp_tbl_size, (int)sizeof(*mss_hdr),
+ le32_to_cpu(mss_hdr->key_raw_offset));
+ return -EINVAL;
+ }
+
+ key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
+ if (!key_sign_len)
+ key_sign_len = 512;
+
+ if (info->dsp_checksum)
+ key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
+
+ *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
+ le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
+
+ if (!sec->secure_boot)
+ goto out;
+
+ sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
+ goto ignore;
+
+ ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
+ if (ret)
+ goto ignore;
+
+ section_info->key_addr = content + section_info->len +
+ le32_to_cpu(mss_hdr->key_raw_offset) +
+ key_sign_len * real_key_idx;
+ section_info->key_len = key_sign_len;
+ section_info->key_idx = real_key_idx;
+
+out:
+ if (info->secure_section_exist) {
+ section_info->ignore = true;
+ return 0;
+ }
+
+ info->secure_section_exist = true;
+
+ return 0;
+
+ignore:
+ section_info->ignore = true;
+
+ return 0;
+}
+
+static int __parse_security_section(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ int ret;
+
+ section_info->mssc =
+ le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
+
+ if (section_info->mssc == FORMATTED_MSSC) {
+ ret = __parse_formatted_mssc(rtwdev, info, section_info,
+ section, content, mssc_len);
+ if (ret)
+ return -EINVAL;
+ } else {
+ *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
+ if (info->dsp_checksum)
+ *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
+
+ info->secure_section_exist = true;
+ }
+
+ return 0;
+}
+
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
struct rtw89_fw_bin_info *info)
{
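[annotation] In __get_mssc_key_idx() above, the remap table is a bitmask of keys actually present in the MSS pool, and the downloadable key index is the popcount of all set bits before the selected position. The same computation, standalone:

	#include <stdint.h>
	#include <stdio.h>

	static int mssc_key_idx(const uint8_t *rmp_tbl, uint32_t tbl_size,
				uint32_t sel_idx, uint32_t *key_idx)
	{
		uint32_t byte = sel_idx >> 3;
		uint8_t bit = sel_idx & 0x7;
		uint32_t idx;

		if (byte >= tbl_size)
			return -1;
		if (!(rmp_tbl[byte] & (1u << bit)))
			return -1; /* selected key not provisioned */

		idx = __builtin_popcount(rmp_tbl[byte] & ((1u << bit) - 1));
		for (uint32_t i = 0; i < byte; i++)
			idx += __builtin_popcount(rmp_tbl[i]);

		*key_idx = idx;
		return 0;
	}

	int main(void)
	{
		const uint8_t tbl[] = { 0x0b, 0x05 }; /* keys at bits 0,1,3,8,10 */
		uint32_t k;

		if (!mssc_key_idx(tbl, sizeof(tbl), 10, &k))
			printf("selector 10 -> key slot %u\n", k); /* 4 */
		return 0;
	}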
@@ -173,10 +330,12 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
const u8 *fw_end = fw + len;
const u8 *bin;
u32 base_hdr_len;
- u32 mssc_len = 0;
+ u32 mssc_len;
+ int ret;
u32 i;
info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
+ info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
@@ -199,16 +358,9 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info = info->section_info;
for (i = 0; i < info->section_num; i++) {
section = &fw_hdr->sections[i];
+
section_info->type =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
- if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
- section_info->mssc =
- le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
- mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
- } else {
- section_info->mssc = 0;
- }
-
section_info->len =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
@@ -217,15 +369,40 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info->dladdr =
le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
section_info->addr = bin;
- bin += section_info->len;
+
+ if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
+ ret = __parse_security_section(rtwdev, info, section_info,
+ section, bin, &mssc_len);
+ if (ret)
+ return ret;
+ } else {
+ section_info->mssc = 0;
+ mssc_len = 0;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
+ i, section_info->type, section_info->len,
+ section_info->mssc, mssc_len, bin - fw);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
+ section_info->ignore, section_info->key_addr,
+ section_info->key_addr ?
+ section_info->key_addr - section_info->addr : 0,
+ section_info->key_len, section_info->key_idx);
+
+ bin += section_info->len + mssc_len;
section_info++;
}
- if (fw_end != bin + mssc_len) {
+ if (fw_end != bin) {
rtw89_err(rtwdev, "[ERR]fw bin size\n");
return -EINVAL;
}
+ if (!info->secure_section_exist)
+ rtw89_warn(rtwdev, "no firmware secure section\n");
+
return 0;
}
@@ -458,6 +635,8 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -919,9 +1098,56 @@ static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
len + H2C_HEADER_LEN));
}
-static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr *fw_hdr)
+{
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_W7_PART_SIZE);
+
+ return 0;
+}
+
+static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_v1 *fw_hdr)
{
+ struct rtw89_fw_hdr_section_info *section_info;
+ struct rtw89_fw_hdr_section_v1 *section;
+ u8 dst_sec_idx = 0;
+ u8 sec_idx;
+
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_V1_W7_PART_SIZE);
+
+ for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
+ section_info = &info->section_info[sec_idx];
+ section = &fw_hdr->sections[sec_idx];
+
+ if (section_info->ignore)
+ continue;
+
+ if (dst_sec_idx != sec_idx)
+ fw_hdr->sections[dst_sec_idx] = *section;
+
+ dst_sec_idx++;
+ }
+
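+	/* The loop above compacts the section table in place: e.g. with five
+	 * sections where section 2 is marked ignore, sections 3 and 4 shift
+	 * down to slots 2 and 3, SEC_NUM becomes 4, and one sizeof(*section)
+	 * worth of header bytes is reported back for trimming.
+	 */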
+ le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
+
+ return (info->section_num - dst_sec_idx) * sizeof(*section);
+}
+
+static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
+{
+ u32 len = info->hdr_len - info->dynamic_hdr_len;
+ struct rtw89_fw_hdr_v1 *fw_hdr_v1;
+ const u8 *fw = fw_suit->data;
+ struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
+ u32 truncated;
u32 ret = 0;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -931,7 +1157,26 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
}
skb_put_data(skb, fw, len);
- SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
+
+ switch (fw_suit->hdr_ver) {
+ case 0:
+ fw_hdr = (struct rtw89_fw_hdr *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
+ break;
+ case 1:
+ fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto fail;
+ }
+
+ if (truncated) {
+ len -= truncated;
+ skb_trim(skb, len);
+ }
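+	/* Section descriptors dropped by the v1 tweak shrink the header, so
+	 * the skb is trimmed above and the shorter len feeds the H2C header
+	 * below.
+	 */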
+
rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FWDL,
H2C_FUNC_MAC_FWHDR_DL, len);
@@ -950,12 +1195,14 @@ fail:
return ret;
}
-static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
- ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
+ ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
if (ret) {
rtw89_err(rtwdev, "[ERR]FW header download\n");
return ret;
@@ -979,9 +1226,21 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
const u8 *section = info->addr;
u32 residue_len = info->len;
+ bool copy_key = false;
u32 pkt_len;
int ret;
+ if (info->ignore)
+ return 0;
+
+ if (info->key_addr && info->key_len) {
+ if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
+			rtw89_warn(rtwdev, "skip copying key data because of len %d, %d, %d\n",
+				   info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len);
+ else
+ copy_key = true;
+ }
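+	/* copy_key overwrites the last key_len bytes of the section with the
+	 * key data parsed from the firmware file, which is only safe when the
+	 * whole section fits into a single download packet (checked above).
+	 */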
+
while (residue_len) {
if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
pkt_len = FWDL_SECTION_PER_PKT_LEN;
@@ -995,6 +1254,10 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
}
skb_put_data(skb, section, pkt_len);
+ if (copy_key)
+ memcpy(skb->data + pkt_len - info->key_len,
+ info->key_addr, info->key_len);
+
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
@@ -1101,7 +1364,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
struct rtw89_fw_suit *fw_suit)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_fw_bin_info info;
+ struct rtw89_fw_bin_info info = {};
int ret;
ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
@@ -1120,8 +1383,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
- info.dynamic_hdr_len);
+ ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
if (ret)
return ret;
@@ -1485,13 +1747,108 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
-#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
+
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
+ h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
+ h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
+ h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
+ h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
+ h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
+ h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
+ h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
+
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam *h2c;
u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
u8 entry_idx;
int ret;
@@ -1509,32 +1866,34 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
- SET_BA_CAM_MACID(skb->data, macid);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
+
+ h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
else
- SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
+ h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
if (!valid)
goto end;
- SET_BA_CAM_VALID(skb->data, valid);
- SET_BA_CAM_TID(skb->data, params->tid);
+ h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
if (params->buf_size > 64)
- SET_BA_CAM_BMAP_SIZE(skb->data, 4);
+ h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
else
- SET_BA_CAM_BMAP_SIZE(skb->data, 0);
+ h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
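+	/* A bmap_size of 4 selects the wider block-ack bitmap needed once the
+	 * BA window exceeds 64 frames (e.g. HE's 256-frame reorder buffer);
+	 * 0 keeps the legacy 64-frame bitmap.
+	 */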
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 1);
- SET_BA_CAM_SSN(skb->data, params->ssn);
+ h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
- SET_BA_CAM_STD_EN(skb->data, 1);
- SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
}
end:
@@ -1542,7 +1901,7 @@ end:
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1556,31 +1915,35 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
u8 entry_idx, u8 uid)
{
+ struct rtw89_h2c_ba_cam *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
- SET_BA_CAM_VALID(skb->data, 1);
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
- SET_BA_CAM_UID(skb->data, uid);
- SET_BA_CAM_BAND(skb->data, 0);
- SET_BA_CAM_STD_EN(skb->data, 0);
+ h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
+ le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1609,14 +1972,132 @@ void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
}
}
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam_v1 *h2c;
+ u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 entry_idx;
+ u8 bmap_size;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
+ if (ret) {
+		/* This still works even without a static BA CAM entry,
+		 * because the hardware can create a dynamic BA CAM entry
+		 * automatically.
+		 */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
+
+ if (params->buf_size > 512)
+ bmap_size = 10;
+ else if (params->buf_size > 256)
+ bmap_size = 8;
+ else if (params->buf_size > 64)
+ bmap_size = 4;
+ else
+ bmap_size = 0;
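+	/* Worked example: an EHT peer advertising buf_size = 512 falls in the
+	 * (256, 512] bucket and gets bmap_size = 8; the HE maximum of 256
+	 * maps to 4, and anything up to 64 frames keeps the default 0.
+	 */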
+
+ h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
+ le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
+ le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
+
+ entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
+ le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
+
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx)
+{
+ struct rtw89_h2c_ba_cam_init *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
+
+ h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
+ le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
+ le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
struct sk_buff *skb;
- u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
- BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ u32 comp = 0;
int ret;
+ if (enable)
+ comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
+ BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
+ BIT(RTW89_FW_LOG_COMP_SCAN);
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
@@ -1815,6 +2296,50 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ rtwvif->sub_entity_idx);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ch_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
+
+ h2c->info[0].central_ch = chan->channel;
+ h2c->info[0].pri_ch = chan->primary_channel;
+ h2c->info[0].band = chan->band_type;
+ h2c->info[0].bw = chan->band_width;
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_p2p_noa_desc *desc,
@@ -1892,11 +2417,12 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
struct sk_buff *skb;
- u8 macid = rtwvif->mac_id;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
@@ -1937,6 +2463,91 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
+
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
+
+ h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
+ le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
+ le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
+
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
+
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
+
+ h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
+
+ h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
+
+ h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
+
+ h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
+ le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
+ le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
+ h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
+
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
+
+ h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
+ h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
+
+ h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
+ h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, u8 *pads)
@@ -1950,9 +2561,6 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
u16 ppe;
int i;
- if (!sta->deflink.he_cap.has_he)
- return;
-
ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
if (!ppe_th) {
@@ -2011,7 +2619,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
int ret;
memset(pads, 0, sizeof(pads));
- if (sta)
+ if (sta && sta->deflink.he_cap.has_he)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
if (vif->p2p)
@@ -2073,6 +2681,246 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
+
+static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, u8 *pads)
+{
+ u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+ u16 ppe_thres_hdr;
+ u8 ppe16, ppe8;
+ u8 n, idx, sh;
+ u8 ru_bitmap;
+ bool ppe_th;
+ u16 ppe;
+ int i;
+
+ ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
+ if (!ppe_th) {
+ u8 pad;
+
+ pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+ pads[i] = pad;
+
+ return;
+ }
+
+ ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres);
+ ru_bitmap = u16_get_bits(ppe_thres_hdr,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n = hweight8(ru_bitmap);
+ n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
+ (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
+ if (!(ru_bitmap & BIT(i))) {
+ pads[i] = 1;
+ continue;
+ }
+
+ idx = n >> 3;
+ sh = n & 7;
+ n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
+
+ ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx);
+ ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
+ ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+
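+		/* A PPET value of 7 means "no threshold": a padding
+		 * requirement only at PPET16 maps to 16 us nominal packet
+		 * padding (2), any PPET8 requirement maps to 8 us (1), and
+		 * otherwise no padding (0), mirroring
+		 * __get_sta_he_pkt_padding().
+		 */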
+ if (ppe16 != 7 && ppe8 == 7)
+ pads[i] = 2;
+ else if (ppe8 != 7)
+ pads[i] = 1;
+ else
+ pads[i] = 0;
+ }
+}
+
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u8 pads[RTW89_PPE_BW_NUM];
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 lowest_rate;
+ int ret;
+
+ memset(pads, 0, sizeof(pads));
+ if (sta) {
+ if (sta->deflink.eht_cap.has_eht)
+ __get_sta_eht_pkt_padding(rtwdev, sta, pads);
+ else if (sta->deflink.he_cap.has_he)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ }
+
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
+ le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
+ CCTLINFO_G7_W0_DISDATAFB);
+
+ h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+
+ h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+
+ h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+
+ h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
+ }
+
+ if (vif->bss_conf.eht_support) {
+ u16 punct = vif->bss_conf.chanreq.oper.punctured;
+
+ h2c->w4 |= le32_encode_bits(~punct,
+ CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ }
+
+ h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+
+ h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+
+ if (sta) {
+ h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he,
+ CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
+
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 agg_num = 0;
+ u8 ba_bmap = 0;
+ int ret;
+ u8 tid;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
+ if (agg_num == 0)
+ agg_num = rtwsta->ampdu_params[tid].agg_num;
+ else
+ agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
+ }
+
+ if (agg_num <= 0x20)
+ ba_bmap = 3;
+ else if (agg_num > 0x20 && agg_num <= 0x40)
+ ba_bmap = 0;
+ else if (agg_num > 0x40 && agg_num <= 0x80)
+ ba_bmap = 1;
+ else if (agg_num > 0x80 && agg_num <= 0x100)
+ ba_bmap = 2;
+ else if (agg_num > 0x100 && agg_num <= 0x200)
+ ba_bmap = 4;
+ else if (agg_num > 0x200 && agg_num <= 0x400)
+ ba_bmap = 5;
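+	/* e.g. the common HE agg_num of 256 (0x100) lands in the
+	 * (0x80, 0x100] bucket and selects ba_bmap = 2, while a 1024-frame
+	 * EHT window (0x400) selects 5.
+	 */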
+
+ h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
@@ -2155,18 +3003,20 @@ fail:
return ret;
}
-#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif->sub_entity_idx);
- struct sk_buff *skb;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd *h2c;
struct sk_buff *skb_beacon;
- u16 tim_offset;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
int bcn_total_len;
u16 beacon_rate;
+ u16 tim_offset;
void *noa_data;
u8 noa_len;
int ret;
@@ -2192,23 +3042,27 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
skb_put_data(skb_beacon, noa_data, noa_len);
}
- bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
+	hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
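+	/* ieee80211_beacon_get_tim() reports tim_offset from the start of
+	 * the frame; the firmware field appears to expect it relative to the
+	 * frame body, hence the MAC header length is subtracted.
+	 */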
+
+ bcn_total_len = len + skb_beacon->len;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
dev_kfree_skb_any(skb_beacon);
return -ENOMEM;
}
- skb_put(skb, H2C_BCN_BASE_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
- SET_BCN_UPD_PORT(skb->data, rtwvif->port);
- SET_BCN_UPD_MBSSID(skb->data, 0);
- SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
- SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
- SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
- SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
- SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
- SET_BCN_UPD_RATE(skb->data, beacon_rate);
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
skb_put_data(skb, skb_beacon->data, skb_beacon->len);
dev_kfree_skb_any(skb_beacon);
@@ -2227,6 +3081,90 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
+
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd_be *h2c;
+ struct sk_buff *skb_beacon;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int bcn_total_len;
+ u16 beacon_rate;
+ u16 tim_offset;
+ void *noa_data;
+ u8 noa_len;
+ int ret;
+
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
+ NULL, 0);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
+
+ noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
+ if (noa_len &&
+ (noa_len <= skb_tailroom(skb_beacon) ||
+ pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
+ skb_put_data(skb_beacon, noa_data, noa_len);
+ }
+
+	hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
+ bcn_total_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -2277,45 +3215,93 @@ fail:
return ret;
}
-#define H2C_JOIN_INFO_LEN 4
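+/* Map the peer (or, lacking one, the vif's BSS capabilities) to the
+ * firmware's station-type enum: EHT implies a WiFi 7 (BE) peer, HE a
+ * WiFi 6 (AX) peer, and anything older falls back to N/AC.
+ */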
+static enum rtw89_fw_sta_type
+rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (!sta)
+ goto by_vif;
+
+ if (sta->deflink.eht_cap.has_eht)
+ return RTW89_FW_BE_STA;
+ else if (sta->deflink.he_cap.has_he)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+
+by_vif:
+ if (vif->bss_conf.eht_support)
+ return RTW89_FW_BE_STA;
+ else if (vif->bss_conf.he_support)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+}
+
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role = rtwvif->self_role;
+ enum rtw89_fw_sta_type sta_type;
u8 net_type = rtwvif->net_type;
+ struct rtw89_h2c_join_v1 *h2c_v1;
+ struct rtw89_h2c_join *h2c;
+ u32 len = sizeof(*h2c);
+ bool format_v1 = false;
int ret;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ len = sizeof(*h2c_v1);
+ format_v1 = true;
+ }
+
if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
self_role = RTW89_SELF_ROLE_AP_CLIENT;
net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, mac_id);
- SET_JOININFO_OP(skb->data, dis_conn);
- SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
- SET_JOININFO_WMM(skb->data, rtwvif->wmm);
- SET_JOININFO_TGR(skb->data, rtwvif->trigger);
- SET_JOININFO_ISHESTA(skb->data, 0);
- SET_JOININFO_DLBW(skb->data, 0);
- SET_JOININFO_TF_MAC_PAD(skb->data, 0);
- SET_JOININFO_DL_T_PE(skb->data, 0);
- SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, net_type);
- SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, self_role);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_join *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
+ le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
+ le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) |
+ le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
+ le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
+ le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
+ le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
+ if (!format_v1)
+ goto done;
+
+ h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
+
+ sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta);
+
+ h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
+ h2c_v1->w2 = 0;
+
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_JOININFO, 0, 1,
- H2C_JOIN_INFO_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -2368,24 +3354,49 @@ fail:
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause)
{
- struct rtw89_fw_macid_pause_grp h2c = {{0}};
- u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
+ struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
+ struct rtw89_fw_macid_pause_grp *h2c;
+ __le32 set = cpu_to_le32(BIT(sh));
+ u8 h2c_macid_pause_id;
struct sk_buff *skb;
+ u32 len;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
+ len = sizeof(*h2c_new);
+ } else {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
+ len = sizeof(*h2c);
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
return -ENOMEM;
}
- h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
- if (pause)
- h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
- skb_put_data(skb, &h2c, len);
+ skb_put(skb, len);
+
+ if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
+ h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
+
+ h2c_new->n[0].pause_mask_grp[grp] = set;
+ h2c_new->n[0].sleep_mask_grp[grp] = set;
+ if (pause) {
+ h2c_new->n[0].pause_grp[grp] = set;
+ h2c_new->n[0].sleep_grp[grp] = set;
+ }
+ } else {
+ h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
+
+ h2c->mask_grp[grp] = set;
+ if (pause)
+ h2c->pause_grp[grp] = set;
+ }
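+	/* The caller passes a (grp, sh) pair addressing one macid bit, e.g.
+	 * macid 37 is expected to arrive as grp = 37 / 32 = 1 and
+	 * sh = 37 % 32 = 5, so only that bit of the selected 32-bit group is
+	 * masked and updated.
+	 */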
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
- H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
+ h2c_macid_pause_id, 1, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
@@ -2516,6 +3527,8 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
+ s32 thold = RTW89_DEFAULT_CQM_THOLD;
+ u32 hyst = RTW89_DEFAULT_CQM_HYST;
struct rtw89_h2c_bcnfltr *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -2536,14 +3549,19 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
+ if (bss_conf->cqm_rssi_hyst)
+ hyst = bss_conf->cqm_rssi_hyst;
+ if (bss_conf->cqm_rssi_thold)
+ thold = bss_conf->cqm_rssi_thold;
+
h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
RTW89_H2C_BCNFLTR_W0_MODE) |
le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
- le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
- le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
+ le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
+ le32_encode_bits(thold + MAX_RSSI,
RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
@@ -2735,11 +3753,11 @@ fail:
return ret;
}
-int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_init_info *init_info = &dm->init_info;
+ struct rtw89_btc_init_info *init_info = &dm->init_info.init;
struct rtw89_btc_module *module = &init_info->module;
struct rtw89_btc_ant_info *ant = &module->ant;
struct rtw89_h2c_cxinit *h2c;
@@ -2755,7 +3773,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
skb_put(skb, len);
h2c = (struct rtw89_h2c_cxinit *)skb->data;
- h2c->hdr.type = CXDRVINFO_INIT;
+ h2c->hdr.type = type;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
h2c->ant_type = ant->type;
@@ -2802,12 +3820,53 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
+ struct rtw89_h2c_cxinit_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxinit;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ h2c->init = *init_info;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -2832,7 +3891,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -2888,7 +3947,7 @@ fail:
#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -2912,7 +3971,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -2978,7 +4037,7 @@ fail:
#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
(4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -3002,7 +4061,7 @@ int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -3062,11 +4121,11 @@ fail:
}
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
+ struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
struct sk_buff *skb;
u8 *cmd;
int ret;
@@ -3079,7 +4138,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
@@ -3106,8 +4165,47 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
+ struct rtw89_h2c_cxctrl_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxctrl;
+ h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
+ h2c->ctrl = *ctrl;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
@@ -3123,7 +4221,7 @@ int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
@@ -3163,7 +4261,7 @@ fail:
}
#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -3180,7 +4278,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
@@ -3296,62 +4394,163 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
return 0;
}
-#define H2C_LEN_SCAN_LIST_OFFLOAD 4
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem *elem;
struct rtw89_mac_chinfo *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
struct sk_buff *skb;
- int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
unsigned int cond;
- u8 *cmd;
+ int skb_len;
int ret;
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
+ skb_len = struct_size(h2c, elem, ch_num);
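+	/* struct_size() sizes the flexible-array H2C: the fixed chinfo
+	 * header plus ch_num fixed-size channel elements, with overflow
+	 * checking.
+	 */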
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
- cmd = skb->data;
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
+
+ h2c->ch_num = ch_num;
+	h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
+ le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
+ le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
+ le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
+ le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
+ le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
+
+ elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem_be *elem;
+ struct rtw89_mac_chinfo_be *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
+ struct sk_buff *skb;
+ unsigned int cond;
+ int skb_len;
+ int ret;
+
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
+ skb_len = struct_size(h2c, elem, ch_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
- RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
- /* in unit of 4 bytes */
- RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+ h2c->ch_num = ch_num;
+	h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
+ h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
list_for_each_entry(ch_info, chan_list, list) {
- cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
-
- RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
- RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
- RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
- RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
- RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
- RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
- RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
- RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
- RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
- RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
- RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
- RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
- RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
- RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
- RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
- RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
- RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
- RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
- RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
- RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
- RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
- RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch,
+ RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
+ le32_encode_bits(ch_info->pause_data,
+ RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num,
+ RTW89_H2C_CHINFO_BE_W1_RANDOM) |
+ le32_encode_bits(ch_info->notify_action,
+ RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
+ le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
+ RTW89_H2C_CHINFO_BE_W1_PROBE) |
+ le32_encode_bits(ch_info->leave_crit,
+ RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
+ le32_encode_bits(ch_info->chkpt_timer,
+ RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
+
+ elem->w2 = le32_encode_bits(ch_info->leave_time,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
+ le32_encode_bits(ch_info->leave_th,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
+ le32_encode_bits(ch_info->tx_pkt_ctrl,
+ RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
+
+ elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
+
+ elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
+ le32_encode_bits(ch_info->fw_probe0_ssids,
+ RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
+
+ elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
+ le32_encode_bits(ch_info->fw_probe0_bssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3410,7 +4609,10 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
+ if (option->enable)
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3421,6 +4623,169 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ u8 i, idx;
+
+	sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
+	if (!sband) /* defensive: some devices register no 6 GHz sband */
+		return;
+
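+	/* 6 GHz channel numbers run 1, 5, 9, ... in steps of four, so
+	 * (hw_value - 1) / 4 maps each channel to one bit of the 64-bit
+	 * prohibit bitmap consumed as W6 (low) and W7 (high) of the
+	 * scan-offload H2C.
+	 */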
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED) {
+ idx = (chan->hw_value - 1) / 4;
+ option->prohib_chan |= BIT(idx);
+ }
+ }
+}
+
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_scanofld_be_macc_role *macc_role;
+ struct rtw89_chan *op = &scan_info->op_chan;
+ struct rtw89_h2c_scanofld_be_opch *opch;
+ struct rtw89_h2c_scanofld_be *h2c;
+ struct sk_buff *skb;
+ u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
+ u8 opch_size = sizeof(*opch) * option->num_opch;
+ u8 probe_id[NUM_NL80211_BANDS];
+ unsigned int cond;
+ void *ptr;
+ int ret;
+ u32 len;
+ u8 i;
+
+	/* start every band from "no probe template"; W3/W4 below would
+	 * otherwise encode uninitialized probe IDs
+	 */
+	memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+
+	rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+
+ len = sizeof(*h2c) + macc_role_size + opch_size;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
+ ptr = skb->data;
+
+ h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
+ le32_encode_bits(option->scan_mode,
+ RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
+ le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
+ le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
+
+ h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
+ le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
+ le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
+
+ h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
+ le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
+ le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
+
+ h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
+ le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
+
+ h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
+ le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+
+ h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
+
+ h2c->w6 = le32_encode_bits(option->prohib_chan,
+ RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
+ h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
+ RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
+ ptr += sizeof(*h2c);
+
+ for (i = 0; i < option->num_macc_role; i++) {
+ macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i];
+ macc_role->w0 =
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
+ ptr += sizeof(*macc_role);
+ }
+
+ for (i = 0; i < option->num_opch; i++) {
+ opch = ptr;
+ opch->w0 = le32_encode_bits(rtwvif->mac_id,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
+ le32_encode_bits(option->band,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
+ le32_encode_bits(rtwvif->port,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
+ le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
+ le32_encode_bits(true,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
+ le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
+
+ opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
+ le32_encode_bits(op->band_type,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
+ le32_encode_bits(op->band_width,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
+ le32_encode_bits(0x3,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
+ le32_encode_bits(op->primary_channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
+ le32_encode_bits(op->channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
+
+ opch->w2 = le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
+ le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
+ le32_encode_bits(2,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
+
+ opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+ ptr += sizeof(*opch);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD_BE, 1, 1,
+ len);
+
+ if (option->enable)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
+ return ret;
+ }
+
+ return 0;
+}
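
All the word-building above leans on le32_encode_bits(), which packs a
value into a GENMASK()-described field and returns it little-endian, so
several fields of one command word can simply be OR'd together. A minimal
stand-alone sketch of the idiom; the field layout here is hypothetical,
not the driver's real masks:

	#include <linux/bitfield.h>

	#define EXAMPLE_W0_MACID	GENMASK(23, 16)	/* made-up field */
	#define EXAMPLE_W0_PORT		GENMASK(26, 24)	/* made-up field */

	static __le32 example_pack_w0(u8 macid, u8 port)
	{
		/* le32_encode_bits(v, m) == cpu_to_le32(FIELD_PREP(m, v)) */
		return le32_encode_bits(macid, EXAMPLE_W0_MACID) |
		       le32_encode_bits(port, EXAMPLE_W0_PORT);
	}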
+
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page)
@@ -3497,6 +4862,328 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info *h2c;
+ u8 tbl_sel = rfk_mcc->table_idx;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 tbl, path;
+ u32 val32;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+
+ h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
+ BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
+
+ for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]);
+ h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]);
+ }
+ }
+
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+ h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ }
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
+ h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel0 = cpu_to_le32(val32);
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel1 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod0 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod1 = cpu_to_le32(val32);
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c->mlo_1_1 = cpu_to_le32(1);
+
+ h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
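
rtw89_fw_h2c_rf_pre_ntfy() and the RF calibration helpers that follow all
share one skeleton: allocate an skb sized for the payload, reserve it with
skb_put(), fill the payload, stamp the H2C header, and transmit. The skb
is freed only on the failure path, since a successful rtw89_h2c_tx()
consumes it. A sketch of that shape with the payload fill elided (not a
real driver function):

	static int example_send_rfk_h2c(struct rtw89_dev *rtwdev, u32 func, u32 len)
	{
		struct sk_buff *skb;
		int ret;

		skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		/* ... fill skb->data with cpu_to_le32()-encoded fields ... */

		rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
				      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
				      func, 0, 0, len);

		ret = rtw89_h2c_tx(rtwdev, skb, false);
		if (ret)
			dev_kfree_skb_any(skb);	/* TX never took ownership */

		return ret;
	}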
+
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_tssi *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
+
+ h2c->len = cpu_to_le16(len);
+ h2c->phy = phy_idx;
+ h2c->ch = chan->channel;
+ h2c->bw = chan->band_width;
+ h2c->band = chan->band_type;
+ h2c->hwtx_en = true;
+ h2c->cv = hal->cv;
+ h2c->tssi_mode = tssi_mode;
+
+ rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
+ rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_iqk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_dpk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->dpk_enable = true;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_txgapk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
+
+ h2c->len = len;
+ h2c->ktype = 2;
+ h2c->phy = phy_idx;
+ h2c->kpath = RF_AB;
+ h2c->band = chan->band_type;
+ h2c->bw = chan->band_width;
+ h2c->ch = chan->channel;
+ h2c->cv = hal->cv;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_dack *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dack *)skb->data;
+
+ h2c->len = cpu_to_le32(len);
+ h2c->phy = cpu_to_le32(phy_idx);
+ h2c->type = cpu_to_le32(0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_rxdck *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->is_afe = false;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
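
The H2C helpers above each hand one RF calibration to firmware. The actual
trigger order lives in the chip-specific RFK flow, which is not part of
this hunk; a hypothetical caller invoking the full set might read:

	static void example_rfk_offload_all(struct rtw89_dev *rtwdev,
					    enum rtw89_phy_idx phy_idx)
	{
		/* error handling elided for brevity */
		rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);	/* share channel state */
		rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);		/* DAC calibration */
		rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);		/* I/Q imbalance */
		rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);		/* PA pre-distortion */
		rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);	/* TX gain gaps */
		rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);		/* RX DC offset */
	}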
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -3600,7 +5287,7 @@ static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
default:
return false;
case RTW89_C2H_CAT_MAC:
- return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
+ return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
case RTW89_C2H_CAT_OUTSRC:
return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
}
@@ -4050,8 +5737,66 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
-static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected)
+static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_be *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0, i;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ if (info->channel_6ghz &&
+ ch_info->pri_ch != info->channel_6ghz)
+ continue;
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ }
+
+ if (ch_info->ch_band == RTW89_BAND_6G) {
+ if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
+ !ch_info->is_psc) {
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ if (!req->duration_mandatory)
+ ch_info->period -= RTW89_DWELL_TIME_6G;
+ }
+ }
+
+ for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
+ ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period =
+ max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_warn(rtwdev, "Channel type out of bound\n");
+ break;
+ }
+}
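
The 6 GHz timing above combines two code paths: the list builder grants
every 6 GHz channel RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G up front,
and rtw89_hw_scan_add_chan_be() subtracts the dwell back out when the
channel will not be actively probed (non-PSC, or a wildcard-only SSID
list) and no mandatory duration was requested. Collapsed into one
hypothetical helper:

	static u8 example_6g_period(u8 requested, bool probed, bool mandatory)
	{
		u8 period;

		if (requested)			/* user-requested dwell wins */
			return requested;

		period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
		if (!probed && !mandatory)	/* passive: drop the probe dwell */
			period -= RTW89_DWELL_TIME_6G;

		return period;
	}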
+
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo *ch_info, *tmp;
@@ -4074,7 +5819,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
goto out;
}
- if (req->duration_mandatory)
+ if (req->duration)
ch_info->period = req->duration;
else if (channel->band == NL80211_BAND_6GHZ)
ch_info->period = RTW89_CHANNEL_TIME_6G +
@@ -4127,9 +5872,69 @@ out:
return ret;
}
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ enum rtw89_chan_type type;
+ int list_len, ret;
+ bool random_seq;
+ u32 idx;
+
+ random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
+ INIT_LIST_HEAD(&chan_list);
+
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (req->duration)
+ ch_info->period = req->duration;
+ else if (channel->band == NL80211_BAND_6GHZ)
+ ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
+ else
+ ch_info->period = RTW89_CHANNEL_TIME;
+
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
+
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+
+ rtwdev->scan_info.last_chan_idx = idx;
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
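
Note the cleanup discipline in rtw89_hw_scan_add_chan_list_be(): the out:
label frees the channel list unconditionally, so both the -ENOMEM path and
the success path (once the list has been serialized into the H2C) release
every node. The same shape in isolation:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct example_node {
		struct list_head list;
		int val;
	};

	static int example_build_and_consume(int n)
	{
		struct example_node *node, *tmp;
		LIST_HEAD(head);
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}
			node->val = i;
			list_add_tail(&node->list, &head);
		}
		/* ... consume the list here ... */
	out:
		list_for_each_entry_safe(node, tmp, &head, list) {
			list_del(&node->list);
			kfree(node);
		}
		return ret;
	}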
+
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
@@ -4137,7 +5942,7 @@ static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "Update probe request failed\n");
goto out;
}
- ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
+ ret = mac->add_chan_list(rtwdev, rtwvif, connected);
out:
return ret;
}
@@ -4154,9 +5959,11 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
rtwdev->scan_info.scanning_vif = vif;
rtwdev->scan_info.last_chan_idx = 0;
+ rtwdev->scan_info.abort = false;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
@@ -4181,10 +5988,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct cfg80211_scan_info info = {
.aborted = aborted,
};
- struct rtw89_vif *rtwvif;
if (!vif)
return;
@@ -4197,22 +6004,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_core_scan_complete(rtwdev, vif, true);
ieee80211_scan_completed(rtwdev->hw, &info);
ieee80211_wake_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
rtw89_release_pkt_list(rtwdev);
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
scan_info->last_chan_idx = 0;
scan_info->scanning_vif = NULL;
+ scan_info->abort = false;
rtw89_chanctx_proceed(rtwdev);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
- rtw89_hw_scan_offload(rtwdev, vif, false);
- rtw89_hw_scan_complete(rtwdev, vif, true);
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ int ret;
+
+ scan_info->abort = true;
+
+ ret = rtw89_hw_scan_offload(rtwdev, vif, false);
+ if (ret)
+ rtw89_hw_scan_complete(rtwdev, vif, true);
}
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
@@ -4231,6 +6045,7 @@ static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_scan_option opt = {0};
struct rtw89_vif *rtwvif;
bool connected;
@@ -4248,7 +6063,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (ret)
goto out;
}
- ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
+ opt.scan_mode = RTW89_SCAN_MODE_SA;
+ opt.band = RTW89_PHY_0;
+ opt.num_macc_role = 0;
+ opt.mlo_mode = rtwdev->mlo_dbcc_mode;
+ opt.num_opch = connected ? 1 : 0;
+ opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
+ }
+
+ ret = mac->scan_offload(rtwdev, &opt, rtwvif);
out:
return ret;
}
@@ -4922,6 +6748,372 @@ int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
+static
+u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
+ struct rtw89_h2c_mrc_add_slot *slot_h2c)
+{
+ bool fill_h2c = !!slot_h2c;
+ unsigned int i;
+
+ if (!fill_h2c)
+ goto calc_len;
+
+ slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
+ RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
+ le32_encode_bits(slot_arg->courtesy_en,
+ RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
+ le32_encode_bits(slot_arg->role_num,
+ RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
+ slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
+ le32_encode_bits(slot_arg->courtesy_target,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
+
+ for (i = 0; i < slot_arg->role_num; i++) {
+ slot_h2c->roles[i].w0 =
+ le32_encode_bits(slot_arg->roles[i].macid,
+ RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
+ le32_encode_bits(slot_arg->roles[i].role_type,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].is_master,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
+ le32_encode_bits(slot_arg->roles[i].en_tx_null,
+ RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
+ slot_h2c->roles[i].w1 =
+ le32_encode_bits(slot_arg->roles[i].central_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
+ le32_encode_bits(slot_arg->roles[i].primary_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
+ le32_encode_bits(slot_arg->roles[i].bw,
+ RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
+ le32_encode_bits(slot_arg->roles[i].band,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].null_early,
+ RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
+ le32_encode_bits(true,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
+ slot_h2c->roles[i].macid_main_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
+ slot_h2c->roles[i].macid_paired_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
+ }
+
+calc_len:
+ return struct_size(slot_h2c, roles, slot_arg->role_num);
+}
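
rtw89_fw_h2c_mrc_add_slot() doubles as a size calculator: when called with
a NULL slot_h2c it skips the fill and only returns struct_size() for the
flexible roles[] array, which lets rtw89_fw_h2c_mrc_add() below compute
the total skb length in a first pass and fill in a second. The idiom in
isolation (hypothetical types):

	#include <linux/overflow.h>

	struct example_rec {
		__le32 count;
		__le32 items[];
	};

	/* Fill when rec != NULL; always return the byte size for n items. */
	static size_t example_fill_or_size(struct example_rec *rec, u32 n)
	{
		u32 i;

		if (rec) {
			rec->count = cpu_to_le32(n);
			for (i = 0; i < n; i++)
				rec->items[i] = cpu_to_le32(i);
		}

		/* struct_size() inspects only types, so rec may be NULL */
		return struct_size(rec, items, n);
	}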
+
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_h2c_mrc_add *h2c_head;
+ struct sk_buff *skb;
+ unsigned int i;
+ void *tmp;
+ u32 len;
+ int ret;
+
+ len = sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ tmp = skb->data;
+
+ h2c_head = tmp;
+ h2c_head->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
+ le32_encode_bits(arg->sch_type,
+ RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
+ le32_encode_bits(arg->btc_in_sch,
+ RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
+
+ tmp += sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_ADD_MRC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_start *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_start *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_START_W0_SCH_IDX) |
+ le32_encode_bits(arg->old_sch_idx,
+ RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_START_W0_ACTION);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_START_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_del *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_del *)skb->data;
+
+ h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_DEL_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_req_tsf *h2c;
+ struct rtw89_mac_mrc_tsf_rpt *tmp;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, infos, arg->num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
+
+ h2c->req_tsf_num = arg->num;
+ for (i = 0; i < arg->num; i++)
+ h2c->infos[i] =
+ u8_encode_bits(arg->infos[i].band,
+ RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
+ u8_encode_bits(arg->infos[i].port,
+ RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_REQ_TSF, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
+ if (ret)
+ return ret;
+
+ tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
+ *rpt = *tmp;
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_bitmap *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
+ le32_encode_bits(arg->macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
+ h2c->w1 = le32_encode_bits(arg->client_macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg)
+{
+ struct rtw89_h2c_mrc_sync *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
+
+ h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
+ le32_encode_bits(arg->src.port,
+ RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
+ le32_encode_bits(arg->src.band,
+ RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
+ le32_encode_bits(arg->dest.port,
+ RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
+ le32_encode_bits(arg->dest.band,
+ RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
+ h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_SYNC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_duration *h2c;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, slots, arg->slot_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ for (i = 0; i < arg->slot_num; i++) {
+ h2c->slots[i] =
+ le32_encode_bits(arg->slots[i].slot_idx,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
+ le32_encode_bits(arg->slots[i].duration,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_DURATION, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 01016588b1fc..44311f65b4fa 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -64,6 +64,8 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_H2CREG_SCH_TX_EN_W1_MASK GENMASK(15, 0)
#define RTW89_H2CREG_SCH_TX_EN_W1_BAND BIT(16)
+#define RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN GENMASK(23, 16)
+
#define RTW89_H2CREG_MAX 4
#define RTW89_C2HREG_MAX 4
#define RTW89_C2HREG_HDR_LEN 2
@@ -95,7 +97,9 @@ enum rtw89_mac_h2c_type {
RTW89_FWCMD_H2CREG_FUNC_FWERR,
RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM,
- RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN
+ RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP = 0x6,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL = 0xA,
};
enum rtw89_mac_c2h_type {
@@ -104,7 +108,8 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_ERR_MSG,
RTW89_FWCMD_C2HREG_FUNC_PHY_CAP,
RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
- RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
+ RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK = 0xA,
+ RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF,
};
enum rtw89_fw_c2h_category {
@@ -149,6 +154,7 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_TWT,
RTW89_FW_LOG_COMP_RF,
RTW89_FW_LOG_COMP_MCC = 20,
+ RTW89_FW_LOG_COMP_SCAN = 28,
};
enum rtw89_pkt_offload_op {
@@ -169,6 +175,16 @@ enum rtw89_scanofld_notify_reason {
RTW89_SCAN_ENTER_CH_NOTIFY,
RTW89_SCAN_LEAVE_CH_NOTIFY,
RTW89_SCAN_END_SCAN_NOTIFY,
+ RTW89_SCAN_REPORT_NOTIFY,
+ RTW89_SCAN_CHKPT_NOTIFY,
+ RTW89_SCAN_ENTER_OP_NOTIFY,
+ RTW89_SCAN_LEAVE_OP_NOTIFY,
+};
+
+enum rtw89_scanofld_status {
+ RTW89_SCAN_STATUS_NOTIFY,
+ RTW89_SCAN_STATUS_SUCCESS,
+ RTW89_SCAN_STATUS_FAIL,
};
enum rtw89_chan_type {
@@ -184,6 +200,9 @@ enum rtw89_p2pps_action {
RTW89_P2P_ACT_TERMINATE = 3,
};
+#define RTW89_DEFAULT_CQM_HYST 4
+#define RTW89_DEFAULT_CQM_THOLD (-70)
+
enum rtw89_bcn_fltr_offload_mode {
RTW89_BCN_FLTR_OFFLOAD_MODE_0 = 0,
RTW89_BCN_FLTR_OFFLOAD_MODE_1,
@@ -216,6 +235,10 @@ struct rtw89_fw_hdr_section_info {
u32 dladdr;
u32 mssc;
u8 type;
+ bool ignore;
+ const u8 *key_addr;
+ u32 key_len;
+ u32 key_idx;
};
struct rtw89_fw_bin_info {
@@ -223,6 +246,8 @@ struct rtw89_fw_bin_info {
u32 hdr_len;
bool dynamic_hdr_en;
u32 dynamic_hdr_len;
+ bool dsp_checksum;
+ bool secure_section_exist;
struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM];
};
@@ -231,6 +256,15 @@ struct rtw89_fw_macid_pause_grp {
__le32 mask_grp[4];
} __packed;
+struct rtw89_fw_macid_pause_sleep_grp {
+ struct {
+ __le32 pause_grp[4];
+ __le32 pause_mask_grp[4];
+ __le32 sleep_grp[4];
+ __le32 sleep_mask_grp[4];
+ } __packed n[4];
+} __packed;
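
The pause_sleep group pairs each value word with a mask word per 32-macid
group; a set mask bit presumably tells firmware which macids the value
applies to, and the n[4] dimension presumably extends the addressable
macid range. A hypothetical single-macid update under those assumptions:

	static void example_macid_pause(struct rtw89_fw_macid_pause_sleep_grp *h2c,
					u8 macid, bool pause)
	{
		u8 grp = macid / 32;	/* which 32-bit group word */
		u8 sh = macid % 32;	/* bit position within it  */

		h2c->n[0].pause_grp[grp] = cpu_to_le32((u32)pause << sh);
		h2c->n[0].pause_mask_grp[grp] = cpu_to_le32(BIT(sh));
	}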
+
#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_CHANNEL_TIME_6G 20
@@ -243,6 +277,7 @@ struct rtw89_fw_macid_pause_grp {
#define RTW89_SCANOFLD_MAX_IE_LEN 512
#define RTW89_SCANOFLD_PKT_NONE 0xFF
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
#define RTW89_SCAN_LIST_GUARD 4
#define RTW89_SCAN_LIST_LIMIT \
@@ -274,9 +309,32 @@ struct rtw89_mac_chinfo {
bool is_psc;
};
-struct rtw89_scan_option {
- bool enable;
- bool target_ch_mode;
+struct rtw89_mac_chinfo_be {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 ch_band:2;
+ u8 dfs_ch:1;
+ u8 pause_data:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 notify_action:5;
+ u8 probe_id;
+ u8 leave_crit;
+ u8 chkpt_timer;
+ u8 leave_time;
+ u8 leave_th;
+ u16 tx_pkt_ctrl;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u8 sw_def;
+ u16 fw_probe0_ssids;
+ u16 fw_probe0_shortssids;
+ u16 fw_probe0_bssids;
+
+ struct list_head list;
+ bool is_psc;
};
struct rtw89_pktofld_info {
@@ -419,6 +477,7 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
#define FWDL_SECURITY_SECTION_TYPE 9
#define FWDL_SECURITY_SIGLEN 512
+#define FWDL_SECURITY_CHKSUM_LEN 8
struct rtw89_fw_dynhdr_sec {
__le32 w0;
@@ -472,6 +531,7 @@ struct rtw89_fw_hdr {
#define FW_HDR_W4_MIN GENMASK(31, 24)
#define FW_HDR_W5_YEAR GENMASK(31, 0)
#define FW_HDR_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_W7_DYN_HDR BIT(16)
#define FW_HDR_W7_CMD_VERSERION GENMASK(31, 24)
@@ -489,6 +549,7 @@ struct rtw89_fw_hdr_section_v1 {
#define FWSECTION_HDR_V1_W1_CHECKSUM BIT(28)
#define FWSECTION_HDR_V1_W1_REDL BIT(29)
#define FWSECTION_HDR_V1_W2_MSSC GENMASK(7, 0)
+#define FORMATTED_MSSC 0xFF
#define FWSECTION_HDR_V1_W2_BBMCU_IDX GENMASK(27, 24)
struct rtw89_fw_hdr_v1 {
@@ -521,12 +582,42 @@ struct rtw89_fw_hdr_v1 {
#define FW_HDR_V1_W5_YEAR GENMASK(15, 0)
#define FW_HDR_V1_W5_HDR_SIZE GENMASK(31, 16)
#define FW_HDR_V1_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_V1_W6_DSP_CHKSUM BIT(24)
+#define FW_HDR_V1_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_V1_W7_DYN_HDR BIT(16)
-static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
-{
- le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
-}
+enum rtw89_fw_mss_pool_rmp_tbl_type {
+ MSS_POOL_RMP_TBL_BITMASK = 0x0,
+ MSS_POOL_RMP_TBL_RECORD = 0x1,
+};
+
+#define FWDL_MSS_POOL_DEFKEYSETS_SIZE 8
+
+struct rtw89_fw_mss_pool_hdr {
+ u8 signature[8]; /* equal to mss_signature[] */
+ __le32 rmp_tbl_offset;
+ __le32 key_raw_offset;
+ u8 defen;
+ u8 rsvd[3];
+ u8 rmpfmt; /* enum rtw89_fw_mss_pool_rmp_tbl_type */
+ u8 mssdev_max;
+ __le16 keypair_num;
+ __le16 msscust_max;
+ __le16 msskey_num_max;
+ __le32 rsvd3;
+ u8 rmp_tbl[];
+} __packed;
+
+union rtw89_fw_section_mssc_content {
+ struct {
+ u8 pad[58];
+ __le32 v;
+ } __packed sb_sel_ver;
+ struct {
+ u8 pad[60];
+ __le16 v;
+ } __packed key_sign_len;
+} __packed;
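
Both union views deliberately pad to the same 62-byte footprint, placing
sb_sel_ver.v at byte offset 58 and key_sign_len.v at 60, so the member to
read depends on how the section's MSSC content is formatted. A
hypothetical accessor for one view:

	static u32 example_mssc_sb_sel_ver(const void *mssc_blob)
	{
		const union rtw89_fw_section_mssc_content *c = mssc_blob;

		BUILD_BUG_ON(sizeof(*c) != 62);	/* both views end at byte 62 */
		return le32_to_cpu(c->sb_sel_ver.v);
	}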
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
@@ -1198,6 +1289,149 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+struct rtw89_h2c_cctlinfo_ud_g7 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define CCTLINFO_G7_C0_MACID GENMASK(6, 0)
+#define CCTLINFO_G7_C0_OP BIT(7)
+
+#define CCTLINFO_G7_W0_DATARATE GENMASK(11, 0)
+#define CCTLINFO_G7_W0_DATA_GI_LTF GENMASK(14, 12)
+#define CCTLINFO_G7_W0_TRYRATE BIT(15)
+#define CCTLINFO_G7_W0_ARFR_CTRL GENMASK(17, 16)
+#define CCTLINFO_G7_W0_DIS_HE1SS_STBC BIT(18)
+#define CCTLINFO_G7_W0_ACQ_RPT_EN BIT(20)
+#define CCTLINFO_G7_W0_MGQ_RPT_EN BIT(21)
+#define CCTLINFO_G7_W0_ULQ_RPT_EN BIT(22)
+#define CCTLINFO_G7_W0_TWTQ_RPT_EN BIT(23)
+#define CCTLINFO_G7_W0_FORCE_TXOP BIT(24)
+#define CCTLINFO_G7_W0_DISRTSFB BIT(25)
+#define CCTLINFO_G7_W0_DISDATAFB BIT(26)
+#define CCTLINFO_G7_W0_NSTR_EN BIT(27)
+#define CCTLINFO_G7_W0_AMPDU_DENSITY GENMASK(31, 28)
+#define CCTLINFO_G7_W0_ALL (GENMASK(31, 20) | GENMASK(18, 0))
+#define CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W1_RTS_TXCNT_LMT GENMASK(15, 12)
+#define CCTLINFO_G7_W1_RTSRATE GENMASK(27, 16)
+#define CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE GENMASK(31, 28)
+#define CCTLINFO_G7_W1_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W2_DATA_TX_CNT_LMT GENMASK(5, 0)
+#define CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL BIT(6)
+#define CCTLINFO_G7_W2_MAX_AGG_NUM_SEL BIT(7)
+#define CCTLINFO_G7_W2_RTS_EN BIT(8)
+#define CCTLINFO_G7_W2_CTS2SELF_EN BIT(9)
+#define CCTLINFO_G7_W2_CCA_RTS GENMASK(11, 10)
+#define CCTLINFO_G7_W2_HW_RTS_EN BIT(12)
+#define CCTLINFO_G7_W2_RTS_DROP_DATA_MODE GENMASK(14, 13)
+#define CCTLINFO_G7_W2_PRELD_EN BIT(15)
+#define CCTLINFO_G7_W2_AMPDU_MAX_LEN GENMASK(26, 16)
+#define CCTLINFO_G7_W2_UL_MU_DIS BIT(27)
+#define CCTLINFO_G7_W2_AMPDU_MAX_TIME GENMASK(31, 28)
+#define CCTLINFO_G7_W2_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W3_MAX_AGG_NUM GENMASK(7, 0)
+#define CCTLINFO_G7_W3_DATA_BW GENMASK(10, 8)
+#define CCTLINFO_G7_W3_DATA_BW_ER BIT(11)
+#define CCTLINFO_G7_W3_BA_BMAP GENMASK(14, 12)
+#define CCTLINFO_G7_W3_VCS_STBC BIT(15)
+#define CCTLINFO_G7_W3_VO_LFTIME_SEL GENMASK(18, 16)
+#define CCTLINFO_G7_W3_VI_LFTIME_SEL GENMASK(21, 19)
+#define CCTLINFO_G7_W3_BE_LFTIME_SEL GENMASK(24, 22)
+#define CCTLINFO_G7_W3_BK_LFTIME_SEL GENMASK(27, 25)
+#define CCTLINFO_G7_W3_AMPDU_TIME_SEL BIT(28)
+#define CCTLINFO_G7_W3_AMPDU_LEN_SEL BIT(29)
+#define CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL BIT(30)
+#define CCTLINFO_G7_W3_LSIG_TXOP_EN BIT(31)
+#define CCTLINFO_G7_W3_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W4_MULTI_PORT_ID GENMASK(2, 0)
+#define CCTLINFO_G7_W4_BYPASS_PUNC BIT(3)
+#define CCTLINFO_G7_W4_MBSSID GENMASK(7, 4)
+#define CCTLINFO_G7_W4_DATA_DCM BIT(8)
+#define CCTLINFO_G7_W4_DATA_ER BIT(9)
+#define CCTLINFO_G7_W4_DATA_LDPC BIT(10)
+#define CCTLINFO_G7_W4_DATA_STBC BIT(11)
+#define CCTLINFO_G7_W4_A_CTRL_BQR BIT(12)
+#define CCTLINFO_G7_W4_A_CTRL_BSR BIT(14)
+#define CCTLINFO_G7_W4_A_CTRL_CAS BIT(15)
+#define CCTLINFO_G7_W4_ACT_SUBCH_CBW GENMASK(31, 16)
+#define CCTLINFO_G7_W4_ALL (GENMASK(31, 14) | GENMASK(12, 0))
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 GENMASK(1, 0)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 GENMASK(3, 2)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 GENMASK(5, 4)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 GENMASK(7, 6)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4 GENMASK(9, 8)
+#define CCTLINFO_G7_W5_SR_RATE GENMASK(14, 10)
+#define CCTLINFO_G7_W5_TID_DISABLE GENMASK(23, 16)
+#define CCTLINFO_G7_W5_ADDR_CAM_INDEX GENMASK(31, 24)
+#define CCTLINFO_G7_W5_ALL (GENMASK(31, 16) | GENMASK(14, 0))
+#define CCTLINFO_G7_W6_AID12_PAID GENMASK(11, 0)
+#define CCTLINFO_G7_W6_RESP_REF_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W6_ULDL BIT(31)
+#define CCTLINFO_G7_W6_ALL (BIT(31) | GENMASK(23, 0))
+#define CCTLINFO_G7_W7_NC GENMASK(2, 0)
+#define CCTLINFO_G7_W7_NR GENMASK(5, 3)
+#define CCTLINFO_G7_W7_NG GENMASK(7, 6)
+#define CCTLINFO_G7_W7_CB GENMASK(9, 8)
+#define CCTLINFO_G7_W7_CS GENMASK(11, 10)
+#define CCTLINFO_G7_W7_CSI_STBC_EN BIT(13)
+#define CCTLINFO_G7_W7_CSI_LDPC_EN BIT(14)
+#define CCTLINFO_G7_W7_CSI_PARA_EN BIT(15)
+#define CCTLINFO_G7_W7_CSI_FIX_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W7_CSI_BW GENMASK(31, 29)
+#define CCTLINFO_G7_W7_ALL (GENMASK(31, 29) | GENMASK(27, 13) | GENMASK(11, 0))
+#define CCTLINFO_G7_W8_ALL_ACK_SUPPORT BIT(0)
+#define CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT BIT(1)
+#define CCTLINFO_G7_W8_BSR_OM_UPD_EN BIT(2)
+#define CCTLINFO_G7_W8_MACID_FWD_IDC BIT(3)
+#define CCTLINFO_G7_W8_AZ_SEC_EN BIT(4)
+#define CCTLINFO_G7_W8_CSI_SEC_EN BIT(5)
+#define CCTLINFO_G7_W8_FIX_UL_ADDRCAM_IDX BIT(6)
+#define CCTLINFO_G7_W8_CTRL_CNT_VLD BIT(7)
+#define CCTLINFO_G7_W8_CTRL_CNT GENMASK(11, 8)
+#define CCTLINFO_G7_W8_RESP_SEC_TYPE GENMASK(15, 12)
+#define CCTLINFO_G7_W8_ALL GENMASK(15, 0)
+/* W9~13 are reserved */
+#define CCTLINFO_G7_W14_VO_CURR_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W14_VI_CURR_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W14_BE_CURR_RATE_L GENMASK(31, 24)
+#define CCTLINFO_G7_W14_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W15_BE_CURR_RATE_H GENMASK(3, 0)
+#define CCTLINFO_G7_W15_BK_CURR_RATE GENMASK(15, 4)
+#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
+
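
The G7 control-info command pairs each value word w0..w15 with a mask word
m0..m15, with the *_ALL defines listing the writable bits per word; a set
mask bit presumably tells firmware to apply the corresponding value bit. A
hedged sketch of a single-field update:

	static void example_cctl_set_datarate(struct rtw89_h2c_cctlinfo_ud_g7 *h2c,
					      u16 rate)
	{
		h2c->w0 = le32_encode_bits(rate, CCTLINFO_G7_W0_DATARATE);
		/* mark only the DATARATE bits for update */
		h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DATARATE);
	}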
static inline void SET_DCTL_MACID_V1(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
@@ -1500,105 +1734,98 @@ static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
GENMASK(31, 24));
}
-static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
-}
-
-static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
-}
-
-static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
-}
-
-static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
-}
-
-static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
-}
-
-static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
-}
-
-static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
-}
-
-static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
-}
+struct rtw89_h2c_bcn_upd {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
-}
+#define RTW89_H2C_BCN_UPD_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_W2_CSA_OFST GENMASK(31, 17)
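
This conversion replaces the SET_BCN_UPD_*() accessors with a packed
struct plus GENMASK() defines, so a word is now built in one expression.
One behavioral detail moves to the caller: the old
SET_BCN_UPD_GRP_IE_OFST() silently applied (val - 24) | BIT(7), which the
new encoding must presumably do explicitly. Sketch of a W0 fill under that
assumption:

	static void example_fill_bcn_upd_w0(struct rtw89_h2c_bcn_upd *h2c,
					    u8 port, u8 mbssid, u8 band,
					    u8 grp_ie_ofst)
	{
		h2c->w0 = le32_encode_bits(port, RTW89_H2C_BCN_UPD_W0_PORT) |
			  le32_encode_bits(mbssid, RTW89_H2C_BCN_UPD_W0_MBSSID) |
			  le32_encode_bits(band, RTW89_H2C_BCN_UPD_W0_BAND) |
			  /* old accessor folded in (val - 24) | BIT(7) */
			  le32_encode_bits((grp_ie_ofst - 24) | BIT(7),
					   RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
	}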
+
+struct rtw89_h2c_bcn_upd_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 w16;
+ __le32 w17;
+ __le32 w18;
+ __le32 w19;
+ __le32 w20;
+ __le32 w21;
+ __le32 w22;
+ __le32 w23;
+ __le32 w24;
+ __le32 w25;
+ __le32 w26;
+ __le32 w27;
+ __le32 w28;
+ __le32 w29;
+} __packed;
-static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
-}
+#define RTW89_H2C_BCN_UPD_BE_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_BE_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_BE_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_BE_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID_EXT GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_BE_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_BE_W2_CSA_OFST GENMASK(31, 17)
+#define RTW89_H2C_BCN_UPD_BE_W3_MLIE_CSA_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W3_CRITICAL_UPD_FLAG_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP1_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP2_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP3_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP4_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP5_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP6_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_VAP7_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
@@ -1620,70 +1847,46 @@ static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
}
-static inline void SET_JOININFO_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_JOININFO_OP(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(8));
-}
-
-static inline void SET_JOININFO_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(9));
-}
-
-static inline void SET_JOININFO_WMM(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10));
-}
-
-static inline void SET_JOININFO_TGR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(12));
-}
-
-static inline void SET_JOININFO_ISHESTA(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(13));
-}
-
-static inline void SET_JOININFO_DLBW(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14));
-}
-
-static inline void SET_JOININFO_TF_MAC_PAD(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16));
-}
-
-static inline void SET_JOININFO_DL_T_PE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18));
-}
-
-static inline void SET_JOININFO_PORT_ID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21));
-}
+enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */
+ RTW89_FW_N_AC_STA = 0,
+ RTW89_FW_AX_STA = 1,
+ RTW89_FW_BE_STA = 2,
+};
-static inline void SET_JOININFO_NET_TYPE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24));
-}
+struct rtw89_h2c_join {
+ __le32 w0;
+} __packed;
-static inline void SET_JOININFO_WIFI_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26));
-}
+struct rtw89_h2c_join_v1 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30));
-}
+#define RTW89_H2C_JOININFO_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W0_OP BIT(8)
+#define RTW89_H2C_JOININFO_W0_BAND BIT(9)
+#define RTW89_H2C_JOININFO_W0_WMM GENMASK(11, 10)
+#define RTW89_H2C_JOININFO_W0_TGR BIT(12)
+#define RTW89_H2C_JOININFO_W0_ISHESTA BIT(13)
+#define RTW89_H2C_JOININFO_W0_DLBW GENMASK(15, 14)
+#define RTW89_H2C_JOININFO_W0_TF_MAC_PAD GENMASK(17, 16)
+#define RTW89_H2C_JOININFO_W0_DL_T_PE GENMASK(20, 18)
+#define RTW89_H2C_JOININFO_W0_PORT_ID GENMASK(23, 21)
+#define RTW89_H2C_JOININFO_W0_NET_TYPE GENMASK(25, 24)
+#define RTW89_H2C_JOININFO_W0_WIFI_ROLE GENMASK(29, 26)
+#define RTW89_H2C_JOININFO_W0_SELF_ROLE GENMASK(31, 30)
+#define RTW89_H2C_JOININFO_W1_STA_TYPE GENMASK(2, 0)
+#define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3)
+#define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4)
+#define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12)
+#define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13)
+#define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14)
+#define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15)
+#define RTW89_H2C_JOININFO_W1_EMLSR_PADDING GENMASK(18, 16)
+#define RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY GENMASK(21, 19)
+#define RTW89_H2C_JOININFO_W2_MACID_EXT GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT GENMASK(15, 8)
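
enum rtw89_fw_sta_type carries the values firmware expects in
RTW89_H2C_JOININFO_W1_STA_TYPE. A hypothetical mapping from a peer's
capabilities, assuming EHT outranks HE:

	static enum rtw89_fw_sta_type example_sta_type(bool has_he, bool has_eht)
	{
		if (has_eht)
			return RTW89_FW_BE_STA;		/* 802.11be peer */
		if (has_he)
			return RTW89_FW_AX_STA;		/* 802.11ax peer */
		return RTW89_FW_N_AC_STA;		/* 802.11n/ac or older */
	}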
struct rtw89_h2c_notify_dbcc {
__le32 w0;
@@ -1741,60 +1944,47 @@ static inline void SET_LOG_CFG_COMP_EXT(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0));
}
-static inline void SET_BA_CAM_VALID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void SET_BA_CAM_INIT_REQ(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(1));
-}
-
-static inline void SET_BA_CAM_ENTRY_IDX(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2));
-}
-
-static inline void SET_BA_CAM_TID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4));
-}
-
-static inline void SET_BA_CAM_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BA_CAM_BMAP_SIZE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16));
-}
-
-static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
-}
-
-static inline void SET_BA_CAM_UID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
-}
+struct rtw89_h2c_ba_cam {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
-}
+#define RTW89_H2C_BA_CAM_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_W0_ENTRY_IDX GENMASK(3, 2)
+#define RTW89_H2C_BA_CAM_W0_TID GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_W0_MACID GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_W0_BMAP_SIZE GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_W0_SSN GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_W1_UID GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_W1_STD_EN BIT(8)
+#define RTW89_H2C_BA_CAM_W1_BAND BIT(9)
+#define RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1 GENMASK(31, 28)
+
+struct rtw89_h2c_ba_cam_v1 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
-}
+#define RTW89_H2C_BA_CAM_V1_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_V1_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_V1_W0_TID_MASK GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_V1_W0_MACID_MASK GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_V1_W0_SSN_MASK GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_V1_W1_UID_VALUE_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN BIT(8)
+#define RTW89_H2C_BA_CAM_V1_W1_BAND_SEL BIT(9)
+#define RTW89_H2C_BA_CAM_V1_W1_MLD_EN BIT(10)
+#define RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK GENMASK(31, 24)
+
+struct rtw89_h2c_ba_cam_init {
+ __le32 w0;
+} __packed;
-static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
-}
+#define RTW89_H2C_BA_CAM_INIT_USERS_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_INIT_OFFSET_MASK GENMASK(19, 12)
+#define RTW89_H2C_BA_CAM_INIT_BAND_SEL BIT(24)
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
@@ -1846,6 +2036,17 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
}
+struct rtw89_h2c_lps_ch_info {
+ struct {
+ u8 pri_ch;
+ u8 central_ch;
+ u8 bw;
+ u8 band;
+ } __packed info[2];
+
+ __le32 mlo_dbcc_mode_lps;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2128,9 +2329,15 @@ enum rtw89_btc_btf_set {
SET_BT_IGNORE_WLAN_ACT,
SET_BT_TX_PWR,
SET_BT_LNA_CONSTRAIN,
- SET_BT_GOLDEN_RX_RANGE,
+ SET_BT_QUERY_DEV_LIST,
+ SET_BT_QUERY_DEV_INFO,
SET_BT_PSD_REPORT,
SET_H2C_TEST,
+ SET_IOFLD_RF,
+ SET_IOFLD_BB,
+ SET_IOFLD_MAC,
+ SET_IOFLD_SCBD,
+ SET_H2C_MACRO,
SET_MAX1,
};
@@ -2144,6 +2351,10 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_CTRL,
CXDRVINFO_SCAN,
CXDRVINFO_TRX, /* WL traffic to WL fw */
+ CXDRVINFO_TXPWR,
+ CXDRVINFO_FDDT,
+ CXDRVINFO_MLO,
+ CXDRVINFO_OSI,
CXDRVINFO_MAX,
};
@@ -2170,7 +2381,19 @@ struct rtw89_h2c_cxhdr {
u8 len;
} __packed;
+struct rtw89_h2c_cxhdr_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+} __packed;
+
+struct rtw89_h2c_cxctrl_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_ctrl_v7 ctrl;
+} __packed;
+
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
+#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
struct rtw89_h2c_cxinit {
struct rtw89_h2c_cxhdr hdr;
@@ -2204,6 +2427,11 @@ struct rtw89_h2c_cxinit {
#define RTW89_H2C_CXINIT_INFO_CX_OTHER BIT(3)
#define RTW89_H2C_CXINIT_INFO_BT_ONLY BIT(4)
+struct rtw89_h2c_cxinit_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_init_info_v7 init;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(void *cmd, u8 val)
{
u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0));
@@ -2569,135 +2797,91 @@ static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
}
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
-}
+struct rtw89_h2c_chinfo_elem {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
-}
+#define RTW89_H2C_CHINFO_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_W1_ACTION GENMASK(7, 3)
+#define RTW89_H2C_CHINFO_W1_NUM_PKT GENMASK(11, 8)
+#define RTW89_H2C_CHINFO_W1_TX BIT(12)
+#define RTW89_H2C_CHINFO_W1_PAUSE_DATA BIT(13)
+#define RTW89_H2C_CHINFO_W1_BAND GENMASK(15, 14)
+#define RTW89_H2C_CHINFO_W1_PKT_ID GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W1_DFS BIT(24)
+#define RTW89_H2C_CHINFO_W1_TX_NULL BIT(25)
+#define RTW89_H2C_CHINFO_W1_RANDOM BIT(26)
+#define RTW89_H2C_CHINFO_W1_CFG_TX BIT(27)
+#define RTW89_H2C_CHINFO_W2_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W2_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W2_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W2_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W3_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W3_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W3_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W3_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W4_POWER_IDX GENMASK(15, 0)
+
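Editor's note: the hunk above replaces the per-field RTW89_SET_FWCMD_CHINFO_* inline setters with a struct of __le32 words plus GENMASK field masks. A minimal sketch of the new access pattern, assuming only the standard <linux/bitfield.h> helpers (function name and values are illustrative, not part of the patch):

#include <linux/bitfield.h>

static void chinfo_elem_fill_demo(struct rtw89_h2c_chinfo_elem *elem)
{
	/* pack period/dwell/channel fields into word 0 of the element */
	elem->w0 = le32_encode_bits(105, RTW89_H2C_CHINFO_W0_PERIOD) |
		   le32_encode_bits(36, RTW89_H2C_CHINFO_W0_CENTER_CH) |
		   le32_encode_bits(36, RTW89_H2C_CHINFO_W0_PRI_CH);

	/* read a field back; the mask carries both position and width */
	WARN_ON(le32_get_bits(elem->w0, RTW89_H2C_CHINFO_W0_PRI_CH) != 36);
}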
+struct rtw89_h2c_chinfo_elem_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
-}
+#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_BE_W1_CH_BAND GENMASK(4, 3)
+#define RTW89_H2C_CHINFO_BE_W1_DFS BIT(5)
+#define RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA BIT(6)
+#define RTW89_H2C_CHINFO_BE_W1_TX_NULL BIT(7)
+#define RTW89_H2C_CHINFO_BE_W1_RANDOM BIT(8)
+#define RTW89_H2C_CHINFO_BE_W1_NOTIFY GENMASK(13, 9)
+#define RTW89_H2C_CHINFO_BE_W1_PROBE BIT(14)
+#define RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT GENMASK(17, 15)
+#define RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W4_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W4_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W4_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W4_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W5_SW_DEF GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+
+struct rtw89_h2c_chinfo {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
-}
+#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
+#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
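Editor's note: because elem[] is annotated with __counted_by(ch_num), FORTIFY/UBSAN bound array accesses by ch_num, so the counter must be assigned before elem[] is indexed. A hedged allocation sketch, assuming struct_size() from <linux/overflow.h> (the function is illustrative, not from this patch):

static struct rtw89_h2c_chinfo *chinfo_alloc_demo(u8 ch_num)
{
	struct rtw89_h2c_chinfo *h2c;

	h2c = kzalloc(struct_size(h2c, elem, ch_num), GFP_KERNEL);
	if (!h2c)
		return NULL;

	/* set the counter before touching elem[] */
	h2c->ch_num = ch_num;
	h2c->elem_size = sizeof(h2c->elem[0]);

	return h2c;
}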
struct rtw89_h2c_scanofld {
__le32 w0;
@@ -2726,6 +2910,79 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0)
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
+struct rtw89_h2c_scanofld_be_macc_role {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT GENMASK(4, 2)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be_opch {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND GENMASK(17, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT GENMASK(20, 18)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY GENMASK(22, 21)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL BIT(23)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND GENMASK(9, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW GENMASK(12, 10)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY GENMASK(14, 13)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS GENMASK(18, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ struct rtw89_h2c_scanofld_be_macc_role role[];
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE GENMASK(3, 2)
+#define RTW89_H2C_SCANOFLD_BE_W0_REPEAT GENMASK(5, 4)
+#define RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END BIT(6)
+#define RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH BIT(7)
+#define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24)
+#define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_W2_NORM_CY GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_OPCH_END GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W3_PROBEID GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W4_DELAY_START GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0)
+
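Editor's note: struct rtw89_h2c_scanofld_be carries a variable number of macc-role entries followed by op-channel entries, so the command length is derived from the counts rather than a plain sizeof(). A minimal sketch (variable names are illustrative):

u32 len = sizeof(struct rtw89_h2c_scanofld_be) +
	  num_macc_role * sizeof(struct rtw89_h2c_scanofld_be_macc_role) +
	  num_op * sizeof(struct rtw89_h2c_scanofld_be_opch);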
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
@@ -3160,6 +3417,225 @@ inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd + 4, val, GENMASK(31, 0));
}
+enum rtw89_h2c_mrc_sch_types {
+ RTW89_H2C_MRC_SCH_BAND0_ONLY = 0,
+ RTW89_H2C_MRC_SCH_BAND1_ONLY = 1,
+ RTW89_H2C_MRC_SCH_DUAL_BAND = 2,
+};
+
+enum rtw89_h2c_mrc_role_types {
+ RTW89_H2C_MRC_ROLE_WIFI = 0,
+ RTW89_H2C_MRC_ROLE_BT = 1,
+ RTW89_H2C_MRC_ROLE_EMPTY = 2,
+};
+
+#define RTW89_MAC_MRC_MAX_ADD_SLOT_NUM 3
+#define RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT 1 /* before MLO */
+
+struct rtw89_fw_mrc_add_slot_arg {
+ u16 duration; /* unit: TU */
+ bool courtesy_en;
+ u8 courtesy_period;
+ u8 courtesy_target; /* slot idx */
+
+ unsigned int role_num;
+ struct {
+ enum rtw89_h2c_mrc_role_types role_type;
+ bool is_master;
+ bool en_tx_null;
+ enum rtw89_band band;
+ enum rtw89_bandwidth bw;
+ u8 macid;
+ u8 central_ch;
+ u8 primary_ch;
+ u8 null_early; /* unit: TU */
+
+ /* if MLD, for macid: [0, chip::support_mld_num)
+ * otherwise, for macid: [0, 32)
+ */
+ u32 macid_main_bitmap;
+ /* for MLD, bit X maps to macid: X + chip::support_mld_num */
+ u32 macid_paired_bitmap;
+ } roles[RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT];
+};
+
+struct rtw89_fw_mrc_add_arg {
+ u8 sch_idx;
+ enum rtw89_h2c_mrc_sch_types sch_type;
+ bool btc_in_sch;
+
+ unsigned int slot_num;
+ struct rtw89_fw_mrc_add_slot_arg slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_add_role {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 macid_main_bitmap;
+ __le32 macid_paired_bitmap;
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_ROLE_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE GENMASK(23, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER BIT(24)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE BIT(25)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN BIT(26)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN BIT(27)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_BW GENMASK(19, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE GENMASK(21, 20)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS BIT(22)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC BIT(23)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_TYPE GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_MACID GENMASK(23, 16)
+
+struct rtw89_h2c_mrc_add_slot {
+ __le32 w0;
+ __le32 w1;
+ struct rtw89_h2c_mrc_add_role roles[];
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_SLOT_W0_DURATION GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN BIT(17)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_add {
+ __le32 w0;
+ /* Logically, a flexible array of struct rtw89_h2c_mrc_add_slot
+ * follows here, but that struct itself ends in another flexible
+ * array, so its entries cannot be accessed correctly through a
+ * member of this struct. To avoid misuse, we don't declare one.
+ */
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_ADD_W0_SCH_TYPE GENMASK(7, 4)
+#define RTW89_H2C_MRC_ADD_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH BIT(16)
+
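Editor's note: because each appended slot ends in its own flexible roles[] array (see the comment in struct rtw89_h2c_mrc_add above), the total H2C length has to be accumulated slot by slot. A hedged sketch of that accounting, with arg mirroring struct rtw89_fw_mrc_add_arg (not taken verbatim from this patch):

static u32 mrc_add_len_demo(const struct rtw89_fw_mrc_add_arg *arg)
{
	u32 len = sizeof(struct rtw89_h2c_mrc_add);
	unsigned int i;

	for (i = 0; i < arg->slot_num; i++)
		len += sizeof(struct rtw89_h2c_mrc_add_slot) +
		       arg->slots[i].role_num *
		       sizeof(struct rtw89_h2c_mrc_add_role);

	return len;
}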
+enum rtw89_h2c_mrc_start_actions {
+ RTW89_H2C_MRC_START_ACTION_START_NEW = 0,
+ RTW89_H2C_MRC_START_ACTION_REPLACE_OLD = 1,
+};
+
+struct rtw89_fw_mrc_start_arg {
+ u8 sch_idx;
+ u8 old_sch_idx;
+ u64 start_tsf;
+ enum rtw89_h2c_mrc_start_actions action;
+};
+
+struct rtw89_h2c_mrc_start {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+} __packed;
+
+#define RTW89_H2C_MRC_START_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_START_W0_OLD_SCH_IDX GENMASK(7, 4)
+#define RTW89_H2C_MRC_START_W0_ACTION GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_del {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_MRC_DEL_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_DEL_W0_DEL_ALL BIT(4)
+#define RTW89_H2C_MRC_DEL_W0_STOP_ONLY BIT(5)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_EN BIT(6)
+#define RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX GENMASK(15, 8)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_MACID GENMASK(31, 16)
+
+#define RTW89_MAC_MRC_MAX_REQ_TSF_NUM 2
+
+struct rtw89_fw_mrc_req_tsf_arg {
+ unsigned int num;
+ struct {
+ u8 band;
+ u8 port;
+ } infos[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+struct rtw89_h2c_mrc_req_tsf {
+ u8 req_tsf_num;
+ u8 infos[] __counted_by(req_tsf_num);
+} __packed;
+
+#define RTW89_H2C_MRC_REQ_TSF_INFO_BAND GENMASK(3, 0)
+#define RTW89_H2C_MRC_REQ_TSF_INFO_PORT GENMASK(7, 4)
+
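Editor's note: the infos[] entries are single bytes, so the band/port masks are applied with the u8 bitfield helpers rather than the __le32 ones. A one-statement sketch, assuming <linux/bitfield.h>:

h2c->infos[i] = u8_encode_bits(arg->infos[i].band,
			       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
		u8_encode_bits(arg->infos[i].port,
			       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);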
+enum rtw89_h2c_mrc_upd_bitmap_actions {
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL = 0,
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD = 1,
+};
+
+struct rtw89_fw_mrc_upd_bitmap_arg {
+ u8 sch_idx;
+ u8 macid;
+ u8 client_macid;
+ enum rtw89_h2c_mrc_upd_bitmap_actions action;
+};
+
+struct rtw89_h2c_mrc_upd_bitmap {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION BIT(4)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_MACID GENMASK(31, 16)
+#define RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID GENMASK(15, 0)
+
+struct rtw89_fw_mrc_sync_arg {
+ u8 offset; /* unit: TU */
+ struct {
+ u8 band;
+ u8 port;
+ } src, dest;
+};
+
+struct rtw89_h2c_mrc_sync {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_SYNC_W0_SYNC_EN BIT(0)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_PORT GENMASK(11, 8)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_BAND GENMASK(15, 12)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_PORT GENMASK(19, 16)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_BAND GENMASK(23, 20)
+#define RTW89_H2C_MRC_SYNC_W1_OFFSET GENMASK(15, 0)
+
+struct rtw89_fw_mrc_upd_duration_arg {
+ u8 sch_idx;
+ u64 start_tsf;
+
+ unsigned int slot_num;
+ struct {
+ u8 slot_idx;
+ u16 duration; /* unit: TU */
+ } slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_upd_duration {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+ __le32 slots[];
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH BIT(16)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16)
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3275,20 +3751,29 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
-#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
-#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
-#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+struct rtw89_c2h_scanofld {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W2_RSN GENMASK(19, 16)
+#define RTW89_C2H_SCANOFLD_W2_STATUS GENMASK(23, 20)
+#define RTW89_C2H_SCANOFLD_W2_PERIOD GENMASK(31, 24)
+#define RTW89_C2H_SCANOFLD_W5_TX_FAIL GENMASK(3, 0)
+#define RTW89_C2H_SCANOFLD_W5_AIR_DENSITY GENMASK(7, 4)
+#define RTW89_C2H_SCANOFLD_W5_BAND GENMASK(25, 24)
+#define RTW89_C2H_SCANOFLD_W5_MAC_IDX BIT(26)
+#define RTW89_C2H_SCANOFLD_W6_SW_DEF GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
+#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
+#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3339,6 +3824,36 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_mac_mrc_tsf_rpt {
+ unsigned int num;
+ u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+static_assert(sizeof(struct rtw89_mac_mrc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
+
+struct rtw89_c2h_mrc_tsf_rpt_info {
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+struct rtw89_c2h_mrc_tsf_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ struct rtw89_c2h_mrc_tsf_rpt_info infos[];
+} __packed;
+
+#define RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM GENMASK(7, 0)
+
+struct rtw89_c2h_mrc_status_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+#define RTW89_C2H_MRC_STATUS_RPT_W2_STATUS GENMASK(5, 0)
+#define RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX GENMASK(7, 6)
+
struct rtw89_c2h_pkt_ofld_rsp {
__le32 w0;
__le32 w1;
@@ -3647,6 +4162,9 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_MAC_BCN_UPD 0x5
#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9
#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
+#define H2C_FUNC_MAC_DCTLINFO_UD_V2 0xc
+#define H2C_FUNC_MAC_BCN_UPD_BE 0xd
+#define H2C_FUNC_MAC_CCTLINFO_UD_G7 0x11
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -3672,6 +4190,8 @@ enum rtw89_fw_ofld_h2c_func {
H2C_FUNC_CFG_BCNFLTR = 0x1e,
H2C_FUNC_OFLD_RSSI = 0x1f,
H2C_FUNC_OFLD_TP = 0x20,
+ H2C_FUNC_MAC_MACID_PAUSE_SLEEP = 0x28,
+ H2C_FUNC_SCANOFLD_BE = 0x2c,
NUM_OF_RTW89_FW_OFLD_H2C_FUNC,
};
@@ -3683,6 +4203,14 @@ enum rtw89_fw_ofld_h2c_func {
RTW89_FW_OFLD_WAIT_COND(RTW89_PKT_OFLD_WAIT_TAG(pkt_id, pkt_op), \
H2C_FUNC_PACKET_OFLD)
+#define RTW89_SCANOFLD_WAIT_COND_ADD_CH RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH)
+
+#define RTW89_SCANOFLD_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_BE_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD_BE)
+#define RTW89_SCANOFLD_BE_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD_BE)
+
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
#define H2C_FUNC_MAC_SEC_UPD 0x1
@@ -3690,6 +4218,8 @@ enum rtw89_fw_ofld_h2c_func {
/* CLASS 12 - BA CAM */
#define H2C_CL_BA_CAM 0xc
#define H2C_FUNC_MAC_BA_CAM 0x0
+#define H2C_FUNC_MAC_BA_CAM_V1 0x1
+#define H2C_FUNC_MAC_BA_CAM_INIT 0x2
/* CLASS 14 - MCC */
#define H2C_CL_MCC 0xe
@@ -3710,15 +4240,50 @@ enum rtw89_mcc_h2c_func {
#define RTW89_MCC_WAIT_COND(group, func) \
((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+/* CLASS 24 - MRC */
+#define H2C_CL_MRC 0x18
+enum rtw89_mrc_h2c_func {
+ H2C_FUNC_MRC_REQ_TSF = 0x0,
+ H2C_FUNC_ADD_MRC = 0x1,
+ H2C_FUNC_START_MRC = 0x2,
+ H2C_FUNC_DEL_MRC = 0x3,
+ H2C_FUNC_MRC_SYNC = 0x4,
+ H2C_FUNC_MRC_UPD_DURATION = 0x5,
+ H2C_FUNC_MRC_UPD_BITMAP = 0x6,
+
+ NUM_OF_RTW89_MRC_H2C_FUNC,
+};
+
+/* an MRC sch_idx can be treated like an MCC group */
+#define RTW89_MRC_WAIT_COND(sch_idx, func) \
+ ((sch_idx) * NUM_OF_RTW89_MRC_H2C_FUNC + (func))
+
+#define RTW89_MRC_WAIT_COND_REQ_TSF \
+ RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+
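Editor's note: RTW89_MRC_WAIT_COND() gives every (sch_idx, func) pair a distinct completion key, mirroring RTW89_MCC_WAIT_COND(). A hedged sketch of the request/report pairing, using the driver's existing rtw89_wait_for_cond()/rtw89_complete_cond() helpers:

/* requester side, after queueing the H2C for sch_idx */
unsigned int cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_START_MRC);
int ret = rtw89_wait_for_cond(&rtwdev->mcc.wait, cond);

/* C2H side: the handler completes the same key on
 * RTW89_MAC_MRC_START_SCH_OK (see rtw89_mac_c2h_mrc_status_rpt
 * in the mac.c hunks below)
 */
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);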
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0
+#define H2C_CL_OUTSRC_DM 0x2
+#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+#define H2C_CL_OUTSRC_RF_FW_RFK 0xb
+
+enum rtw89_rfk_offload_h2c_func {
+ H2C_FUNC_RFK_TSSI_OFFLOAD = 0x0,
+ H2C_FUNC_RFK_IQK_OFFLOAD = 0x1,
+ H2C_FUNC_RFK_DPK_OFFLOAD = 0x3,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD = 0x4,
+ H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
+ H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
+};
struct rtw89_fw_h2c_rf_get_mccch {
__le32 ch_0;
@@ -3729,6 +4294,114 @@ struct rtw89_fw_h2c_rf_get_mccch {
__le32 current_band_type;
} __packed;
+#define NUM_OF_RTW89_FW_RFK_PATH 2
+#define NUM_OF_RTW89_FW_RFK_TBL 3
+
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ } __packed dbcc;
+
+ __le32 mlo_mode;
+ struct {
+ __le32 cur_ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 cur_band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed tbl;
+
+ __le32 phy_idx;
+ __le32 cur_band;
+ __le32 cur_bw;
+ __le32 cur_center_ch;
+
+ __le32 ktbl_sel0;
+ __le32 ktbl_sel1;
+ __le32 rfmod0;
+ __le32 rfmod1;
+
+ __le32 mlo_1_1;
+ __le32 rfe_type;
+ __le32 drv_mode;
+
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed mlo;
+} __packed;
+
+struct rtw89_h2c_rf_tssi {
+ __le16 len;
+ u8 phy;
+ u8 ch;
+ u8 bw;
+ u8 band;
+ u8 hwtx_en;
+ u8 cv;
+ s8 curr_tssi_cck_de[2];
+ s8 curr_tssi_cck_de_20m[2];
+ s8 curr_tssi_cck_de_40m[2];
+ s8 curr_tssi_efuse_cck_de[2];
+ s8 curr_tssi_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_20m[2];
+ s8 curr_tssi_ofdm_de_40m[2];
+ s8 curr_tssi_ofdm_de_80m[2];
+ s8 curr_tssi_ofdm_de_160m[2];
+ s8 curr_tssi_ofdm_de_320m[2];
+ s8 curr_tssi_efuse_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_diff_20m[2];
+ s8 curr_tssi_ofdm_de_diff_80m[2];
+ s8 curr_tssi_ofdm_de_diff_160m[2];
+ s8 curr_tssi_ofdm_de_diff_320m[2];
+ s8 curr_tssi_trim_de[2];
+ u8 pg_thermal[2];
+ u8 ftable[2][128];
+ u8 tssi_mode;
+} __packed;
+
+struct rtw89_h2c_rf_iqk {
+ __le32 phy_idx;
+ __le32 dbcc;
+} __packed;
+
+struct rtw89_h2c_rf_dpk {
+ u8 len;
+ u8 phy;
+ u8 dpk_enable;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 dpk_dbg_en;
+} __packed;
+
+struct rtw89_h2c_rf_txgapk {
+ u8 len;
+ u8 ktype;
+ u8 phy;
+ u8 kpath;
+ u8 band;
+ u8 bw;
+ u8 ch;
+ u8 cv;
+} __packed;
+
+struct rtw89_h2c_rf_dack {
+ __le32 len;
+ __le32 phy;
+ __le32 type;
+} __packed;
+
+struct rtw89_h2c_rf_rxdck {
+ u8 len;
+ u8 phy;
+ u8 is_afe;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 rxdck_dbg_en;
+} __packed;
+
enum rtw89_rf_log_type {
RTW89_RF_RUN_LOG = 0,
RTW89_RF_RPT_LOG = 1,
@@ -3800,6 +4473,12 @@ struct rtw89_c2h_rf_txgapk_rpt_log {
u8 rsv1;
} __packed;
+struct rtw89_c2h_rfk_report {
+ struct rtw89_c2h_hdr hdr;
+ u8 state; /* enum rtw89_rfk_report_state */
+ u8 version;
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -3830,21 +4509,39 @@ void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif);
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -3866,25 +4563,41 @@ int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
-int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb_ofld);
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list);
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *opt,
struct rtw89_vif *vif);
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -3898,10 +4611,16 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
@@ -3916,6 +4635,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
@@ -3956,6 +4679,20 @@ int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
u8 target, u8 offset);
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mcc_duration *p);
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg);
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg);
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx);
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt);
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg);
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg);
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
@@ -3965,6 +4702,65 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev);
}
+static inline int rtw89_chip_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+}
+
+static inline int rtw89_chip_h2c_default_dmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_default_dmac_tbl)
+ return chip->ops->h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+
+ return 0;
+}
+
+static inline int rtw89_chip_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_update_beacon(rtwdev, rtwvif);
+}
+
+static inline int rtw89_chip_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+}
+
+static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_ampdu_cmac_tbl)
+ return chip->ops->h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
+
+ return 0;
+}
+
+static inline
+int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_ba_cam(rtwdev, rtwsta, valid, params);
+}
+
 /* must keep compatibility; don't insert new fields in the middle */
struct rtw89_fw_txpwr_byrate_entry {
u8 band;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index c485ef2cc3d3..aa5b396b5d2b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1625,7 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
- .ple_size0_v1 = {RTW89_PLE_PG_128, 2672, 256, 212992,},
+ .ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
.ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
@@ -1650,8 +1650,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
- .ple_qt0 = {320, 0, 32, 16, 13, 13, 292, 0, 32, 18, 1, 4, 0,},
- .ple_qt1 = {320, 0, 32, 16, 1944, 1944, 2223, 0, 1963, 1949, 1, 1935, 0,},
+ .ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
+ .ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
/* PCIE SCC */
@@ -1677,7 +1677,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
- .ple_rsvd_qt0 = {2, 112, 56, 6, 6, 6, 6, 0, 0, 62,},
+ .ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
.ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
.rsvd0_size0 = {212992, 0,},
.rsvd1_size0 = {587776, 2048,},
@@ -2025,6 +2025,9 @@ void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable)
{
u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC;
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
+ return;
+
if (enable)
rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
else
@@ -2537,6 +2540,9 @@ static int spatial_reuse_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_AX_PLCP_SRC_EN);
+
return 0;
}
@@ -3192,13 +3198,11 @@ static int set_cpuio_ax(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_dle_mem *cfg;
- struct rtw89_cpuio_ctrl ctrl_para = {0};
- u16 pkt_id;
- int ret;
cfg = get_dle_mem_cfg(rtwdev, mode);
if (!cfg) {
@@ -3213,6 +3217,16 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod
dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
+ return mac->dle_quota_change(rtwdev, band1_en);
+}
+
+static int dle_quota_change_ax(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+ int ret;
+
ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
@@ -3301,7 +3315,7 @@ static int band1_enable_ax(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -3676,6 +3690,28 @@ static int trx_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
+{
+#define BACAM_1024BMP_OCC_ENTRY 4
+#define BACAM_MAX_RU_SUPPORT_B0_STA 1
+#define BACAM_MAX_RU_SUPPORT_B1_STA 1
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 users, offset;
+
+ if (chip->bacam_ver != RTW89_BACAM_V1)
+ return 0;
+
+ offset = 0;
+ users = BACAM_MAX_RU_SUPPORT_B0_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_0);
+
+ offset += users * BACAM_1024BMP_OCC_ENTRY;
+ users = BACAM_MAX_RU_SUPPORT_B1_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_1);
+
+ return 0;
+}
+
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -3910,6 +3946,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
+ ret = rtw89_mac_feat_init(rtwdev);
+ if (ret)
+ goto fail;
+
if (rtwdev->hci.ops->mac_post_init) {
ret = rtwdev->hci.ops->mac_post_init(rtwdev);
if (ret)
@@ -4000,6 +4040,9 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.mbssid = R_AX_MBSSID_CTRL,
.mbssid_drop = R_AX_MBSSID_DROP_0,
.tsf_sync = R_AX_PORT0_TSF_SYNC,
+ .ptcl_dbg = R_AX_PTCL_DBG,
+ .ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
+ .bcn_drop_all = R_AX_BCN_DROP_ALL0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@@ -4008,13 +4051,15 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 type)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif->port);
u32 reg_info, reg_ctrl;
u32 val;
int ret;
- reg_info = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG_INFO, rtwvif->mac_idx);
- reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG, rtwvif->mac_idx);
+ reg_info = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif->mac_idx);
+ reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif->mac_idx);
rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type);
rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN);
@@ -4031,7 +4076,7 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- rtw89_write32_set(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 1);
rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 0);
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 0);
@@ -4044,9 +4089,9 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
if (rtwvif->port == RTW89_PORT_0)
rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM1);
- rtw89_write32_clr(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TBTT_PROHIB_EN);
- fsleep(2);
+ fsleep(2000);
}
#define BCN_INTERVAL 100
@@ -4159,13 +4204,11 @@ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
}
-static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
- rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
if (en)
rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
@@ -4173,6 +4216,15 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
}
+static void rtw89_mac_port_cfg_rx_sync_by_nettype(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, en);
+}
+
static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en)
{
@@ -4471,7 +4523,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, NULL);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, NULL);
if (ret)
return ret;
@@ -4508,7 +4564,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
- rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
@@ -4571,6 +4627,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct ieee80211_hw *hw = rtwdev->hw;
bool tolerated = true;
u32 reg;
@@ -4578,18 +4635,19 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
rtw89_mac_check_he_obss_narrow_bw_ru_iter,
&tolerated);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->narrow_bw_ru_dis.addr,
+ rtwvif->mac_idx);
if (tolerated)
- rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_clr(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
else
- rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -4641,35 +4699,52 @@ static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
}
static void
-rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len)
{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period;
- u32 last_chan = rtwdev->scan_info.last_chan_idx;
+ u8 reason, status, tx_fail, band, actual_period, expect_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u8 mac_idx, sw_def, fw_def;
u16 chan;
int ret;
if (!rtwvif)
return;
- tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
- status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
- chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
- reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
- band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
- actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
+ tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ band = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_BAND);
+ actual_period = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PERIOD);
+ mac_idx = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_MAC_IDX);
+
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- band, chan, reason, status, tx_fail, actual_period);
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
+ expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
+ report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
+ sw_def, fw_def, report_tsf, expect_period);
+ }
switch (reason) {
+ case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false);
@@ -4685,9 +4760,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
}
} else {
- rtw89_hw_scan_complete(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort);
}
break;
+ case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
@@ -4807,8 +4883,13 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
default:
return;
case H2C_FUNC_ADD_SCANOFLD_CH:
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+ break;
case H2C_FUNC_SCANOFLD:
- cond = RTW89_FW_OFLD_WAIT_COND(0, h2c_func);
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ break;
+ case H2C_FUNC_SCANOFLD_BE:
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
break;
}
@@ -5021,6 +5102,84 @@ rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
}
+static void
+rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_tsf_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ struct rtw89_mac_mrc_tsf_rpt *rpt;
+ unsigned int i;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_tsf_rpt *)c2h->data;
+ rpt = (struct rtw89_mac_mrc_tsf_rpt *)data.buf;
+ rpt->num = min_t(u8, RTW89_MAC_MRC_MAX_REQ_TSF_NUM,
+ le32_get_bits(c2h_rpt->w2,
+ RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM));
+
+ for (i = 0; i < rpt->num; i++) {
+ u32 tsf_high = le32_to_cpu(c2h_rpt->infos[i].tsf_high);
+ u32 tsf_low = le32_to_cpu(c2h_rpt->infos[i].tsf_low);
+
+ rpt->tsfs[i] = (u64)tsf_high << 32 | tsf_low;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H TSF RPT: index %u> %llu\n",
+ i, rpt->tsfs[i]);
+ }
+
+ rtw89_complete_cond(wait, RTW89_MRC_WAIT_COND_REQ_TSF, &data);
+}
+
+static void
+rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_status_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ enum rtw89_mac_mrc_status status;
+ unsigned int cond;
+ bool next = false;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 sch_idx;
+ u8 func;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_status_rpt *)c2h->data;
+ sch_idx = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX);
+ status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_STATUS);
+ tsf_high = le32_to_cpu(c2h_rpt->tsf_high);
+ tsf_low = le32_to_cpu(c2h_rpt->tsf_low);
+
+ switch (status) {
+ case RTW89_MAC_MRC_START_SCH_OK:
+ func = H2C_FUNC_START_MRC;
+ break;
+ case RTW89_MAC_MRC_STOP_SCH_OK:
+ /* H2C_FUNC_DEL_MRC without STOP_ONLY, so wait for DEL_SCH_OK */
+ func = H2C_FUNC_DEL_MRC;
+ next = true;
+ break;
+ case RTW89_MAC_MRC_DEL_SCH_OK:
+ func = H2C_FUNC_DEL_MRC;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "invalid MRC C2H STS RPT: status %d\n", status);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: sch_idx %d, status %d, tsf %llu\n",
+ sch_idx, status, (u64)tsf_high << 32 | tsf_low);
+
+ if (next)
+ return;
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, func);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5052,7 +5211,39 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt,
};
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+static
+void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt,
+ [RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt,
+};
+
+static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
+ struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ u8 status, reason;
+
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ data.err = status != RTW89_SCAN_STATUS_SUCCESS;
+
+ if (reason == RTW89_SCAN_END_SCAN_NOTIFY) {
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
+
+ rtw89_complete_cond(fw_ofld_wait, cond, &data);
+ }
+}
+
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func)
{
switch (class) {
default:
@@ -5069,11 +5260,16 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
switch (func) {
default:
return false;
+ case RTW89_MAC_C2H_FUNC_SCANOFLD_RSP:
+ rtw89_mac_c2h_scanofld_rsp_atomic(rtwdev, c2h);
+ return false;
case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP:
return true;
}
case RTW89_MAC_C2H_CLASS_MCC:
return true;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ return true;
}
}
@@ -5096,6 +5292,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
handler = rtw89_mac_c2h_mcc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
+ handler = rtw89_mac_c2h_mrc_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -5115,8 +5315,7 @@ bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx);
if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) {
@@ -5143,7 +5342,8 @@ error:
return false;
}
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+static
+int rtw89_mac_cfg_ppdu_status_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx);
int ret;
@@ -5166,7 +5366,6 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return 0;
}
-EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -5419,7 +5618,8 @@ int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
-int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+static
+int rtw89_mac_cfg_plt_ax(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
u16 val;
@@ -5515,7 +5715,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
return !!val;
}
-u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+static u16 rtw89_mac_get_plt_cnt_ax(struct rtw89_dev *rtwdev, u8 band)
{
u32 reg;
u16 cnt;
@@ -6069,6 +6269,41 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
return ret;
}
+static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ int ret;
+
+ if (enable_wow) {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
+ rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+ } else {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
+ }
+
+ return 0;
+}
+
static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
@@ -6096,6 +6331,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.rx_fltr = R_AX_RX_FLTR_OPT,
.port_base = &rtw89_port_base_ax,
.agg_len_ht = R_AX_AGG_LEN_HT_0,
+ .ps_status = R_AX_PPWRBIT_SETTING,
.muedca_ctrl = {
.addr = R_AX_MUEDCA_EN,
@@ -6106,6 +6342,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
B_AX_BFMEE_HE_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_AX_RXTRIG_TEST_USER_2,
+ .mask = B_AX_RXTRIG_RU26_DIS,
+ },
+ .wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,},
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
@@ -6117,6 +6358,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.bf_assoc = rtw89_mac_bf_assoc_ax,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
@@ -6128,6 +6370,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.wde_quota_cfg = wde_quota_cfg_ax,
.ple_quota_cfg = ple_quota_cfg_ax,
.set_cpuio = set_cpuio_ax,
+ .dle_quota_change = dle_quota_change_ax,
.disable_cpu = rtw89_mac_disable_cpu_ax,
.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
@@ -6137,6 +6380,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.parse_phycap_map = rtw89_parse_phycap_map_ax,
.cnv_efuse_state = rtw89_cnv_efuse_state_ax,
+ .cfg_plt = rtw89_mac_cfg_plt_ax,
+ .get_plt_cnt = rtw89_mac_get_plt_cnt_ax,
+
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax,
.write_xtal_si = rtw89_mac_write_xtal_si_ax,
@@ -6146,5 +6392,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.dump_err_status = rtw89_mac_dump_err_status_ax,
.is_txq_empty = mac_is_txq_empty_ax,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list,
+ .scan_offload = rtw89_fw_h2c_scan_offload,
+
+ .wow_config_mac = rtw89_wow_config_mac_ax,
};
EXPORT_SYMBOL(rtw89_mac_gen_ax);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index ed98b49809a4..6fb457153a11 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -169,6 +169,12 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
+enum rtw89_mac_wow_fw_status {
+ WOWLAN_NOT_READY = 0x00,
+ WOWLAN_SLEEP_READY = 0x01,
+ WOWLAN_RESUME_READY = 0x02,
+};
+
#define RTW89_PORT_OFFSET_TU_TO_32US(shift_tu) ((shift_tu) * 1024 / 32)
enum rtw89_mac_dbg_port_sel {
@@ -406,13 +412,21 @@ enum rtw89_mac_c2h_mcc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
};
+enum rtw89_mac_c2h_mrc_func {
+ RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0,
+ RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MRC,
+};
+
enum rtw89_mac_c2h_class {
- RTW89_MAC_C2H_CLASS_INFO,
- RTW89_MAC_C2H_CLASS_OFLD,
- RTW89_MAC_C2H_CLASS_TWT,
- RTW89_MAC_C2H_CLASS_WOW,
- RTW89_MAC_C2H_CLASS_MCC,
- RTW89_MAC_C2H_CLASS_FWDBG,
+ RTW89_MAC_C2H_CLASS_INFO = 0x0,
+ RTW89_MAC_C2H_CLASS_OFLD = 0x1,
+ RTW89_MAC_C2H_CLASS_TWT = 0x2,
+ RTW89_MAC_C2H_CLASS_WOW = 0x3,
+ RTW89_MAC_C2H_CLASS_MCC = 0x4,
+ RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
+ RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -441,6 +455,12 @@ enum rtw89_mac_mcc_status {
RTW89_MAC_MCC_TXNULL1_FAIL = 27,
};
+enum rtw89_mac_mrc_status {
+ RTW89_MAC_MRC_START_SCH_OK = 0,
+ RTW89_MAC_MRC_STOP_SCH_OK = 1,
+ RTW89_MAC_MRC_DEL_SCH_OK = 2,
+};
+
struct rtw89_mac_ax_coex {
#define RTW89_MAC_AX_COEX_RTK_MODE 0
#define RTW89_MAC_AX_COEX_CSR_MODE 1
@@ -894,9 +914,12 @@ struct rtw89_mac_gen_def {
u32 rx_fltr;
const struct rtw89_port_reg *port_base;
u32 agg_len_ht;
+ u32 ps_status;
struct rtw89_reg_def muedca_ctrl;
struct rtw89_reg_def bfee_ctrl;
+ struct rtw89_reg_def narrow_bw_ru_dis;
+ struct rtw89_reg_def wow_ctrl;
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
enum rtw89_mac_hwmod_sel sel);
@@ -913,6 +936,7 @@ struct rtw89_mac_gen_def {
enum rtw89_machdr_frame_type type,
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
+ int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -930,6 +954,7 @@ struct rtw89_mac_gen_def {
const struct rtw89_ple_quota *max_cfg);
int (*set_cpuio)(struct rtw89_dev *rtwdev,
struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+ int (*dle_quota_change)(struct rtw89_dev *rtwdev, bool band1_en);
void (*disable_cpu)(struct rtw89_dev *rtwdev);
int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -940,6 +965,9 @@ struct rtw89_mac_gen_def {
int (*parse_phycap_map)(struct rtw89_dev *rtwdev);
int (*cnv_efuse_state)(struct rtw89_dev *rtwdev, bool idle);
+ int (*cfg_plt)(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
+ u16 (*get_plt_cnt)(struct rtw89_dev *rtwdev, u8 band);
+
bool (*get_txpwr_cr)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
@@ -952,6 +980,14 @@ struct rtw89_mac_gen_def {
enum mac_ax_err_info err);
bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+
+ int (*add_chan_list)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+ int (*scan_offload)(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif);
+
+ int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow);
};
extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax;
@@ -1086,6 +1122,8 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
u16 offset_tu);
int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u64 *tsf);
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
@@ -1127,7 +1165,8 @@ static inline int rtw89_chip_reset_bb_rf(struct rtw89_dev *rtwdev)
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
@@ -1135,9 +1174,20 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+
+static inline
+int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_ppdu_status(rtwdev, mac_idx, enable);
+}
+
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
@@ -1147,13 +1197,31 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
-int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
-u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
+int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+
+static inline
+int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_plt(rtwdev, plt);
+}
+
+static inline
+u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->get_plt_cnt(rtwdev, band);
+}
+
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl);
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
@@ -1306,6 +1374,7 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_BIG_PWR_CUT BIT(1)
XTAL_SI_XTAL_DRV = 0x15,
#define XTAL_SI_DRV_LATCH BIT(4)
+ XTAL_SI_XTAL_PLL = 0x16,
XTAL_SI_XTAL_XMD_2 = 0x24,
#define XTAL_SI_LDO_LPS GENMASK(6, 4)
XTAL_SI_XTAL_XMD_4 = 0x26,
@@ -1339,6 +1408,7 @@ enum rtw89_mac_xtal_si_offset {
XTAL_SI_SRAM_CTRL = 0xA1,
#define XTAL_SI_SRAM_DIS BIT(1)
#define FULL_BIT_MASK GENMASK(7, 0)
+ XTAL_SI_APBT = 0xD1,
XTAL_SI_PLL = 0xE0,
XTAL_SI_PLL_1 = 0xE1,
};
@@ -1364,7 +1434,8 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow);
int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx band);
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool wow);
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode);
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en);
int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_dle_rsvd_qt_type type,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
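One detail worth calling out in the mac.h hunks above: the C2H class enum switches to explicit values because the new MRC class sits at 0xe, leaving 0x6..0xd unused, and implicit enumeration would silently disagree with the firmware's ID space. A small sketch of keeping a sparse, firmware-defined ID space honest (names illustrative, not the driver's):

#include <stdio.h>

enum c2h_class {
	C2H_CLASS_INFO = 0x0,
	C2H_CLASS_OFLD = 0x1,
	C2H_CLASS_MRC  = 0xe,	/* firmware-assigned; 0x2..0xd elided here */
	C2H_CLASS_MAX,
};

typedef void (*c2h_handler)(void);

static void handle_info(void) { puts("info"); }
static void handle_mrc(void)  { puts("mrc"); }

/* designated initializers leave the gap entries NULL automatically */
static const c2h_handler handlers[C2H_CLASS_MAX] = {
	[C2H_CLASS_INFO] = handle_info,
	[C2H_CLASS_MRC]  = handle_mrc,
};

static void dispatch(unsigned int cls)
{
	if (cls >= C2H_CLASS_MAX || !handlers[cls]) {
		puts("unknown class");	/* gaps and out-of-range IDs */
		return;
	}
	handlers[cls]();
}

int main(void)
{
	dispatch(C2H_CLASS_MRC);	/* prints "mrc" */
	dispatch(0x7);			/* prints "unknown class" */
	return 0;
}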
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 93889d2fface..31d1ffb16e83 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -441,7 +441,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
* when disconnected by peer
*/
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
}
}
@@ -449,10 +449,11 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, 0);
}
if (changed & BSS_CHANGED_BEACON)
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -497,7 +498,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_mac_port_update(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
@@ -518,7 +519,7 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_mac_stop_ap(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
mutex_unlock(&rtwdev->mutex);
}
@@ -660,6 +661,8 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ clear_bit(tid, rtwsta->ampdu_map);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -668,17 +671,19 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
+ set_bit(tid, rtwsta->ampdu_map);
rtw89_leave_ps_mode(rtwdev);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_STOP:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params);
mutex_unlock(&rtwdev->mutex);
break;
default:
@@ -990,7 +995,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
}
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
if (type == IEEE80211_ROC_TYPE_MGMT_TX)
roc->state = RTW89_ROC_MGMT;
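The ampdu_action changes above keep a per-TID bitmap (rtwsta->ampdu_map) in step with BA session start/stop and re-push the per-station CMAC table to firmware on every transition. A compact sketch of that bookkeeping (illustrative user-space C; the H2C call is reduced to a printf):

#include <stdint.h>
#include <stdio.h>

#define NUM_TIDS 16	/* 802.11 TIDs fit one 16-bit word */

struct sta_agg {
	uint16_t ampdu_map;	/* bit n set => TX BA session active on TID n */
};

static void ba_session_update(struct sta_agg *sta, unsigned int tid, int start)
{
	if (tid >= NUM_TIDS)
		return;

	if (start)
		sta->ampdu_map |= (uint16_t)(1u << tid);
	else
		sta->ampdu_map &= (uint16_t)~(1u << tid);

	/* the driver re-sends the per-station CMAC table to firmware here */
	printf("h2c: ampdu map now 0x%04x\n", (unsigned int)sta->ampdu_map);
}

int main(void)
{
	struct sta_agg sta = { 0 };

	ba_session_update(&sta, 5, 1);	/* IEEE80211_AMPDU_TX_OPERATIONAL */
	ba_session_update(&sta, 5, 0);	/* IEEE80211_AMPDU_TX_STOP_* */
	return 0;
}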
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index be30c9346293..f16467377eab 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -52,6 +52,9 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.mbssid = R_BE_MBSSID_CTRL,
.mbssid_drop = R_BE_MBSSID_DROP_0,
.tsf_sync = R_BE_PORT_0_TSF_SYNC,
+ .ptcl_dbg = R_BE_PTCL_DBG,
+ .ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
+ .bcn_drop_all = R_BE_BCN_DROP_ALL0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},
@@ -988,6 +991,9 @@ static int spatial_reuse_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_BE_SR_EN | B_BE_SR_CTRL_PLCP_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_BE_PLCP_SRC_EN);
+
return 0;
}
@@ -995,7 +1001,8 @@ static int tmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
- rtw89_write32_clr(rtwdev, R_BE_TB_PPDU_CTRL, B_BE_QOSNULL_UPD_MUEDCA_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TB_PPDU_CTRL, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_QOSNULL_UPD_MUEDCA_EN);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
@@ -1449,6 +1456,71 @@ static int set_cpuio_be(struct rtw89_dev *rtwdev,
return 0;
}
+static int dle_upd_qta_aval_page_be(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_dle_ctrl_type type,
+ enum rtw89_mac_dle_ple_quota_id quota_id)
+{
+ u32 val;
+
+ if (type == DLE_CTRL_TYPE_WDE) {
+ rtw89_write32_mask(rtwdev, R_BE_WDE_BUFMGN_CTL,
+ B_BE_WDE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_WDE_BUFMGN_CTL, B_BE_WDE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_WDE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_WDE_BUFMGN_CTL);
+ } else if (type == DLE_CTRL_TYPE_PLE) {
+ rtw89_write32_mask(rtwdev, R_BE_PLE_BUFMGN_CTL,
+ B_BE_PLE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_PLE_BUFMGN_CTL, B_BE_PLE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_PLE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_PLE_BUFMGN_CTL);
+ }
+
+ rtw89_warn(rtwdev, "%s wrong type %d\n", __func__, type);
+ return -EINVAL;
+}
+
+static int dle_quota_change_be(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ int ret;
+
+ if (band1_en) {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B0_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B0 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC0_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC0 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B1_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B1 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC1_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC1 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_qta_mode mode)
{
@@ -1480,6 +1552,13 @@ static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
static int dbcc_bb_ctrl_be(struct rtw89_dev *rtwdev, bool bb1_en)
{
+ u32 set = B_BE_FEN_BB1PLAT_RSTB | B_BE_FEN_BB1_IP_RSTN;
+
+ if (bb1_en)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, set);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, set);
+
return 0;
}
@@ -1538,7 +1617,7 @@ static int band1_enable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1593,7 +1672,7 @@ static int band1_disable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1616,7 +1695,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, true);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1625,7 +1704,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, false);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1718,12 +1797,220 @@ static int trx_init_be(struct rtw89_dev *rtwdev)
return 0;
}
+int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_BE_GNT_BT_BB0_VAL | B_BE_GNT_BT_RX_BB0_VAL |
+ B_BE_GNT_BT_TX_BB0_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_BE_GNT_BT_BB0_SWCTRL | B_BE_GNT_BT_RX_BB0_SWCTRL |
+ B_BE_GNT_BT_TX_BB0_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_BE_GNT_WL_BB0_VAL | B_BE_GNT_WL_RX_VAL |
+ B_BE_GNT_WL_TX_VAL | B_BE_GNT_WL_BB_PWR_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_BE_GNT_WL_BB0_SWCTRL | B_BE_GNT_WL_RX_SWCTRL |
+ B_BE_GNT_WL_TX_SWCTRL | B_BE_GNT_WL_BB_PWR_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_BE_GNT_BT_BB1_VAL | B_BE_GNT_BT_RX_BB1_VAL |
+ B_BE_GNT_BT_TX_BB1_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_BE_GNT_BT_BB1_SWCTRL | B_BE_GNT_BT_RX_BB1_SWCTRL |
+ B_BE_GNT_BT_TX_BB1_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_BE_GNT_WL_BB1_VAL | B_BE_GNT_WL_RX_VAL |
+ B_BE_GNT_WL_TX_VAL | B_BE_GNT_WL_BB_PWR_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_BE_GNT_WL_BB1_SWCTRL | B_BE_GNT_WL_RX_SWCTRL |
+ B_BE_GNT_WL_TX_SWCTRL | B_BE_GNT_WL_BB_PWR_SWCTRL;
+
+ if (gnt_cfg->bt[0].wlan_act_en)
+ val |= B_BE_WL_ACT_SWCTRL;
+ if (gnt_cfg->bt[0].wlan_act)
+ val |= B_BE_WL_ACT_VAL;
+ if (gnt_cfg->bt[1].wlan_act_en)
+ val |= B_BE_WL_ACT2_SWCTRL;
+ if (gnt_cfg->bt[1].wlan_act)
+ val |= B_BE_WL_ACT2_VAL;
+
+ rtw89_write32(rtwdev, R_BE_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v2);
+
+int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ struct rtw89_mac_ax_wl_act *gbt = dm->gnt.bt;
+ int i;
+
+ if (wl)
+ return 0;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ gbt[i].wlan_act = 1;
+ gbt[i].wlan_act_en = 0;
+ }
+
+ return rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v2);
+
+static
+int rtw89_mac_cfg_plt_be(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ u32 reg;
+ u16 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BT_PLT, plt->band);
+ val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_BE_TX_PLT_GNT_LTE_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_BE_TX_PLT_GNT_BT_TX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_BE_TX_PLT_GNT_BT_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_BE_TX_PLT_GNT_WL : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_BE_RX_PLT_GNT_LTE_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_BE_RX_PLT_GNT_BT_TX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_BE_RX_PLT_GNT_BT_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_BE_RX_PLT_GNT_WL : 0) |
+ B_BE_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
+
+ return 0;
+}
+
+static u16 rtw89_mac_get_plt_cnt_be(struct rtw89_dev *rtwdev, u8 band)
+{
+ u32 reg;
+ u16 cnt;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BT_PLT, band);
+ cnt = rtw89_read32_mask(rtwdev, reg, B_BE_BT_PLT_PKT_CNT_MASK);
+ rtw89_write16_set(rtwdev, reg, B_BE_BT_PLT_RST);
+
+ return cnt;
+}
+
+static int rtw89_set_hw_sch_tx_en_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v2);
+
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, tx_en,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
+
+static
+int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PPDU_STAT, mac_idx);
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ rtw89_write32_clr(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN);
+ return 0;
+ }
+
+ rtw89_write32_mask(rtwdev, R_BE_HW_PPDU_STATUS, B_BE_FWD_PPDU_STAT_MASK, 3);
+ rtw89_write32(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN | B_BE_PPDU_MAC_INFO |
+ B_BE_APP_RX_CNT_RPT | B_BE_APP_PLCP_HDR_RPT |
+ B_BE_PPDU_STAT_RPT_CRC32 | B_BE_PPDU_STAT_RPT_DMA);
+
+ return 0;
+}
+
static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, (enum rtw89_mac_idx)phy_idx,
@@ -2020,6 +2307,52 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev)
dump_err_status_dispatcher_be(rtwdev);
}
+static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+ struct rtw89_mac_h2c_info h2c_info = {};
+ struct rtw89_mac_c2h_info c2h_info = {};
+ u32 ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
+ h2c_info.content_len = sizeof(h2c_info.u.hdr);
+ h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ if (enable_wow) {
+ rtw89_write32_set(rtwdev, R_BE_RX_STOP, B_BE_HOST_RX_STOP);
+ rtw89_write32_clr(rtwdev, R_BE_RX_FLTR_OPT, B_BE_SNIFFER_MODE);
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_BE_FWD_ERR, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN0, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN1, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN2, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_TF0, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_TF1, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ERR, 0);
+ rtw89_write32(rtwdev, R_BE_HW_PPDU_STATUS, 0);
+ rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY);
+ } else {
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
+ rtw89_write32_clr(rtwdev, R_BE_RX_STOP, B_BE_HOST_RX_STOP);
+ rtw89_write32_set(rtwdev, R_BE_RX_FLTR_OPT, B_BE_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ }
+
+ return 0;
+}
+
static void rtw89_mac_dump_cmac_err_status_be(struct rtw89_dev *rtwdev,
u8 band)
{
@@ -2218,6 +2551,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.rx_fltr = R_BE_RX_FLTR_OPT,
.port_base = &rtw89_port_base_be,
.agg_len_ht = R_BE_AGG_LEN_HT_0,
+ .ps_status = R_BE_WMTX_POWER_BE_BIT_CTL,
.muedca_ctrl = {
.addr = R_BE_MUEDCA_EN,
@@ -2228,6 +2562,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.mask = B_BE_BFMEE_HT_NDPA_EN | B_BE_BFMEE_VHT_NDPA_EN |
B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_BE_RXTRIG_TEST_USER_2,
+ .mask = B_BE_RXTRIG_RU26_DIS,
+ },
+ .wow_ctrl = {.addr = R_BE_WOW_CTRL, .mask = B_BE_WOW_WOWEN,},
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
@@ -2239,6 +2578,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.bf_assoc = rtw89_mac_bf_assoc_be,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
@@ -2250,6 +2590,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.wde_quota_cfg = wde_quota_cfg_be,
.ple_quota_cfg = ple_quota_cfg_be,
.set_cpuio = set_cpuio_be,
+ .dle_quota_change = dle_quota_change_be,
.disable_cpu = rtw89_mac_disable_cpu_be,
.fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be,
@@ -2259,6 +2600,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.parse_phycap_map = rtw89_parse_phycap_map_be,
.cnv_efuse_state = rtw89_cnv_efuse_state_be,
+ .cfg_plt = rtw89_mac_cfg_plt_be,
+ .get_plt_cnt = rtw89_mac_get_plt_cnt_be,
+
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_be,
.write_xtal_si = rtw89_mac_write_xtal_si_be,
@@ -2268,5 +2612,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.dump_err_status = rtw89_mac_dump_err_status_be,
.is_txq_empty = mac_is_txq_empty_be,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list_be,
+ .scan_offload = rtw89_fw_h2c_scan_offload_be,
+
+ .wow_config_mac = rtw89_wow_config_mac_be,
};
EXPORT_SYMBOL(rtw89_mac_gen_be);
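dle_upd_qta_aval_page_be() above follows the usual MAC idiom: select a quota ID, raise a self-clearing request bit, then poll with a bounded timeout until hardware drops it (read_poll_timeout() in the kernel). A stand-alone sketch under those assumptions, with usleep() standing in for the kernel helper and the "hardware" completion simulated inside the loop:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define REQ_BIT		(1u << 31)	/* self-clearing "update" request */
#define QTAID_MASK	0x1Fu

static uint32_t fake_reg;		/* stand-in for the BUFMGN register */

static int poll_req_clear(unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (fake_reg & REQ_BIT) {
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited += sleep_us;
		fake_reg &= ~REQ_BIT;	/* simulated hardware completion */
	}
	return 0;
}

int main(void)
{
	/* select quota id, raise the request, then poll as the driver does */
	fake_reg = (5u & QTAID_MASK) | REQ_BIT;
	printf("poll result: %d\n", poll_req_clear(1, 2000));
	return 0;
}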
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 769f1ce62ebc..19001130ad94 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -155,8 +155,8 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
DMA_FROM_DEVICE);
}
-static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
- struct sk_buff *skb)
+static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
{
struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
@@ -166,11 +166,59 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+}
+
+static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 target_rx_tag;
+
+ if (!info->check_rx_tag)
+ return 0;
+
+ /* valid range is 1 ~ 0x1FFF */
+ if (rx_ring->target_rx_tag == 0)
+ target_rx_tag = 1;
+ else
+ target_rx_tag = rx_ring->target_rx_tag;
+
+ if (rx_info->tag != target_rx_tag) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
+ rx_info->tag, target_rx_tag);
+ return -EAGAIN;
+ }
return 0;
}
-static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+static
+int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ int rx_tag_retry = 100;
+ int ret;
+
+ do {
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ rtw89_pci_rxbd_info_update(rtwdev, skb);
+
+ ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
+ if (ret != -EAGAIN)
+ break;
+ } while (rx_tag_retry--);
+
+ /* update target rx_tag for next RX */
+ rx_ring->target_rx_tag = rx_info->tag + 1;
+
+ return ret;
+}
+
+static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
@@ -187,7 +235,7 @@ static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
}
}
-static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
@@ -259,9 +307,8 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -549,9 +596,8 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -705,7 +751,7 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
- isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR);
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
@@ -1550,6 +1596,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
@@ -1907,22 +1954,87 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
return 0;
}
+static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 addr_2lsb = addr & B_AX_DBI_2LSB;
+ u16 write_addr;
+ u8 flag;
+ int ret;
+
+ write_addr = addr & B_AX_DBI_ADDR_MSK;
+ write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
+ rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret)
+ rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
+ addr);
+
+ return ret;
+}
+
+static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
+ u8 flag;
+ int ret;
+
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
+ addr);
+ return ret;
+ }
+
+ read_addr = R_AX_DBI_RDATA + (addr & 3);
+ *value = rtw89_read8(rtwdev, read_addr);
+
+ return 0;
+}
+
static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_write_config_byte(pdev, addr, data);
+ ret = pci_write_config_byte(pdev, addr, data);
+ if (!ret)
+ return 0;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_write8(rtwdev, addr, data);
+
+ return ret;
}
static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 *value)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_read_config_byte(pdev, addr, value);
+ ret = pci_read_config_byte(pdev, addr, value);
+ if (!ret)
+ return 0;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_read8(rtwdev, addr, value);
+
+ return ret;
}
static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
@@ -2412,7 +2524,7 @@ static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
}
-static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
@@ -2439,7 +2551,7 @@ static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
@@ -2459,13 +2571,13 @@ static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
{
u32 ret;
- ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
+ ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "txdma ch busy\n");
return ret;
}
- ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
+ ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "rxdma ch busy\n");
return ret;
@@ -2644,8 +2756,8 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
}
/* disable all channels except to FW CMD channel to download firmware */
- rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
- rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
+ rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -2758,7 +2870,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
}
/* enable DMA for all queues */
- rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
/* Release PCI IO */
rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
@@ -3148,6 +3260,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf_sz = buf_sz;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
for (i = 0; i < len; i++) {
skb = dev_alloc_skb(buf_sz);
@@ -3387,8 +3500,7 @@ static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
- rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
- B_BE_PCIE_RX_RPQ0_IMR0_V1;
+ rtwpci->intrs[1] = 0;
}
static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
@@ -3540,12 +3652,20 @@ static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- int ret;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
if (rtw89_pci_disable_clkreq)
return;
+ gen_def->clkreq_set(rtwdev, enable);
+}
+
+static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ int ret;
+
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
PCIE_CLKDLY_HW_30US);
if (ret)
@@ -3577,24 +3697,31 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- u8 value = 0;
- int ret;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
if (rtw89_pci_disable_aspm_l1)
return;
+ gen_def->aspm_set(rtwdev, enable);
+}
+
+static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 value = 0;
+ int ret;
+
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
- rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
- value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
- value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
- FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
+ u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
+ u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
- rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
if (enable)
@@ -3681,6 +3808,17 @@ static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ if (rtw89_pci_disable_l1ss)
+ return;
+
+ gen_def->l1ss_set(rtwdev, enable);
+}
+
+static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
@@ -3954,6 +4092,14 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
+
+ .aspm_set = rtw89_pci_aspm_set_ax,
+ .clkreq_set = rtw89_pci_clkreq_set_ax,
+ .l1ss_set = rtw89_pci_l1ss_set_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);
@@ -3988,10 +4134,11 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.recovery_start = rtw89_pci_ops_recovery_start,
.recovery_complete = rtw89_pci_ops_recovery_complete,
- .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie,
- .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
.ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
- .poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
+
.clr_idx_all = rtw89_pci_clr_idx_all,
.clear = rtw89_pci_clear_resource,
.disable_intr = rtw89_pci_disable_intr_lock,
@@ -4068,6 +4215,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq;
}
+ set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
+
return 0;
err_free_irq:
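The new RX-tag validation in pci.c guards against consuming a RX buffer descriptor before its DMA write has fully landed: tags are a 13-bit sequence in which 0 is never valid, so the expected value runs 1..0x1FFF, and a mismatch makes the caller re-sync the buffer and retry. A sketch of just the wraparound bookkeeping (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RX_TAG_BITS	13
#define RX_TAG_MASK	((1u << RX_TAG_BITS) - 1)	/* 0x1FFF */

static int validate(uint16_t got, uint16_t *target)
{
	uint16_t want = *target ? *target : 1;	/* valid tags are 1..0x1FFF */
	int ok = (got == want);

	/* track the next tag; wraps to 0, remapped to 1 on the next check */
	*target = (uint16_t)((got + 1u) & RX_TAG_MASK);
	return ok ? 0 : -1;	/* caller re-syncs the buffer and retries */
}

int main(void)
{
	uint16_t target = RX_TAG_MASK;	/* expect the last tag, 0x1FFF */

	printf("%d\n", validate(RX_TAG_MASK, &target));	/* 0: match */
	printf("%d\n", validate(1, &target));	/* 0: wrapped past the 0 hole */
	return 0;
}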
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index ca5de77fee90..a63b6b7c9bfa 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -42,6 +42,7 @@
#define B_AX_DBI_WFLAG BIT(16)
#define B_AX_DBI_WREN_MSK GENMASK(15, 12)
#define B_AX_DBI_ADDR_MSK GENMASK(11, 2)
+#define B_AX_DBI_2LSB GENMASK(1, 0)
#define R_AX_DBI_WDATA 0x1094
#define R_AX_DBI_RDATA 0x1098
@@ -281,6 +282,21 @@
#define B_BE_PCIE_EN_SWENT_L23 BIT(1)
#define B_BE_SEL_REQ_EXIT_L1 BIT(0)
+#define R_BE_PCIE_MIX_CFG 0x300C
+#define B_BE_L1SS_TIMEOUT_CTRL BIT(18)
+#define B_BE_ASPM_CTRL_L1 BIT(17)
+#define B_BE_ASPM_CTRL_L0 BIT(16)
+#define B_BE_XFER_PENDING_FW BIT(11)
+#define B_BE_XFER_PENDING BIT(10)
+#define B_BE_REQ_EXIT_L1 BIT(9)
+#define B_BE_REQ_ENTR_L1 BIT(8)
+#define B_BE_L1SUB_ENABLE BIT(0)
+
+#define R_BE_L1_CLK_CTRL 0x3010
+#define B_BE_RAS_SD_HOLD_LTSSM BIT(12)
+#define B_BE_CLK_REQ_N BIT(1)
+#define B_BE_CLK_PM_EN BIT(0)
+
#define R_BE_PCIE_LAT_CTRL 0x3044
#define B_BE_ELBI_PHY_REMAP_MASK GENMASK(29, 24)
#define B_BE_SYS_SUS_L12_EN BIT(17)
@@ -289,6 +305,8 @@
#define B_BE_RTK_LDO_POWER_LATENCY_MASK GENMASK(11, 10)
#define B_BE_RTK_LDO_BIAS_LATENCY_MASK GENMASK(9, 8)
#define B_BE_CLK_REQ_LAT_MASK GENMASK(7, 4)
+#define B_BE_RTK_PM_SEL_OPT BIT(1)
+#define B_BE_CLK_REQ_SEL BIT(0)
#define R_BE_PCIE_HIMR0 0x30B0
#define B_BE_PCIE_HB1_IND_INTA_IMR BIT(31)
@@ -924,6 +942,8 @@
#define B_BE_SER_L1SUB_IMR BIT(1)
#define B_BE_SER_PMU_IMR BIT(0)
+#define R_BE_REG_PL1_ISR 0x34B4
+
#define R_BE_RX_APPEND_MODE 0x8920
#define B_BE_APPEND_OFFSET_MASK GENMASK(23, 16)
#define B_BE_APPEND_LEN_MASK GENMASK(15, 0)
@@ -996,7 +1016,7 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE 11460
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
@@ -1065,6 +1085,15 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+enum rtw89_pcie_clkdly_hw_v1 {
+ PCIE_CLKDLY_HW_V1_0 = 0,
+ PCIE_CLKDLY_HW_V1_16US = 0x1,
+ PCIE_CLKDLY_HW_V1_32US = 0x2,
+ PCIE_CLKDLY_HW_V1_64US = 0x3,
+ PCIE_CLKDLY_HW_V1_80US = 0x4,
+ PCIE_CLKDLY_HW_V1_96US = 0x5,
+};
+
enum mac_ax_bd_trunc_mode {
MAC_AX_BD_NORM,
MAC_AX_BD_TRUNC,
@@ -1215,6 +1244,14 @@ struct rtw89_pci_gen_def {
int (*lv1rst_stop_dma)(struct rtw89_dev *rtwdev);
int (*lv1rst_start_dma)(struct rtw89_dev *rtwdev);
+
+ void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
+ void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
+ int (*poll_txdma_ch_idle)(struct rtw89_dev *rtwdev);
+
+ void (*aspm_set)(struct rtw89_dev *rtwdev, bool enable);
+ void (*clkreq_set)(struct rtw89_dev *rtwdev, bool enable);
+ void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
};
struct rtw89_pci_info {
@@ -1234,6 +1271,7 @@ struct rtw89_pci_info {
enum mac_ax_pcie_func_ctrl io_rcy_en;
enum mac_ax_io_rcy_tmr io_rcy_tmr;
bool rx_ring_eq_is_full;
+ bool check_rx_tag;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1276,7 +1314,7 @@ struct rtw89_pci_tx_data {
struct rtw89_pci_rx_info {
dma_addr_t dma;
- u32 fs:1, ls:1, tag:11, len:14;
+ u32 fs:1, ls:1, tag:13, len:14;
};
#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
@@ -1405,6 +1443,7 @@ struct rtw89_pci_rx_ring {
u32 buf_sz;
struct sk_buff *diliver_skb;
struct rtw89_rx_desc_info diliver_desc;
+ u32 target_rx_tag:13;
};
struct rtw89_pci_isrs {
@@ -1521,6 +1560,7 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
}
extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct dev_pm_ops rtw89_pm_ops_be;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
@@ -1676,4 +1716,27 @@ static inline int rtw89_pci_reset_bdram(struct rtw89_dev *rtwdev)
return gen_def->rst_bdram(rtwdev);
}
+static inline void rtw89_pci_ctrl_txdma_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->ctrl_txdma_ch(rtwdev, enable);
+}
+
+static inline void rtw89_pci_ctrl_txdma_fw_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->ctrl_txdma_fw_ch(rtwdev, enable);
+}
+
+static inline int rtw89_pci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->poll_txdma_ch_idle(rtwdev);
+}
#endif
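The widened rtw89_pci_rx_info bitfield above packs fs/ls/tag/len into 29 bits of one u32, filled from the little-endian RXBD dword via le32_get_bits(). A plain-C sketch of that unpacking; the mask layout here is invented for illustration and does not claim the real RXBD bit positions:

#include <stdint.h>
#include <stdio.h>

/* illustrative layout only; not the real RXBD bit positions */
#define RXBD_LEN_MASK	0x00003FFFu			/* len:14 */
#define RXBD_FS_BIT	(1u << 14)			/* fs:1   */
#define RXBD_LS_BIT	(1u << 15)			/* ls:1   */
#define RXBD_TAG_SHIFT	16
#define RXBD_TAG_MASK	(0x1FFFu << RXBD_TAG_SHIFT)	/* tag:13 */

struct rx_info {
	uint32_t fs:1, ls:1, tag:13, len:14;	/* 29 bits in one u32 */
};

static struct rx_info unpack(uint32_t dword)
{
	struct rx_info ri;

	ri.len = dword & RXBD_LEN_MASK;
	ri.fs  = !!(dword & RXBD_FS_BIT);
	ri.ls  = !!(dword & RXBD_LS_BIT);
	ri.tag = (dword & RXBD_TAG_MASK) >> RXBD_TAG_SHIFT;
	return ri;
}

int main(void)
{
	struct rx_info ri = unpack(42u | RXBD_FS_BIT | RXBD_LS_BIT |
				   (7u << RXBD_TAG_SHIFT));

	printf("fs=%u ls=%u tag=%u len=%u\n", (unsigned int)ri.fs,
	       (unsigned int)ri.ls, (unsigned int)ri.tag, (unsigned int)ri.len);
	return 0;
}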
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 629ffa4bee91..7cc328222965 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -19,6 +19,54 @@ enum pcie_rxbd_mode {
#define PL0_TMR_MAC_1MS 0x27100
#define PL0_TMR_AUX_1MS 0x1E848
+static void rtw89_pci_aspm_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u8 value = 0;
+ int ret;
+
+ ret = pci_read_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, &value);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
+
+ u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
+
+ ret = pci_write_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, value);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
+
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_BE_ASPM_CTRL_L1);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_BE_ASPM_CTRL_L1);
+}
+
+static void rtw89_pci_l1ss_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_L1SUB_ENABLE);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_L1SUB_ENABLE);
+}
+
+static void rtw89_pci_clkreq_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_write32_mask(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_CLK_REQ_LAT_MASK,
+ PCIE_CLKDLY_HW_V1_0);
+
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_L1_CLK_CTRL,
+ B_BE_CLK_PM_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_L1_CLK_CTRL,
+ B_BE_CLK_PM_EN);
+}
+
static void _patch_pcie_power_wake_be(struct rtw89_dev *rtwdev, bool power_up)
{
if (power_up)
@@ -105,6 +153,10 @@ static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
val |= B_BE_STOP_AXI_MST;
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
+
+ if (io_en == MAC_AX_PCIE_ENABLE)
+ rtw89_write32_mask(rtwdev, R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1,
+ B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK, 4);
}
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
@@ -257,6 +309,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_mask(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_TIMER_UNIT_MASK, 1);
val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
@@ -264,8 +317,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
}
-static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
- bool h2c_en)
+static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool enable)
{
u32 mask_all;
u32 val;
@@ -278,12 +330,19 @@ static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;
- if (all_en)
+ if (enable)
val &= ~mask_all;
else
val |= mask_all;
- if (h2c_en)
+ rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
+}
+
+static void rtw89_pci_ctrl_txdma_fw_ch_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ u32 val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
+
+ if (enable)
val &= ~B_BE_STOP_CH12;
else
val |= B_BE_STOP_CH12;
@@ -322,7 +381,8 @@ static int rtw89_pci_ops_mac_pre_init_be(struct rtw89_dev *rtwdev)
rtw89_pci_pcie_setting_be(rtwdev);
rtw89_pci_ser_setting_be(rtwdev);
- rtw89_pci_ctrl_txdma_ch_be(rtwdev, false, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, false);
+ rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_ENABLE,
MAC_AX_PCIE_ENABLE, MAC_AX_PCIE_ENABLE);
@@ -432,7 +492,8 @@ static int rtw89_pci_ops_mac_post_init_be(struct rtw89_dev *rtwdev)
rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_IGNORE,
MAC_AX_PCIE_IGNORE, MAC_AX_PCIE_ENABLE);
rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, true);
- rtw89_pci_ctrl_txdma_ch_be(rtwdev, true, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, true);
+ rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
rtw89_pci_configure_mit_be(rtwdev);
return 0;
@@ -489,6 +550,46 @@ static int rtw89_pci_lv1rst_start_dma_be(struct rtw89_dev *rtwdev)
return 0;
}
+static int __maybe_unused rtw89_pci_suspend_be(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
+ rtw89_write32_clr(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
+ return 0;
+}
+
+static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+ u32 polling;
+ int ret;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
+ rtw89_write32_clr(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, polling, !polling, 1, 1000,
+ false, rtwdev, R_BE_REG_PL1_ISR);
+ if (ret)
+ rtw89_warn(rtwdev, "[ERR] PCIE SER clear polling fail\n");
+
+ rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_set(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
+EXPORT_SYMBOL(rtw89_pm_ops_be);
+
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
@@ -505,5 +606,13 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_be,
.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_be,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_be,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_be,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_be,
+
+ .aspm_set = rtw89_pci_aspm_set_be,
+ .clkreq_set = rtw89_pci_clkreq_set_be,
+ .l1ss_set = rtw89_pci_l1ss_set_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);
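The BE suspend/resume hooks above use a write-lock bit (B_BE_WLOCK_1C_BIT6) that must be raised before B_BE_R_DIS_PRST can be flipped, and lowered again afterwards. A sketch of that unlock-modify-relock pattern (register layout and bit positions invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define WLOCK_BIT	(1u << 6)	/* write-enable for protected bits */
#define DIS_PRST_BIT	(1u << 7)	/* the protected control bit */

static uint32_t rsv_ctrl;		/* stand-in for R_BE_RSV_CTRL */

static void set_protected_bit(int on)
{
	rsv_ctrl |= WLOCK_BIT;		/* unlock */
	if (on)
		rsv_ctrl |= DIS_PRST_BIT;
	else
		rsv_ctrl &= ~DIS_PRST_BIT;
	rsv_ctrl &= ~WLOCK_BIT;		/* relock; stray writes bounce again */
}

int main(void)
{
	set_protected_bit(1);		/* mirrors the suspend path */
	printf("rsv_ctrl=0x%x\n", (unsigned int)rsv_ctrl);
	set_protected_bit(0);		/* mirrors the resume path */
	printf("rsv_ctrl=0x%x\n", (unsigned int)rsv_ctrl);
	return 0;
}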
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index bafc7b1cc104..12da63d64307 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -13,6 +13,13 @@
#include "txrx.h"
#include "util.h"
+static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ return phy->phy0_phy1_offset(rtwdev, addr);
+}
+
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
@@ -718,6 +725,53 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw)
+{
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
+ u8 txsb_idx = 0;
+
+ if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
+ return txsb_idx;
+
+ switch (cbw) {
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 6) / 4;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 14) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 12) / 8;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 30) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 28) / 8;
+ else if (dbw == RTW89_CHANNEL_WIDTH_80)
+ txsb_idx = (pri_ch - central_ch + 24) / 16;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ default:
+ break;
+ }
+
+ return txsb_idx;
+}
+EXPORT_SYMBOL(rtw89_phy_get_txsb);
+
static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
@@ -796,6 +850,71 @@ u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
+static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr)
+{
+ static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
+ static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
+ bool busy, done;
+ int ret;
+ u32 val;
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
+ if (ret) {
+ rtw89_warn(rtwdev, "poll HWSI is busy\n");
+ return INV_RF_DATA;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
+ if (ret) {
+ rtw89_warn(rtwdev, "read HWSI is busy\n");
+ val = INV_RF_DATA;
+ goto out;
+ }
+
+ val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
+out:
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);
+
+ return val;
+}
+
+static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ u32 val;
+
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+
+ return (val & mask) >> __ffs(mask);
+}
+
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -875,6 +994,66 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+static
+bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 data)
+{
+ static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
+ static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
+ bool busy;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, addr_is_idle[rf_path], BIT(29));
+ if (ret) {
+ rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
+ return false;
+ }
+
+ val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
+ u32_encode_bits(data, B_HWSI_DATA_VAL);
+
+ rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);
+
+ return true;
+}
+
+static
+bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ u32 val;
+
+ if (mask == RFREG_MASK) {
+ val = data;
+ } else {
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+ val &= ~mask;
+ val |= (data << __ffs(mask)) & mask;
+ }
+
+ return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
+}
+
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
+
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
@@ -893,20 +1072,30 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data)
{
- if (reg->addr == 0xfe)
+ u32 addr;
+
+ if (reg->addr == 0xfe) {
mdelay(50);
- else if (reg->addr == 0xfd)
+ } else if (reg->addr == 0xfd) {
mdelay(5);
- else if (reg->addr == 0xfc)
+ } else if (reg->addr == 0xfc) {
mdelay(1);
- else if (reg->addr == 0xfb)
+ } else if (reg->addr == 0xfb) {
udelay(50);
- else if (reg->addr == 0xfa)
+ } else if (reg->addr == 0xfa) {
udelay(5);
- else if (reg->addr == 0xf9)
+ } else if (reg->addr == 0xf9) {
udelay(1);
- else
- rtw89_phy_write32(rtwdev, reg->addr, reg->data);
+ } else if (reg->data == BYPASS_CR_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
+ } else {
+ addr = reg->addr;
+
+ if ((uintptr_t)extra_data == RTW89_PHY_1)
+ addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
+
+ rtw89_phy_write32(rtwdev, addr, reg->data);
+ }
}
union rtw89_phy_bb_gain_arg {
@@ -929,7 +1118,7 @@ static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -968,7 +1157,7 @@ static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 rxsc_start = arg.rxsc_start;
u8 bw = arg.bw;
u8 path = arg.path;
@@ -1050,7 +1239,7 @@ static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1077,7 +1266,7 @@ static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1108,10 +1297,10 @@ rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
- const struct rtw89_reg2_def *reg,
- enum rtw89_rf_path rf_path,
- void *extra_data)
+static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
@@ -1420,12 +1609,15 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
+ if (rtwdev->dbcc_en)
+ rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
+ (void *)RTW89_PHY_1);
rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
if (bb_gain_table)
rtw89_phy_init_reg(rtwdev, bb_gain_table,
- rtw89_phy_config_bb_gain, NULL);
+ chip->phy_def->config_bb_gain, NULL);
rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}
@@ -1467,11 +1659,9 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
kfree(rf_reg_info);
}
-static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
- struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_phy_table *nctl_table;
u32 val;
int ret;
@@ -1491,6 +1681,15 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
1000, false, rtwdev);
if (ret)
rtw89_err(rtwdev, "failed to poll nctl block\n");
+}
+
+static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *nctl_table;
+
+ rtw89_phy_preinit_rf_nctl(rtwdev);
nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
@@ -1499,14 +1698,11 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}
-static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
u32 phy_page = addr >> 8;
u32 ofst = 0;
- if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
- return addr < 0x10000 ? 0x20000 : 0;
-
switch (phy_page) {
case 0x6:
case 0x7:
@@ -1561,6 +1757,7 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
+EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl)
@@ -2699,9 +2896,63 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};
+static
+void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = RTW89_RFK_STATE_START;
+ wait->start_time = ktime_get();
+ reinit_completion(&wait->completion);
+}
+
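+ /* Counterpart of rtw89_phy_rfk_report_prep(): block until the C2H RFK
+ * report handler signals completion or the timeout fires, then log how
+ * long the calibration took.
+ */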
+static
+int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
+ unsigned int ms)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+ unsigned long time_left;
+
+ /* Since we can't receive C2H events during SER, use a fixed delay. */
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
+ fsleep(1000 * ms / 2);
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&wait->completion,
+ msecs_to_jiffies(ms));
+ if (time_left == 0) {
+ rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
+ return -ETIMEDOUT;
+ } else if (wait->state != RTW89_RFK_STATE_OK) {
+ rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
+ rfk_name, wait->state);
+ return -EFAULT;
+ }
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
+ rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
+
+ return 0;
+}
+
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ const struct rtw89_c2h_rfk_report *report =
+ (const struct rtw89_c2h_rfk_report *)c2h->data;
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = report->state;
+ wait->version = report->version;
+
+ complete(&wait->completion);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "RFK report state %d with version %d (%*ph)\n",
+ wait->state, wait->version,
+ (int)(len - sizeof(report->hdr)), &report->state);
}
static
@@ -2772,6 +3023,726 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler(rtwdev, skb, len);
}
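+
+ /* Each *_and_wait helper below follows the same pattern: arm the RFK
+ * wait state, send the H2C command that kicks off the calibration in
+ * firmware, then block until the C2H report arrives.
+ */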
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
+
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
+
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
+
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
+
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
+
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
+
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
+
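+ /* Map a 2 GHz channel number to its CCK TSSI DE group index. */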
+static u32 phy_tssi_get_cck_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
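+ /* A channel on the boundary between two TSSI groups is tagged as an
+ * "extra" group; its DE value is the average of the two neighbouring
+ * groups (see phy_tssi_get_ofdm_de()).
+ */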
+#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
+#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
+#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
+ ((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
+ (PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 phy_tssi_get_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return PHY_TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return PHY_TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return PHY_TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 5:
+ return 0;
+ case 6 ... 8:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 9 ... 13:
+ return 1;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(1);
+ case 17 ... 21:
+ return 2;
+ case 22 ... 24:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 25 ... 29:
+ return 3;
+ case 33 ... 37:
+ return 4;
+ case 38 ... 40:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 41 ... 45:
+ return 5;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 49 ... 53:
+ return 6;
+ case 54 ... 56:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 57 ... 61:
+ return 7;
+ case 65 ... 69:
+ return 8;
+ case 70 ... 72:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 73 ... 77:
+ return 9;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 81 ... 85:
+ return 10;
+ case 86 ... 88:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 89 ... 93:
+ return 11;
+ case 97 ... 101:
+ return 12;
+ case 102 ... 104:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 105 ... 109:
+ return 13;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 113 ... 117:
+ return 14;
+ case 118 ... 120:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 121 ... 125:
+ return 15;
+ case 129 ... 133:
+ return 16;
+ case 134 ... 136:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 137 ... 141:
+ return 17;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 145 ... 149:
+ return 18;
+ case 150 ... 152:
+ return PHY_TSSI_EXTRA_GROUP(18);
+ case 153 ... 157:
+ return 19;
+ case 161 ... 165:
+ return 20;
+ case 166 ... 168:
+ return PHY_TSSI_EXTRA_GROUP(20);
+ case 169 ... 173:
+ return 21;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(21);
+ case 177 ... 181:
+ return 22;
+ case 182 ... 184:
+ return PHY_TSSI_EXTRA_GROUP(22);
+ case 185 ... 189:
+ return 23;
+ case 193 ... 197:
+ return 24;
+ case 198 ... 200:
+ return PHY_TSSI_EXTRA_GROUP(24);
+ case 201 ... 205:
+ return 25;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(25);
+ case 209 ... 213:
+ return 26;
+ case 214 ... 216:
+ return PHY_TSSI_EXTRA_GROUP(26);
+ case 217 ... 221:
+ return 27;
+ case 225 ... 229:
+ return 28;
+ case 230 ... 232:
+ return PHY_TSSI_EXTRA_GROUP(28);
+ case 233 ... 237:
+ return 29;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(29);
+ case 241 ... 245:
+ return 30;
+ case 246 ... 248:
+ return PHY_TSSI_EXTRA_GROUP(30);
+ case 249 ... 253:
+ return 31;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 13:
+ return 0;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 17 ... 29:
+ return 1;
+ case 33 ... 45:
+ return 2;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 49 ... 61:
+ return 3;
+ case 65 ... 77:
+ return 4;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 81 ... 93:
+ return 5;
+ case 97 ... 109:
+ return 6;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 113 ... 125:
+ return 7;
+ case 129 ... 141:
+ return 8;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 145 ... 157:
+ return 9;
+ case 161 ... 173:
+ return 10;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 177 ... 189:
+ return 11;
+ case 193 ... 205:
+ return 12;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 209 ... 221:
+ return 13;
+ case 225 ... 237:
+ return 14;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 241 ... 253:
+ return 15;
+ }
+
+ return 0;
+}
+
+static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 gidx_1st;
+ u32 gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ u32 gidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ gidx = phy_tssi_get_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+
+calc_6g:
+ gidx = phy_tssi_get_6g_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_6g_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 tgidx_1st;
+ u32 tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ u32 tgidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ tgidx = phy_tssi_get_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+
+calc_6g:
+ tgidx = phy_tssi_get_6g_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim_6g[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = chan->channel;
+ s8 trim_de;
+ s8 ofdm_de;
+ s8 cck_de;
+ u8 gidx;
+ s8 val;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
+ trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
+ h2c->curr_tssi_trim_de[i] = trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);
+
+ gidx = phy_tssi_get_cck_group(ch);
+ cck_de = tssi_info->tssi_cck[i][gidx];
+ val = u32_get_bits(cck_de + trim_de, 0xff);
+
+ h2c->curr_tssi_cck_de[i] = 0x0;
+ h2c->curr_tssi_cck_de_20m[i] = val;
+ h2c->curr_tssi_cck_de_40m[i] = val;
+ h2c->curr_tssi_efuse_cck_de[i] = cck_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);
+
+ ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
+ val = u32_get_bits(ofdm_de + trim_de, 0xff);
+
+ h2c->curr_tssi_ofdm_de[i] = 0x0;
+ h2c->curr_tssi_ofdm_de_20m[i] = val;
+ h2c->curr_tssi_ofdm_de_40m[i] = val;
+ h2c->curr_tssi_ofdm_de_80m[i] = val;
+ h2c->curr_tssi_ofdm_de_160m[i] = val;
+ h2c->curr_tssi_ofdm_de_320m[i] = val;
+ h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
+ }
+}
+
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const s8 *thm_up[RF_PATH_B + 1] = {};
+ const s8 *thm_down[RF_PATH_B + 1] = {};
+ u8 subband = chan->subband_type;
+ s8 thm_ofst[128] = {0};
+ u8 thermal;
+ u8 path;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
+ break;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
+ break;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] tmeter tbl on subband: %u\n", subband);
+
+ for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ thermal = tssi_info->thermal[path];
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "path: %u, pg thermal: 0x%x\n", path, thermal);
+
+ if (thermal == 0xff) {
+ h2c->pg_thermal[path] = 0x38;
+ memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
+ continue;
+ }
+
+ h2c->pg_thermal[path] = thermal;
+
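+ /* Entries 0..63 step through the thermal-up deltas; entries 127 down
+ * to 64 hold the negated thermal-down deltas. Past the end of the
+ * delta table, the last value is held.
+ */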
+ i = 0;
+ for (j = 0; j < 64; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up[path][i++] :
+ thm_up[path][DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 127; j >= 64; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down[path][i++] :
+ -thm_down[path][DELTA_SWINGIDX_SIZE - 1];
+
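+ /* Pack the offsets into the H2C ftable with each 4-byte group in
+ * reversed byte order.
+ */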
+ for (i = 0; i < 128; i += 4) {
+ h2c->ftable[path][i + 0] = thm_ofst[i + 3];
+ h2c->ftable[path][i + 1] = thm_ofst[i + 2];
+ h2c->ftable[path][i + 2] = thm_ofst[i + 1];
+ h2c->ftable[path][i + 3] = thm_ofst[i + 0];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "thm ofst [%x]: %02x %02x %02x %02x\n",
+ i, thm_ofst[i], thm_ofst[i + 1],
+ thm_ofst[i + 2], thm_ofst[i + 3]);
+ }
+ }
+}
+
static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
@@ -4551,6 +5522,9 @@ static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
const struct rtw89_agc_gaincode_set set)
{
+ if (!rtwdev->hal.support_igi)
+ return;
+
rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
@@ -4606,7 +5580,8 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
s8 cck_cca_th;
u32 pd_val = 0;
- under_region += PD_TH_SB_FLTR_CMP_VAL;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
+ under_region += PD_TH_SB_FLTR_CMP_VAL;
switch (cbw) {
case RTW89_CHANNEL_WIDTH_40:
@@ -4953,12 +5928,15 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
+ rtw89_phy_bb_wrap_init(rtwdev);
rtw89_phy_edcca_init(rtwdev);
+ rtw89_phy_ch_info_init(rtwdev);
rtw89_phy_ul_tb_info_init(rtwdev);
rtw89_phy_antdiv_init(rtwdev);
rtw89_chip_rfe_gpio(rtwdev);
rtw89_phy_antdiv_set_ant(rtwdev);
+ rtw89_chip_rfk_hw_init(rtwdev);
rtw89_phy_init_rf_nctl(rtwdev);
rtw89_chip_rfk_init(rtwdev);
rtw89_chip_set_txpwr_ctrl(rtwdev);
@@ -5400,6 +6378,78 @@ void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
rtw89_phy_edcca_log(rtwdev);
}
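+
+ /* Map the current MLO/DBCC mode and PHY index to the bitmap of RF
+ * paths that RF calibration (RFK) should cover.
+ */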
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ return RF_AB;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_AB;
+ else
+ return RF_CD;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_kpath);
+
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_C;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
+
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.setting_addr = R_CCX,
.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
@@ -5476,6 +6526,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
.ccx = &rtw89_ccx_regs_ax,
.physts = &rtw89_physts_regs_ax,
.cfo = &rtw89_cfo_regs_ax,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
+ .config_bb_gain = rtw89_phy_config_bb_gain_ax,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
+ .bb_wrap_init = NULL,
+ .ch_info_init = NULL,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 3e379077c6ca..082231ebbee5 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -7,6 +7,7 @@
#include "core.h"
+#define RTW89_BBMCU_ADDR_OFFSET 0x30000
#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
@@ -509,6 +510,14 @@ struct rtw89_phy_gen_def {
const struct rtw89_ccx_regs *ccx;
const struct rtw89_physts_regs *physts;
const struct rtw89_cfo_regs *cfo;
+ u32 (*phy0_phy1_offset)(struct rtw89_dev *rtwdev, u32 addr);
+ void (*config_bb_gain)(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
+ void (*preinit_rf_nctl)(struct rtw89_dev *rtwdev);
+ void (*bb_wrap_init)(struct rtw89_dev *rtwdev);
+ void (*ch_info_init)(struct rtw89_dev *rtwdev);
void (*set_txpwr_byrate)(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -604,6 +613,15 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr + phy->cr_base, mask);
}
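+
+ /* BBMCU registers live behind a fixed 0x30000 address window; PHY1's
+ * copy of the low 0x10000 region is mapped a further 0x20000 up.
+ */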
+static inline void rtw89_bbmcu_write32(struct rtw89_dev *rtwdev,
+ u32 addr, u32 data, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx && addr < 0x10000)
+ addr += 0x20000;
+
+ rtw89_write32(rtwdev, addr + RTW89_BBMCU_ADDR_OFFSET, data);
+}
+
static inline
enum rtw89_gain_offset rtw89_subband_to_gain_offset_band_of_ofdm(enum rtw89_subband subband)
{
@@ -664,6 +682,38 @@ enum rtw89_phy_bb_gain_band rtw89_subband_to_bb_gain_band(enum rtw89_subband sub
}
}
+static inline
+enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G_BE;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L_BE;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M_BE;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H_BE;
+ case RTW89_CH_6G_BAND_IDX0:
+ return RTW89_BB_GAIN_BAND_6G_L0_BE;
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L1_BE;
+ case RTW89_CH_6G_BAND_IDX2:
+ return RTW89_BB_GAIN_BAND_6G_M0_BE;
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M1_BE;
+ case RTW89_CH_6G_BAND_IDX4:
+ return RTW89_BB_GAIN_BAND_6G_H0_BE;
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H1_BE;
+ case RTW89_CH_6G_BAND_IDX6:
+ return RTW89_BB_GAIN_BAND_6G_UH0_BE;
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH1_BE;
+ }
+}
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -728,14 +778,20 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
@@ -759,6 +815,29 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch);
+static inline void rtw89_phy_preinit_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ phy->preinit_rf_nctl(rtwdev);
+}
+
+static inline void rtw89_phy_bb_wrap_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->bb_wrap_init)
+ phy->bb_wrap_init(rtwdev);
+}
+
+static inline void rtw89_phy_ch_info_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->ch_info_init)
+ phy->ch_info_init(rtwdev);
+}
+
static inline
void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -809,6 +888,36 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms);
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
void rtw89_phy_cfo_track_work(struct work_struct *work);
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
@@ -836,5 +945,9 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index 63eeeea72b68..be0148f2b96f 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -78,6 +78,332 @@ static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
.valid_0_mask = B_DCFO_OPT_EN_V1,
};
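+ /* On BE chips, PHY1 mirrors the register pages listed below at a fixed
+ * +0x1000 offset from PHY0; every other page is shared by both PHYs.
+ */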
+static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 phy_page = addr >> 8;
+ u32 ofst = 0;
+
+ if ((phy_page >= 0x04 && phy_page <= 0x0f) ||
+ (phy_page >= 0x20 && phy_page <= 0x2b) ||
+ (phy_page >= 0x40 && phy_page <= 0x4f) ||
+ (phy_page >= 0x60 && phy_page <= 0x6f) ||
+ (phy_page >= 0xe4 && phy_page <= 0xe5) ||
+ (phy_page >= 0xe8 && phy_page <= 0xed))
+ ofst = 0x1000;
+
+ return ofst;
+}
+
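+ /* Overlay that decodes a BB gain table "address" word into its type,
+ * path/bandwidth, gain-band and config-type fields.
+ */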
+union rtw89_phy_bb_gain_arg_be {
+ u32 addr;
+ struct {
+ u8 type;
+#define BB_GAIN_TYPE_SUB0_BE GENMASK(3, 0)
+#define BB_GAIN_TYPE_SUB1_BE GENMASK(7, 4)
+ u8 path_bw;
+#define BB_GAIN_PATH_BE GENMASK(3, 0)
+#define BB_GAIN_BW_BE GENMASK(7, 4)
+ u8 gain_band;
+ u8 cfg_type;
+ } __packed;
+} __packed;
+
+static void
+rtw89_phy_cfg_bb_gain_error_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 2; i++, data >>= 8)
+ gain->tia_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain error {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_rpl_ofst_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 type_sub0 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB0_BE);
+ u8 type_sub1 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB1_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 ofst = 0;
+ int i;
+
+ switch (type_sub1) {
+ case RTW89_CMAC_BW_20M:
+ gain->rpl_ofst_20[gband][path][0] = (s8)data;
+ break;
+ case RTW89_CMAC_BW_40M:
+ for (i = 0; i < RTW89_BW20_SC_40M; i++, data >>= 8)
+ gain->rpl_ofst_40[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_80M:
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_80[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_160M:
+ if (type_sub0 == 0)
+ ofst = 0;
+ else
+ ofst = RTW89_BW20_SC_80M;
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_160[gband][path][i + ofst] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb rpl ofst {0x%x:0x%x} with unknown type_sub1: %d\n",
+ arg.addr, data, type_sub1);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_op1db_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 3:
+ for (i = 4; i < 8; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void rtw89_phy_config_bb_gain_be(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ union rtw89_phy_bb_gain_arg_be arg = { .addr = reg->addr };
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+
+ if (bw_type >= RTW89_BB_BW_NR_BE)
+ return;
+
+ if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR_BE)
+ return;
+
+ if (path >= chip->rf_path_num)
+ return;
+
+ if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
+ rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
+ return;
+ }
+
+ switch (arg.cfg_type) {
+ case 0:
+ rtw89_phy_cfg_bb_gain_error_be(rtwdev, arg, reg->data);
+ break;
+ case 1:
+ rtw89_phy_cfg_bb_rpl_ofst_be(rtwdev, arg, reg->data);
+ break;
+ case 2:
+ /* ignore BB gain bypass */
+ break;
+ case 3:
+ rtw89_phy_cfg_bb_gain_op1db_be(rtwdev, arg, reg->data);
+ break;
+ case 4:
+ /* This cfg_type is only used by rfe_type >= 50 with eFEM */
+ if (efuse->rfe_type < 50)
+ break;
+ fallthrough;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
+ arg.addr, reg->data, arg.cfg_type);
+ break;
+ }
+}
+
+static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQKDPK_HC, B_IQKDPK_HC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CLK_GCK, B_CLK_GCK, 0x00fffff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC, B_TXRFC_RST, 0x1);
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_C1, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC_C1, B_TXRFC_RST, 0x1);
+ }
+}
+
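+ /* Program a default limit word into each of the 32 per-MACID power
+ * limit entries, spaced 4 bytes apart from R_BE_PWR_MACID_LMT_BASE.
+ */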
+static
+void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
+
+ base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
+
+ for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
+ cr = base_macid_lmt + macid_idx;
+ rtw89_write32(rtwdev, cr, 0x03007F7F);
+ }
+}
+
+static
+void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ int i, max_macid = 32;
+ u32 cr = R_BE_PWR_MACID_PATH_BASE;
+
+ for (i = 0; i < max_macid; i++, cr += 4)
+ rtw89_write32(rtwdev, cr, 0x03C86000);
+}
+
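+ /* Zero the by-rate, RU-limit and rate-offset power tables, then clear
+ * every power-offset field for the given MAC index.
+ */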
+static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_LMTBF, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMTBF_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_BYRATE_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_RULMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_SW, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_SW_DB, 0);
+}
+
+static
+void rtw89_phy_bb_wrap_listen_path_en_init(struct rtw89_dev *rtwdev)
+{
+ u32 addr;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ if (ret)
+ return;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_LISTEN_PATH, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_LISTEN_PATH_EN, 0x2);
+}
+
+static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_LMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_LMT_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RATE_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_FORCE_PWR_BY_RATE_EN, 0);
+}
+
+static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM, mac_idx);
+ rtw89_write32(rtwdev, addr, 0xE4E431);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, 0x7, 0);
+}
+
+static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_mac_idx mac_idx = RTW89_MAC_0;
+
+ rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
+ rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+}
+
+static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG_LEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_DATA, B_CHINFO_DATA_BITMAP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_ELM_BITMAP, 0x40303);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_SRC, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_TYPE, 0x3);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
+}
+
struct rtw89_byr_spec_ent_be {
struct rtw89_rate_desc init;
u8 num_of_idx;
@@ -644,6 +970,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
.ccx = &rtw89_ccx_regs_be,
.physts = &rtw89_physts_regs_be,
.cfo = &rtw89_cfo_regs_be,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
+ .config_bb_gain = rtw89_phy_config_bb_gain_be,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
+ .bb_wrap_init = rtw89_phy_bb_wrap_init_be,
+ .ch_info_init = rtw89_phy_ch_info_init_be,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 917c01e5e9ed..31290d8cb7f7 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -14,6 +14,7 @@
static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 pwr_en_bit = 0xE;
u32 chk_msk = pwr_en_bit << (4 * macid);
u32 polling;
@@ -21,7 +22,7 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
ret = read_poll_timeout_atomic(rtw89_read32_mask, polling, !polling,
1000, 50000, false, rtwdev,
- R_AX_PPWRBIT_SETTING, chk_msk);
+ mac->ps_status, chk_msk);
if (ret) {
rtw89_info(rtwdev, "rtw89: failed to leave lps state\n");
return -EBUSY;
@@ -83,16 +84,17 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_LEGACY,
.lastrpwm = RTW89_LAST_RPWM_PS,
};
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
@@ -123,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_lps(rtwdev, rtwvif);
if (ps_mode)
__rtw89_enter_ps_mode(rtwdev, rtwvif);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 8456e2b0c14f..72e448e91b6f 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -3246,6 +3246,13 @@
#define R_AX_RX_SR_CTRL_C1 0xEE4A
#define B_AX_SR_EN BIT(0)
+#define R_AX_BSSID_SRC_CTRL 0xCE4B
+#define R_AX_BSSID_SRC_CTRL_C1 0xEE4B
+#define B_AX_BSSID_MATCH BIT(3)
+#define B_AX_PARTIAL_AID_MATCH BIT(2)
+#define B_AX_BSSCOLOR_MATCH BIT(1)
+#define B_AX_PLCP_SRC_EN BIT(0)
+
#define R_AX_CSIRPT_OPTION 0xCE64
#define R_AX_CSIRPT_OPTION_C1 0xEE64
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
@@ -3503,8 +3510,13 @@
#define B_AX_PTA_EDCCA_EN BIT(0)
#define R_BTC_COEX_WL_REQ 0xDA24
+#define R_BTC_COEX_WL_REQ_BE 0xE324
+#define B_BTC_TX_NULL_HI BIT(23)
#define B_BTC_TX_BCN_HI BIT(22)
+#define B_BTC_TX_TRI_HI BIT(17)
#define B_BTC_RSP_ACK_HI BIT(10)
+#define B_BTC_PRI_MASK_TX_TIME GENMASK(4, 3)
+#define B_BTC_PRI_MASK_RX_TIME_V1 GENMASK(2, 1)
#define R_BTC_BREAK_TABLE 0xDA2C
#define BTC_BREAK_PARAM 0xf0ffffff
@@ -3752,6 +3764,19 @@
#define B_BE_SYM_PADPDN_WL_RFC1_1P3 BIT(6)
#define B_BE_SYM_PADPDN_WL_RFC0_1P3 BIT(5)
+#define R_BE_RSV_CTRL 0x001C
+#define B_BE_HR_BE_DBG GENMASK(23, 12)
+#define B_BE_R_SYM_DIS_PCIE_FLR BIT(9)
+#define B_BE_R_EN_HRST_PWRON BIT(8)
+#define B_BE_LOCK_ALL_EN BIT(7)
+#define B_BE_R_DIS_PRST BIT(6)
+#define B_BE_WLOCK_1C_BIT6 BIT(5)
+#define B_BE_WLOCK_40 BIT(4)
+#define B_BE_WLOCK_08 BIT(3)
+#define B_BE_WLOCK_04 BIT(2)
+#define B_BE_WLOCK_00 BIT(1)
+#define B_BE_WLOCK_ALL BIT(0)
+
#define R_BE_AFE_LDO_CTRL 0x0020
#define B_BE_FORCE_MACBBBT_PWR_ON BIT(31)
#define B_BE_R_SYM_WLPOFF_P4_PC_EN BIT(28)
@@ -4033,6 +4058,30 @@
#define B_BE_SYSON_DIS_PMCR_BE_WRMSK BIT(2)
#define B_BE_SYSON_R_BE_ARB_MASK GENMASK(1, 0)
+#define R_BE_MEM_PWR_CTRL 0x00D0
+#define B_BE_DMEM5_WLMCU_DS BIT(31)
+#define B_BE_DMEM4_WLMCU_DS BIT(30)
+#define B_BE_DMEM3_WLMCU_DS BIT(29)
+#define B_BE_DMEM2_WLMCU_DS BIT(28)
+#define B_BE_DMEM1_WLMCU_DS BIT(27)
+#define B_BE_DMEM0_WLMCU_DS BIT(26)
+#define B_BE_IMEM5_WLMCU_DS BIT(25)
+#define B_BE_IMEM4_WLMCU_DS BIT(24)
+#define B_BE_IMEM3_WLMCU_DS BIT(23)
+#define B_BE_IMEM2_WLMCU_DS BIT(22)
+#define B_BE_IMEM1_WLMCU_DS BIT(21)
+#define B_BE_IMEM0_WLMCU_DS BIT(20)
+#define B_BE_MEM_BBMCU1_DS BIT(19)
+#define B_BE_MEM_BBMCU0_DS_V1 BIT(17)
+#define B_BE_MEM_BT_DS BIT(10)
+#define B_BE_MEM_SDIO_LS BIT(9)
+#define B_BE_MEM_SDIO_DS BIT(8)
+#define B_BE_MEM_USB_LS BIT(7)
+#define B_BE_MEM_USB_DS BIT(6)
+#define B_BE_MEM_PCI_LS BIT(5)
+#define B_BE_MEM_PCI_DS BIT(4)
+#define B_BE_MEM_WLMAC_LS BIT(3)
+
#define R_BE_PCIE_MIO_INTF 0x00E4
#define B_BE_AON_MIO_EPHY_1K_SEL_MASK GENMASK(29, 24)
#define B_BE_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
@@ -4401,12 +4450,28 @@
#define R_BE_LTR_LATENCY_IDX2_V1 0x361C
#define R_BE_LTR_LATENCY_IDX3_V1 0x3620
+#define R_BE_H2CREG_DATA0 0x7140
+#define R_BE_H2CREG_DATA1 0x7144
+#define R_BE_H2CREG_DATA2 0x7148
+#define R_BE_H2CREG_DATA3 0x714C
+#define R_BE_C2HREG_DATA0 0x7150
+#define R_BE_C2HREG_DATA1 0x7154
+#define R_BE_C2HREG_DATA2 0x7158
+#define R_BE_C2HREG_DATA3 0x715C
+#define R_BE_H2CREG_CTRL 0x7160
+#define B_BE_H2CREG_TRIGGER BIT(0)
+#define R_BE_C2HREG_CTRL 0x7164
+#define B_BE_C2HREG_TRIGGER BIT(0)
+
#define R_BE_HCI_FUNC_EN 0x7880
#define B_BE_HCI_CR_PROTECT BIT(31)
#define B_BE_HCI_TRXBUF_EN BIT(2)
#define B_BE_HCI_RXDMA_EN BIT(1)
#define B_BE_HCI_TXDMA_EN BIT(0)
+#define R_BE_DBG_WOW_READY 0x815E
+#define B_BE_DBG_WOW_READY GENMASK(7, 0)
+
#define R_BE_DMAC_FUNC_EN 0x8400
#define B_BE_DMAC_CRPRT BIT(31)
#define B_BE_MAC_FUNC_EN BIT(30)
@@ -4488,6 +4553,42 @@
#define B_BE_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
#define B_BE_SER_L0_COUNTER_MASK GENMASK(8, 0)
+#define R_BE_DMAC_SYS_CR32B 0x842C
+#define B_BE_DMAC_BB_PHY1_MASK GENMASK(31, 16)
+#define B_BE_DMAC_BB_PHY0_MASK GENMASK(15, 0)
+#define B_BE_DMAC_BB_CTRL_39 BIT(31)
+#define B_BE_DMAC_BB_CTRL_38 BIT(30)
+#define B_BE_DMAC_BB_CTRL_37 BIT(29)
+#define B_BE_DMAC_BB_CTRL_36 BIT(28)
+#define B_BE_DMAC_BB_CTRL_35 BIT(27)
+#define B_BE_DMAC_BB_CTRL_34 BIT(26)
+#define B_BE_DMAC_BB_CTRL_33 BIT(25)
+#define B_BE_DMAC_BB_CTRL_32 BIT(24)
+#define B_BE_DMAC_BB_CTRL_31 BIT(23)
+#define B_BE_DMAC_BB_CTRL_30 BIT(22)
+#define B_BE_DMAC_BB_CTRL_29 BIT(21)
+#define B_BE_DMAC_BB_CTRL_28 BIT(20)
+#define B_BE_DMAC_BB_CTRL_27 BIT(19)
+#define B_BE_DMAC_BB_CTRL_26 BIT(18)
+#define B_BE_DMAC_BB_CTRL_25 BIT(17)
+#define B_BE_DMAC_BB_CTRL_24 BIT(16)
+#define B_BE_DMAC_BB_CTRL_23 BIT(15)
+#define B_BE_DMAC_BB_CTRL_22 BIT(14)
+#define B_BE_DMAC_BB_CTRL_21 BIT(13)
+#define B_BE_DMAC_BB_CTRL_20 BIT(12)
+#define B_BE_DMAC_BB_CTRL_19 BIT(11)
+#define B_BE_DMAC_BB_CTRL_18 BIT(10)
+#define B_BE_DMAC_BB_CTRL_17 BIT(9)
+#define B_BE_DMAC_BB_CTRL_16 BIT(8)
+#define B_BE_DMAC_BB_CTRL_15 BIT(7)
+#define B_BE_DMAC_BB_CTRL_14 BIT(6)
+#define B_BE_DMAC_BB_CTRL_13 BIT(5)
+#define B_BE_DMAC_BB_CTRL_12 BIT(4)
+#define B_BE_DMAC_BB_CTRL_11 BIT(3)
+#define B_BE_DMAC_BB_CTRL_10 BIT(2)
+#define B_BE_DMAC_BB_CTRL_9 BIT(1)
+#define B_BE_DMAC_BB_CTRL_8 BIT(0)
+
#define R_BE_DLE_EMPTY0 0x8430
#define B_BE_PLE_EMPTY_QTA_DMAC_H2D BIT(27)
#define B_BE_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
@@ -4924,6 +5025,12 @@
B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN | \
B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define R_BE_RX_STOP 0x8914
+#define B_BE_CPU_RX_STOP BIT(17)
+#define B_BE_HOST_RX_STOP BIT(16)
+#define B_BE_CPU_RX_CH_STOP_MSK GENMASK(15, 8)
+#define B_BE_HOST_RX_CH_STOP_MSK GENMASK(5, 0)
+
#define R_BE_DISP_FWD_WLAN_0 0x8938
#define B_BE_FWD_WLAN_CPU_TYPE_13_MASK GENMASK(31, 30)
#define B_BE_FWD_WLAN_CPU_TYPE_12_MASK GENMASK(29, 28)
@@ -4947,6 +5054,11 @@
#define B_BE_WDE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_WDE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_WDE_BUFMGN_CTL 0x8C10
+#define B_BE_WDE_AVAL_UPD_REQ BIT(29)
+#define B_BE_WDE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_WDE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_WDE_ERR_IMR 0x8C38
#define B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5063,6 +5175,11 @@
#define B_BE_PLE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_PLE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_PLE_BUFMGN_CTL 0x9010
+#define B_BE_PLE_AVAL_UPD_REQ BIT(29)
+#define B_BE_PLE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_PLE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_PLE_ERR_IMR 0x9038
#define B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5429,6 +5546,21 @@
#define B_BE_DROP_NONDMA_PPDU BIT(2)
#define B_BE_APPEND_FCS BIT(0)
+#define R_BE_FWD_ERR 0x9C10
+#define R_BE_FWD_ACTN0 0x9C14
+#define R_BE_FWD_ACTN1 0x9C18
+#define R_BE_FWD_ACTN2 0x9C1C
+#define R_BE_FWD_TF0 0x9C20
+#define R_BE_FWD_TF1 0x9C24
+
+#define R_BE_HW_PPDU_STATUS 0x9C30
+#define B_BE_FWD_RPKTTYPE_MASK GENMASK(31, 26)
+#define B_BE_FWD_PPDU_PRTID_MASK GENMASK(25, 23)
+#define B_BE_FWD_PPDU_FW_RLS BIT(22)
+#define B_BE_FWD_PPDU_QUEID_MASK GENMASK(21, 16)
+#define B_BE_FWD_OTHER_RPKT_MASK GENMASK(15, 8)
+#define B_BE_FWD_PPDU_STAT_MASK GENMASK(7, 0)
+
#define R_BE_CUT_AMSDU_CTRL 0x9C94
#define B_BE_EN_CUT_AMSDU BIT(31)
#define B_BE_CUT_AMSDU_CHKLEN_EN BIT(30)
@@ -5437,6 +5569,12 @@
#define B_BE_CUT_AMSDU_CHKLEN_L_TH_MASK GENMASK(23, 16)
#define B_BE_CUT_AMSDU_CHKLEN_H_TH_MASK GENMASK(15, 0)
+#define R_BE_WOW_CTRL 0x9CB8
+#define B_BE_WOW_HCI BIT(5)
+#define B_BE_WOW_DROP BIT(2)
+#define B_BE_WOW_WOWEN BIT(1)
+#define B_BE_WOW_FORCE_WAKEUP BIT(0)
+
#define R_BE_RX_HDRTRNS 0x9CC0
#define B_BE_RX_MGN_MLD_ADDR_EN BIT(6)
#define B_BE_HDR_INFO_MASK GENMASK(5, 4)
@@ -5727,6 +5865,9 @@
#define B_BE_STOP_CH1 BIT(1)
#define B_BE_STOP_CH0 BIT(0)
+#define R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1 0xB02C
+#define B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK GENMASK(4, 0)
+
#define R_BE_HAXI_IDCT_MSK 0xB0B8
#define B_BE_HAXI_RRESP_ERR_IDCT_MSK BIT(7)
#define B_BE_HAXI_BRESP_ERR_IDCT_MSK BIT(6)
@@ -5777,6 +5918,15 @@
#define B_BE_PREC_PAGE_CH12_V1_MASK GENMASK(21, 16)
#define B_BE_PREC_PAGE_CH011_V1_MASK GENMASK(5, 0)
+#define R_BE_CH0_PAGE_CTRL 0xB718
+#define B_BE_CH0_GRP BIT(31)
+#define B_BE_CH0_MAX_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_MIN_PG_MASK GENMASK(12, 0)
+
+#define R_BE_CH0_PAGE_INFO 0xB750
+#define B_BE_CH0_AVAL_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_USE_PG_MASK GENMASK(12, 0)
+
#define R_BE_PUB_PAGE_INFO3 0xB78C
#define B_BE_G1_AVAL_PG_MASK GENMASK(28, 16)
#define B_BE_G0_AVAL_PG_MASK GENMASK(12, 0)
@@ -5822,6 +5972,39 @@
#define B_BE_MACID_ACQ_GRP0_CLR_P BIT(2)
#define B_BE_R_MACID_ACQ_CHK_EN BIT(0)
+#define R_BE_BT_BREAK_TABLE 0x0E344
+
+#define R_BE_GNT_SW_CTRL 0x0E348
+#define B_BE_WL_ACT2_VAL BIT(25)
+#define B_BE_WL_ACT2_SWCTRL BIT(24)
+#define B_BE_WL_ACT_VAL BIT(23)
+#define B_BE_WL_ACT_SWCTRL BIT(22)
+#define B_BE_GNT_BT_RX_BB1_VAL BIT(21)
+#define B_BE_GNT_BT_RX_BB1_SWCTRL BIT(20)
+#define B_BE_GNT_BT_TX_BB1_VAL BIT(19)
+#define B_BE_GNT_BT_TX_BB1_SWCTRL BIT(18)
+#define B_BE_GNT_BT_RX_BB0_VAL BIT(17)
+#define B_BE_GNT_BT_RX_BB0_SWCTRL BIT(16)
+#define B_BE_GNT_BT_TX_BB0_VAL BIT(15)
+#define B_BE_GNT_BT_TX_BB0_SWCTRL BIT(14)
+#define B_BE_GNT_WL_RX_VAL BIT(13)
+#define B_BE_GNT_WL_RX_SWCTRL BIT(12)
+#define B_BE_GNT_WL_TX_VAL BIT(11)
+#define B_BE_GNT_WL_TX_SWCTRL BIT(10)
+#define B_BE_GNT_BT_BB1_VAL BIT(9)
+#define B_BE_GNT_BT_BB1_SWCTRL BIT(8)
+#define B_BE_GNT_WL_BB1_VAL BIT(7)
+#define B_BE_GNT_WL_BB1_SWCTRL BIT(6)
+#define B_BE_GNT_BT_BB0_VAL BIT(5)
+#define B_BE_GNT_BT_BB0_SWCTRL BIT(4)
+#define B_BE_GNT_WL_BB0_VAL BIT(3)
+#define B_BE_GNT_WL_BB0_SWCTRL BIT(2)
+#define B_BE_GNT_WL_BB_PWR_VAL BIT(1)
+#define B_BE_GNT_WL_BB_PWR_SWCTRL BIT(0)
+
+#define R_BE_PWR_MACID_PATH_BASE 0x0E500
+#define R_BE_PWR_MACID_LMT_BASE 0x0ED00
+
#define R_BE_CMAC_FUNC_EN 0x10000
#define R_BE_CMAC_FUNC_EN_C1 0x14000
#define B_BE_CMAC_CRPRT BIT(31)
@@ -5873,6 +6056,16 @@
B_BE_RMAC_CKEN | B_BE_TXTIME_CKEN | B_BE_RESP_PKTCTL_CKEN | \
B_BE_SIGB_CKEN)
+#define R_BE_WMAC_RFMOD 0x10010
+#define R_BE_WMAC_RFMOD_C1 0x14010
+#define B_BE_CMAC_ASSERTION BIT(31)
+#define B_BE_WMAC_RFMOD_MASK GENMASK(2, 0)
+#define BE_WMAC_RFMOD_20M 0
+#define BE_WMAC_RFMOD_40M 1
+#define BE_WMAC_RFMOD_80M 2
+#define BE_WMAC_RFMOD_160M 3
+#define BE_WMAC_RFMOD_320M 4
+
#define R_BE_TX_SUB_BAND_VALUE 0x10088
#define R_BE_TX_SUB_BAND_VALUE_C1 0x14088
#define B_BE_PRI20_BITMAP_MASK GENMASK(31, 16)
@@ -6009,6 +6202,13 @@
#define B_BE_MACTX_LATENCY_MASK GENMASK(10, 8)
#define B_BE_PREBKF_TIME_MASK GENMASK(4, 0)
+#define R_BE_PREBKF_CFG_1 0x1033C
+#define R_BE_PREBKF_CFG_1_C1 0x1433C
+#define B_BE_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(31, 24)
+#define B_BE_SIFS_PREBKF_MASK GENMASK(23, 16)
+#define B_BE_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
+#define B_BE_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
+
#define R_BE_CCA_CFG_0 0x10340
#define R_BE_CCA_CFG_0_C1 0x14340
#define B_BE_R_SIFS_AGGR_TIME_V1_MASK GENMASK(31, 24)
@@ -6050,11 +6250,36 @@
#define R_BE_MUEDCA_EN 0x10370
#define R_BE_MUEDCA_EN_C1 0x14370
+#define B_BE_SIFS_TIMEOUT_TB_T2_MASK GENMASK(30, 24)
+#define B_BE_SIFS_MACTXEN_TB_T1_MASK GENMASK(22, 16)
#define B_BE_MUEDCA_WMM_SEL BIT(8)
-#define B_BE_SET_MUEDCATIMER_TF_1 BIT(5)
+#define B_BE_SET_MUEDCATIMER_TF_MASK GENMASK(5, 4)
#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4)
+#define B_BE_MUEDCA_EN_MASK GENMASK(1, 0)
#define B_BE_MUEDCA_EN_0 BIT(0)
+#define R_BE_CTN_DRV_TXEN 0x10398
+#define R_BE_CTN_DRV_TXEN_C1 0x14398
+#define B_BE_CTN_TXEN_TWT_3 BIT(17)
+#define B_BE_CTN_TXEN_TWT_2 BIT(16)
+#define B_BE_CTN_TXEN_TWT_1 BIT(15)
+#define B_BE_CTN_TXEN_TWT_0 BIT(14)
+#define B_BE_CTN_TXEN_ULQ BIT(13)
+#define B_BE_CTN_TXEN_BCNQ BIT(12)
+#define B_BE_CTN_TXEN_HGQ BIT(11)
+#define B_BE_CTN_TXEN_CPUMGQ BIT(10)
+#define B_BE_CTN_TXEN_MGQ1 BIT(9)
+#define B_BE_CTN_TXEN_MGQ BIT(8)
+#define B_BE_CTN_TXEN_VO_1 BIT(7)
+#define B_BE_CTN_TXEN_VI_1 BIT(6)
+#define B_BE_CTN_TXEN_BK_1 BIT(5)
+#define B_BE_CTN_TXEN_BE_1 BIT(4)
+#define B_BE_CTN_TXEN_VO_0 BIT(3)
+#define B_BE_CTN_TXEN_VI_0 BIT(2)
+#define B_BE_CTN_TXEN_BK_0 BIT(1)
+#define B_BE_CTN_TXEN_BE_0 BIT(0)
+#define B_BE_CTN_TXEN_ALL_MASK GENMASK(17, 0)
+
#define R_BE_TB_CHK_CCA_NAV 0x103AC
#define R_BE_TB_CHK_CCA_NAV_C1 0x143AC
#define B_BE_TB_CHK_TX_NAV BIT(15)
@@ -6212,6 +6437,8 @@
#define R_BE_TSFTR_HIGH_P0_C1 0x1443C
#define B_BE_TSFTR_HIGH_P0_MASK GENMASK(31, 0)
+#define R_BE_BCN_DROP_ALL0 0x10560
+
#define R_BE_MBSSID_CTRL 0x10568
#define R_BE_MBSSID_CTRL_C1 0x14568
#define B_BE_MBSSID_MODE_SEL BIT(20)
@@ -6282,6 +6509,17 @@
#define B_BE_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8)
#define B_BE_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0)
+#define R_BE_TXRATE_CHK 0x10828
+#define R_BE_TXRATE_CHK_C1 0x14828
+#define B_BE_LATENCY_PADDING_PKT_TH_MASK GENMASK(31, 24)
+#define B_BE_PLCP_FETCH_BUFF_MASK GENMASK(23, 16)
+#define B_BE_OFDM_CCK_ERR_PROC BIT(6)
+#define B_BE_PKT_LAST_TX BIT(5)
+#define B_BE_BAND_MODE BIT(4)
+#define B_BE_MAX_TXNSS_MASK GENMASK(3, 2)
+#define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1)
+#define B_BE_CHECK_CCK_EN BIT(0)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -6289,6 +6527,20 @@
#define B_BE_PORT_DROP_4_0_MASK GENMASK(20, 16)
#define B_BE_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+#define R_BE_BT_PLT 0x1087C
+#define R_BE_BT_PLT_C1 0x1487C
+#define B_BE_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
+#define B_BE_BT_PLT_RST BIT(9)
+#define B_BE_PLT_EN BIT(8)
+#define B_BE_RX_PLT_GNT_LTE_RX BIT(7)
+#define B_BE_RX_PLT_GNT_BT_RX BIT(6)
+#define B_BE_RX_PLT_GNT_BT_TX BIT(5)
+#define B_BE_RX_PLT_GNT_WL BIT(4)
+#define B_BE_TX_PLT_GNT_LTE_RX BIT(3)
+#define B_BE_TX_PLT_GNT_BT_RX BIT(2)
+#define B_BE_TX_PLT_GNT_BT_TX BIT(1)
+#define B_BE_TX_PLT_GNT_WL BIT(0)
+
#define R_BE_PTCL_BSS_COLOR_0 0x108A0
#define R_BE_PTCL_BSS_COLOR_0_C1 0x148A0
#define B_BE_BSS_COLOB_BE_PORT_3_MASK GENMASK(29, 24)
@@ -6398,6 +6650,10 @@
#define B_BE_PTCL_DROP BIT(5)
#define B_BE_PTCL_TX_QUEUE_IDX_MASK GENMASK(4, 0)
+#define R_BE_PTCL_DBG_INFO 0x108F0
+
+#define R_BE_PTCL_DBG 0x108F4
+
#define R_BE_RX_ERROR_FLAG 0x10C00
#define R_BE_RX_ERROR_FLAG_C1 0x14C00
#define B_BE_RX_CSI_NOT_RELEASE_ERROR BIT(31)
@@ -6676,6 +6932,9 @@
#define B_BE_UPD_HGQMD BIT(1)
#define B_BE_UPD_TIMIE BIT(0)
+#define R_BE_WMTX_POWER_BE_BIT_CTL 0x10E0C
+#define R_BE_WMTX_POWER_BE_BIT_CTL_C1 0x14E0C
+
#define R_BE_WMTX_TCR_BE_4 0x10E2C
#define R_BE_WMTX_TCR_BE_4_C1 0x14E2C
#define B_BE_UL_EHT_MUMIMO_LTF_MODE BIT(30)
@@ -7056,6 +7315,20 @@
#define S_BE_BACAM_RST_ENT 1
#define S_BE_BACAM_RST_ALL 2
+#define R_BE_PPDU_STAT 0x11440
+#define R_BE_PPDU_STAT_C1 0x15440
+#define B_BE_STAT_IORST BIT(13)
+#define B_BE_STAT_GCKDIS BIT(12)
+#define B_BE_PPDU_STAT_WR_BW_MASK GENMASK(11, 10)
+#define B_BE_PPDU_STAT_RPT_TRIG BIT(8)
+#define B_BE_PPDU_STAT_RPT_DMA BIT(6)
+#define B_BE_PPDU_STAT_RPT_CRC32 BIT(5)
+#define B_BE_PPDU_STAT_RPT_ADDR BIT(4)
+#define B_BE_APP_PLCP_HDR_RPT BIT(3)
+#define B_BE_APP_RX_CNT_RPT BIT(2)
+#define B_BE_PPDU_MAC_INFO BIT(1)
+#define B_BE_PPDU_STAT_RPT_EN BIT(0)
+
#define R_BE_RX_SR_CTRL 0x1144A
#define R_BE_RX_SR_CTRL_C1 0x1544A
#define B_BE_SR_OP_MODE_MASK GENMASK(5, 4)
@@ -7063,6 +7336,13 @@
#define B_BE_SR_CTRL_PLCP_EN BIT(1)
#define B_BE_SR_EN BIT(0)
+#define R_BE_BSSID_SRC_CTRL 0x1144B
+#define R_BE_BSSID_SRC_CTRL_C1 0x1544B
+#define B_BE_BSSID_MATCH BIT(3)
+#define B_BE_PARTIAL_AID_MATCH BIT(2)
+#define B_BE_BSSCOLOR_MATCH BIT(1)
+#define B_BE_PLCP_SRC_EN BIT(0)
+
#define R_BE_CSIRPT_OPTION 0x11464
#define R_BE_CSIRPT_OPTION_C1 0x15464
#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26)
@@ -7178,12 +7458,56 @@
#define R_BE_PWR_MODULE 0x11900
#define R_BE_PWR_MODULE_C1 0x15900
+#define R_BE_PWR_LISTEN_PATH 0x11988
+#define B_BE_PWR_LISTEN_PATH_EN GENMASK(31, 28)
+
+#define R_BE_PWR_REF_CTRL 0x11A20
+#define B_BE_PWR_REF_CTRL_OFDM GENMASK(9, 1)
+#define B_BE_PWR_REF_CTRL_CCK GENMASK(18, 10)
+#define B_BE_PWR_OFST_LMT_DB GENMASK(27, 19)
+#define R_BE_PWR_OFST_LMTBF 0x11A24
+#define B_BE_PWR_OFST_LMTBF_DB GENMASK(8, 0)
+#define R_BE_PWR_FORCE_LMT 0x11A28
+#define B_BE_PWR_FORCE_LMT_ON BIT(6)
+
+#define R_BE_PWR_RATE_CTRL 0x11A2C
+#define B_BE_PWR_OFST_BYRATE_DB GENMASK(8, 0)
+#define B_BE_FORCE_PWR_BY_RATE_EN BIT(19)
+#define B_BE_FORCE_PWR_BY_RATE_VAL GENMASK(28, 20)
#define R_BE_PWR_RATE_OFST_CTRL 0x11A30
+#define R_BE_PWR_RATE_OFST_END 0x11A38
+#define R_BE_PWR_RULMT_START 0x12048
+#define R_BE_PWR_RULMT_END 0x120e4
+
+#define R_BE_PWR_BOOST 0x11A40
+#define B_BE_PWR_CTRL_SEL BIT(16)
+#define B_BE_PWR_FORCE_RATE_ON BIT(29)
+#define R_BE_PWR_OFST_RULMT 0x11A44
+#define B_BE_PWR_OFST_RULMT_DB GENMASK(17, 9)
+#define B_BE_PWR_FORCE_RU_ON BIT(18)
+#define B_BE_PWR_FORCE_RU_ENON BIT(28)
+#define R_BE_PWR_FORCE_MACID 0x11A48
+#define B_BE_PWR_FORCE_MACID_ON BIT(9)
+
+#define R_BE_PWR_REG_CTRL 0x11A50
+#define B_BE_PWR_BT_EN BIT(23)
+
+#define R_BE_PWR_COEX_CTRL 0x11A54
+#define B_BE_PWR_BT_VAL GENMASK(8, 0)
+#define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27)
+
+#define R_BE_PWR_OFST_SW 0x11AE8
+#define B_BE_PWR_OFST_SW_DB GENMASK(27, 24)
+
+#define R_BE_PWR_FTM 0x11B00
+#define R_BE_PWR_FTM_SS 0x11B04
+
#define R_BE_PWR_BY_RATE 0x11E00
#define R_BE_PWR_BY_RATE_MAX 0x11FA8
#define R_BE_PWR_LMT 0x11FAC
#define R_BE_PWR_LMT_MAX 0x12040
+#define R_BE_PWR_BY_RATE_END 0x12044
#define R_BE_PWR_RU_LMT 0x12048
#define R_BE_PWR_RU_LMT_MAX 0x120E4
@@ -7223,6 +7547,7 @@
#define RR_MOD_M_RXBB GENMASK(9, 5)
#define RR_MOD_LO_SEL BIT(1)
#define RR_MODOPT 0x01
+#define RR_TXG_SEL GENMASK(19, 17)
#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
#define RR_WLSEL 0x02
#define RR_WLSEL_AG GENMASK(18, 16)
@@ -7256,6 +7581,12 @@
#define CFGCH_BAND0_2G 0
#define CFGCH_BAND0_5G 1
#define CFGCH_BAND0_6G 0
+#define RR_CFGCH_BW_V2 GENMASK(12, 10)
+#define CFGCH_BW_V2_20M 0
+#define CFGCH_BW_V2_40M 1
+#define CFGCH_BW_V2_80M 2
+#define CFGCH_BW_V2_160M 3
+#define CFGCH_BW_V2_320M 4
#define RR_CFGCH_BW GENMASK(11, 10)
#define RR_CFGCH_CH GENMASK(7, 0)
#define CFGCH_BW_20M 3
@@ -7292,6 +7623,7 @@
#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
+#define RR_TM_VAL_V1 GENMASK(7, 0)
#define RR_TM_VAL GENMASK(6, 1)
#define RR_TM2 0x43
#define RR_TM2_OFF GENMASK(19, 16)
@@ -7325,8 +7657,12 @@
#define RR_TXAC 0x5f
#define RR_TXAC_IQG GENMASK(3, 0)
#define RR_BIASA 0x60
-#define RR_BIASA_TXG GENMASK(15, 12)
#define RR_BIASA_TXA GENMASK(19, 16)
+#define RR_BIASA_TXG GENMASK(15, 12)
+#define RR_BIASD_TXA_V1 GENMASK(15, 12)
+#define RR_BIASA_TXA_V1 GENMASK(11, 8)
+#define RR_BIASD_TXG_V1 GENMASK(7, 4)
+#define RR_BIASA_TXG_V1 GENMASK(3, 0)
#define RR_BIASA_A GENMASK(2, 0)
#define RR_BIASA2 0x63
#define RR_BIASA2_LB GENMASK(4, 2)
@@ -7410,6 +7746,7 @@
#define RR_MIXER_GN GENMASK(4, 3)
#define RR_POW 0xa0
#define RR_POW_SYN GENMASK(3, 2)
+#define RR_POW_SYN_V1 GENMASK(3, 0)
#define RR_LOGEN 0xa3
#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_SX 0xaf
@@ -7436,6 +7773,8 @@
#define RR_MMD 0xd5
#define RR_MMD_RST_EN BIT(8)
#define RR_MMD_RST_SYN BIT(6)
+#define RR_SMD 0xd6
+#define RR_VCO2 BIT(19)
#define RR_IQKPLL 0xdc
#define RR_IQKPLL_MOD GENMASK(9, 8)
#define RR_SYNLUT 0xdd
@@ -7459,15 +7798,24 @@
#define RR_RFC_CKEN BIT(1)
#define R_UPD_P0 0x0000
+#define R_BBCLK 0x0000
+#define B_CLK_640M BIT(2)
#define R_RSTB_WATCH_DOG 0x000C
#define B_P0_RSTB_WATCH_DOG BIT(0)
#define B_P1_RSTB_WATCH_DOG BIT(1)
#define B_UPD_P0_EN BIT(31)
+#define R_EMLSR 0x0044
+#define B_EMLSR_PARM GENMASK(27, 12)
#define R_SPOOF_CG 0x00B4
#define B_SPOOF_CG_EN BIT(17)
+#define R_CHINFO_SEG 0x00B4
+#define B_CHINFO_SEG_LEN GENMASK(2, 0)
+#define B_CHINFO_SEG GENMASK(16, 7)
#define R_DFS_FFT_CG 0x00B8
#define B_DFS_CG_EN BIT(1)
#define B_DFS_FFT_EN BIT(0)
+#define R_CHINFO_DATA 0x00C0
+#define B_CHINFO_DATA_BITMAP GENMASK(22, 0)
#define R_ANAPAR_PW15 0x030C
#define B_ANAPAR_PW15 GENMASK(31, 24)
#define B_ANAPAR_PW15_H GENMASK(27, 24)
@@ -7497,6 +7845,23 @@
#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
+#define R_BRK_R 0x0418
+#define B_VHTMCS_LMT GENMASK(22, 21)
+#define B_HTMCS_LMT GENMASK(9, 8)
+#define R_BRK_EHT 0x0474
+#define B_RXEHT_NSS_MAX GENMASK(4, 2)
+#define R_BRK_RXEHT 0x0478
+#define B_RXEHT_N_USER_MAX GENMASK(31, 24)
+#define B_RXEHTTB_NSS_MAX GENMASK(16, 14)
+#define R_EN_SND_WO_NDP 0x047c
+#define R_EN_SND_WO_NDP_C1 0x147c
+#define B_EN_SND_WO_NDP BIT(1)
+#define R_BRK_HE 0x0480
+#define B_TB_NSS_MAX GENMASK(25, 23)
+#define B_NSS_MAX GENMASK(16, 14)
+#define B_N_USR_MAX GENMASK(13, 6)
+#define R_RXCCA_BE1 0x0520
+#define B_RXCCA_BE1_DIS BIT(0)
#define R_UPD_CLK_ADC 0x0700
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define B_UPD_CLK_ADC_ON BIT(24)
@@ -7543,6 +7908,7 @@
#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
#define R_MAC_SEL 0x09A4
#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31)
+#define B_MAC_SEL GENMASK(19, 17)
#define B_MAC_SEL_PWR_EN BIT(16)
#define B_MAC_SEL_DPD_EN BIT(10)
#define B_MAC_SEL_MOD GENMASK(4, 2)
@@ -7588,19 +7954,28 @@
#define R_PD_CTRL 0x0C3C
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
+#define B_IOQ_IQK_DPK_CLKEN GENMASK(1, 0)
#define B_IOQ_IQK_DPK_EN BIT(1)
#define R_GNT_BT_WGT_EN 0x0C6C
#define B_GNT_BT_WGT_EN BIT(21)
+#define R_IQK_DPK_RST 0x0C6C
+#define R_IQK_DPK_RST_C1 0x1C6C
+#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
#define R_TXGATING 0x0C74
#define B_TXGATING_EN BIT(4)
+#define R_TXRFC 0x0C7C
+#define R_TXRFC_C1 0x1C7C
+#define B_TXRFC_RST GENMASK(23, 21)
#define R_PD_ARBITER_OFF 0x0C80
#define B_PD_ARBITER_OFF BIT(31)
#define R_SNDCCA_A1 0x0C9C
#define B_SNDCCA_A1_EN GENMASK(19, 12)
#define R_SNDCCA_A2 0x0CA0
#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_UDP_COEEF 0x0CBC
+#define B_UDP_COEEF BIT(19)
#define R_TX_COLLISION_T2R_ST_BE 0x0CC8
#define B_TX_COLLISION_T2R_ST_BE_M GENMASK(13, 8)
#define R_RXHT_MCS_LIMIT 0x0D18
@@ -7624,7 +7999,11 @@
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
+#define R_CLK_GCK 0x1008
+#define B_CLK_GCK GENMASK(24, 0)
#define R_EDCCA_RPT_SEL_BE 0x10CC
+#define R_ADC_FIFO_V1 0x10FC
+#define B_ADC_FIFO_EN_V1 GENMASK(31, 24)
#define R_S0_HW_SI_DIS 0x1200
#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
@@ -7771,6 +8150,27 @@
#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13)
#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10
#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
+#define R_AFEDAC0 0x2A5C
+#define B_AFEDAC0 GENMASK(31, 27)
+#define R_AFEDAC1 0x2A60
+#define B_AFEDAC1 GENMASK(2, 0)
+#define R_IQKDPK_HC 0x2AB8
+#define B_IQKDPK_HC BIT(28)
+#define R_HWSI_ADD0 0x2ADC
+#define R_HWSI_ADD1 0x2BDC
+#define B_HWSI_ADD_MASK GENMASK(11, 4)
+#define B_HWSI_ADD_CTL_MASK GENMASK(2, 0)
+#define B_HWSI_ADD_RD BIT(2)
+#define B_HWSI_ADD_POLL_MASK GENMASK(1, 0)
+#define B_HWSI_ADD_RUN BIT(1)
+#define B_HWSI_ADD_BUSY BIT(0)
+#define R_HWSI_DATA 0x2AE0
+#define B_HWSI_DATA_VAL GENMASK(27, 8)
+#define B_HWSI_DATA_ADDR GENMASK(7, 0)
+#define R_HWSI_VAL0 0x2C24
+#define R_HWSI_VAL1 0x2D24
+#define B_HWSI_VAL_RDONE BIT(31)
+#define B_HWSI_VAL_BUSY BIT(29)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_EDCCA_RPT_A_BE 0x2E38
@@ -7806,8 +8206,30 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_OP1DB_A 0x40B0
+#define B_OP1DB_A GENMASK(31, 24)
+#define R_OP1DB1_A 0x40BC
+#define B_TIA10_A GENMASK(15, 0)
+#define B_TIA1_A GENMASK(15, 8)
+#define B_TIA0_A GENMASK(7, 0)
+#define R_BKOFF_A 0x40E0
+#define B_BKOFF_IBADC_A GENMASK(23, 18)
+#define R_BACKOFF_A 0x40E4
+#define B_LNA_IBADC_A GENMASK(29, 18)
+#define B_BACKOFF_LNA_A GENMASK(29, 24)
+#define B_BACKOFF_IBADC_A GENMASK(23, 18)
+#define R_RXBY_WBADC_A 0x40F4
+#define B_RXBY_WBADC_A GENMASK(14, 10)
#define R_MUIC 0x40F8
#define B_MUIC_EN BIT(0)
+#define R_BT_RXBY_WBADC_A 0x4160
+#define B_BT_RXBY_WBADC_A BIT(31)
+#define R_BT_SHARE_A 0x4164
+#define B_BT_SHARE_A BIT(0)
+#define B_BT_TRK_OFF_A BIT(1)
+#define B_BTG_PATH_A BIT(4)
+#define R_FORCE_FIR_A 0x418C
+#define B_FORCE_FIR_A GENMASK(1, 0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(7, 0)
#define R_SEG0CSI 0x42AC
@@ -7846,8 +8268,30 @@
#define R_DPD_BF 0x44a0
#define B_DPD_BF_OFDM GENMASK(16, 12)
#define B_DPD_BF_SCA GENMASK(6, 0)
+#define R_LNA_OP 0x44B0
+#define B_LNA6 GENMASK(31, 24)
+#define R_LNA_TIA 0x44BC
+#define B_TIA10_B GENMASK(15, 0)
+#define B_TIA1_B GENMASK(15, 8)
+#define B_TIA0_B GENMASK(7, 0)
+#define R_BKOFF_B 0x44E0
+#define B_BKOFF_IBADC_B GENMASK(23, 18)
+#define R_BACKOFF_B 0x44E4
+#define B_LNA_IBADC_B GENMASK(29, 18)
+#define B_BACKOFF_LNA_B GENMASK(29, 24)
+#define B_BACKOFF_IBADC_B GENMASK(23, 18)
+#define R_RXBY_WBADC_B 0x44F4
+#define B_RXBY_WBADC_B GENMASK(14, 10)
+#define R_BT_RXBY_WBADC_B 0x4560
+#define B_BT_RXBY_WBADC_B BIT(31)
+#define R_BT_SHARE_B 0x4564
+#define B_BT_SHARE_B BIT(0)
+#define B_BT_TRK_OFF_B BIT(1)
+#define B_BTG_PATH_B BIT(4)
#define R_TXPATH_SEL 0x458C
#define B_TXPATH_SEL_MSK GENMASK(31, 28)
+#define R_FORCE_FIR_B 0x458C
+#define B_FORCE_FIR_B GENMASK(1, 0)
#define R_TXPWR 0x4594
#define B_TXPWR_MSK GENMASK(30, 22)
#define R_TXNSS_MAP 0x45B4
@@ -7910,10 +8354,12 @@
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3 0x41C8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3 0x41CC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_RXB_INIT_V1 0x46A8
#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
@@ -7958,10 +8404,12 @@
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3 0x45C8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3 0x45CC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
@@ -8092,6 +8540,15 @@
#define B_PATH1_5MDET_SB2 BIT(8)
#define B_PATH1_5MDET_SB0 BIT(6)
#define B_PATH1_5MDET_TH GENMASK(5, 0)
+#define R_S0S1_CSI_WGT 0x4D34
+#define B_S0S1_CSI_WGT_EN BIT(0)
+#define B_S0S1_CSI_WGT_TONE_IDX GENMASK(31, 20)
+#define R_CHINFO_ELM_SRC 0x4D84
+#define B_CHINFO_ELM_BITMAP GENMASK(22, 0)
+#define B_CHINFO_SRC GENMASK(31, 30)
+#define R_CHINFO_TYPE_SCAL 0x4D88
+#define B_CHINFO_TYPE GENMASK(2, 1)
+#define B_CHINFO_SCAL BIT(8)
#define R_RPL_BIAS_COMP 0x4DF0
#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0)
#define R_RPL_PATHAB 0x4E0C
@@ -8239,14 +8696,90 @@
#define B_S0_DACKQ8_K GENMASK(15, 8)
#define R_DCFO_WEIGHT_V1 0x6244
#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28)
+#define R_DAC_CLK 0x625C
+#define B_DAC_CLK GENMASK(31, 30)
#define R_DCFO_OPT_V1 0x6260
#define B_DCFO_OPT_EN_V1 BIT(17)
+#define R_TXFCTR 0x627C
+#define B_TXFCTR_THD GENMASK(19, 10)
+#define R_TXSCALE 0x6284
+#define B_TXFCTR_EN BIT(19)
+#define R_PCOEFF01 0x6684
+#define B_PCOEFF01 GENMASK(23, 0)
+#define R_PCOEFF23 0x6688
+#define B_PCOEFF23 GENMASK(23, 0)
+#define R_PCOEFF45 0x668c
+#define B_PCOEFF45 GENMASK(23, 0)
+#define R_PCOEFF67 0x6690
+#define B_PCOEFF67 GENMASK(23, 0)
+#define R_PCOEFF89 0x6694
+#define B_PCOEFF89 GENMASK(23, 0)
+#define R_PCOEFFAB 0x6698
+#define B_PCOEFFAB GENMASK(23, 0)
+#define R_PCOEFFCD 0x669c
+#define B_PCOEFFCD GENMASK(23, 0)
+#define R_PCOEFFEF 0x66a0
+#define B_PCOEFFEF GENMASK(23, 0)
+#define R_MGAIN_BIAS 0x672c
+#define B_MGAIN_BIAS_BW20 GENMASK(3, 0)
+#define B_MGAIN_BIAS_BW40 GENMASK(7, 4)
+#define R_CCK_RPL_OFST 0x6750
+#define B_CCK_RPL_OFST GENMASK(7, 0)
+#define R_BK_FC0INV 0x6758
+#define B_BK_FC0INV GENMASK(18, 0)
+#define R_CCK_FC0INV 0x675c
+#define B_CCK_FC0INV GENMASK(18, 0)
#define R_SEG0R_EDCCA_LVL_BE 0x69EC
#define R_SEG0R_PPDU_LVL_BE 0x69F0
#define R_SEGSND 0x6A14
#define B_SEGSND_EN BIT(31)
+#define R_DBCC 0x6B48
+#define B_DBCC_EN BIT(0)
+#define R_FC0 0x6B4C
+#define B_BW40_2XFFT BIT(31)
+#define B_FC0 GENMASK(12, 0)
+#define R_FC0INV_SBW 0x6B50
+#define B_SMALLBW GENMASK(31, 30)
+#define B_RX_BT_SG0 GENMASK(25, 22)
+#define B_RX_1RCCA GENMASK(17, 14)
+#define B_FC0_INV GENMASK(6, 0)
+#define R_ANT_CHBW 0x6B54
+#define B_ANT_BT_SHARE BIT(16)
+#define B_CHBW_BW GENMASK(14, 12)
+#define B_CHBW_PRICH GENMASK(11, 8)
+#define B_ANT_RX_SG0 GENMASK(3, 0)
+#define R_SLOPE 0x6B6C
+#define B_EHT_RATE_TH GENMASK(31, 28)
+#define B_SLOPE_B GENMASK(27, 14)
+#define B_SLOPE_A GENMASK(13, 0)
+#define R_SC_CORNER 0x6B70
+#define B_SC_CORNER GENMASK(10, 0)
+#define R_MAG_A 0x6BF4
+#define B_MGA_AEND GENMASK(31, 24)
+#define R_MAG_AB 0x6BF8
+#define B_BY_SLOPE GENMASK(31, 24)
+#define B_MAG_AB GENMASK(23, 0)
+#define R_BEDGE 0x6BFC
+#define B_EHT_MCS14 BIT(31)
+#define B_HE_RATE_TH GENMASK(30, 27)
+#define R_BEDGE2 0x6C00
+#define B_EHT_MCS15 BIT(31)
+#define B_HT_VHT_TH GENMASK(11, 0)
+#define R_BEDGE3 0x6C04
+#define B_TB_EN BIT(23)
+#define B_HEMU_EN BIT(21)
+#define B_HEERSU_EN BIT(19)
+#define B_EHTTB_EN BIT(15)
+#define B_BEDGE_CFG GENMASK(1, 0)
+#define R_SU_PUNC 0x6C08
+#define B_SU_PUNC_EN BIT(1)
+#define R_BEDGE5 0x6C10
+#define B_HWGEN_EN BIT(25)
+#define B_PWROFST_COMP BIT(20)
#define R_RPL_BIAS_COMP1 0x6DF0
#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
+#define R_DBCC_FA 0x703C
+#define B_DBCC_FA BIT(12)
#define R_P1_TSSI_ALIM1 0x7630
#define B_P1_TSSI_ALIM1 GENMASK(29, 0)
#define B_P1_TSSI_ALIM11 GENMASK(29, 20)
@@ -8389,8 +8922,12 @@
#define B_PRT_COM_RXBB_V1 GENMASK(4, 0)
#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
+#define R_COEF_SEL_C1 0x8204
#define B_COEF_SEL_IQC BIT(0)
+#define B_COEF_SEL_IQC_V1 GENMASK(1, 0)
#define B_COEF_SEL_MDPD BIT(8)
+#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
+#define B_COEF_SEL_EN BIT(31)
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)
@@ -8412,8 +8949,10 @@
#define B_RFGAIN_BND GENMASK(4, 0)
#define R_CFIR_MAP 0x8150
#define R_CFIR_LUT 0x8154
+#define R_CFIR_LUT_C1 0x8254
#define B_CFIR_LUT_SEL BIT(8)
#define B_CFIR_LUT_SET BIT(4)
+#define B_CFIR_LUT_G5 BIT(5)
#define B_CFIR_LUT_G3 BIT(3)
#define B_CFIR_LUT_G2 BIT(2)
#define B_CFIR_LUT_GP_V1 GENMASK(2, 0)
@@ -8626,6 +9165,35 @@
#define B_DACKN0_V GENMASK(21, 14)
#define R_DACKN1_CTL 0xC224
#define B_DACKN1_V GENMASK(21, 14)
+#define R_GAIN_MAP0 0xE44C
+#define B_GAIN_MAP0_EN BIT(0)
+#define R_GAIN_MAP1 0xE54C
+#define B_GAIN_MAP1_EN BIT(0)
+#define R_GOTX_IQKDPK_C0 0xE464
+#define R_GOTX_IQKDPK_C1 0xE564
+#define B_GOTX_IQKDPK GENMASK(28, 27)
+#define R_IQK_DPK_PRST 0xE4AC
+#define R_IQK_DPK_PRST_C1 0xE5AC
+#define B_IQK_DPK_PRST BIT(27)
+#define R_TXPWR_RSTA 0xE60C
+#define B_TXPWR_RSTA BIT(16)
+#define R_TSSI_PWR_P0 0xE610
+#define R_TSSI_PWR_P1 0xE710
+#define B_TSSI_CONT_EN BIT(3)
+#define R_TSSI_MAP_OFST_P0 0xE620
+#define R_TSSI_MAP_OFST_P1 0xE720
+#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
+#define B_TSSI_MAP_OFST_CCK GENMASK(26, 18)
+#define R_TXAGC_REF0_P0 0xE628
+#define R_TXAGC_REF0_P1 0xE728
+#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0)
+#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9)
+#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18)
+#define R_TXAGC_REF1_P0 0xE62C
+#define R_TXAGC_REF1_P1 0xE72C
+#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0)
+#define R_TXPWR_RSTB 0xE70C
+#define B_TXPWR_RSTB BIT(16)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
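
The new reg.h entries above follow the driver's usual pattern: an R_* register offset paired with B_* field masks built from BIT()/GENMASK(). As a minimal sketch of how such fields are typically programmed, assuming the rtw89_write32_mask(rtwdev, addr, mask, data) accessor used elsewhere in rtw89 (the helper and the values are illustrative, not part of this patch), the per-channel page quota added at R_BE_CH0_PAGE_CTRL could be set like this:

	/* Minimal sketch, not from the patch: program the CH0 page quota
	 * using the B_* field masks defined above. Assumes the driver's
	 * rtw89_write32_mask() accessor and the usual rtw89 headers.
	 */
	static void be_ch0_set_page_quota(struct rtw89_dev *rtwdev,
					  u16 min_pg, u16 max_pg, bool grp)
	{
		rtw89_write32_mask(rtwdev, R_BE_CH0_PAGE_CTRL,
				   B_BE_CH0_MIN_PG_MASK, min_pg);
		rtw89_write32_mask(rtwdev, R_BE_CH0_PAGE_CTRL,
				   B_BE_CH0_MAX_PG_MASK, max_pg);
		rtw89_write32_mask(rtwdev, R_BE_CH0_PAGE_CTRL,
				   B_BE_CH0_GRP, grp);
	}
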
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 5c167a9278ce..51d3e61eaa1d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -901,7 +901,7 @@ static void rtw8851b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -987,7 +987,7 @@ next:
static
void rtw8851b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -1921,41 +1921,81 @@ static u8 rtw8851b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8851b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
- module->ant.isolation = 10;
- module->kt_ver_adie = rtwdev->hal.acv;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
+ md->md_v7.ant.isolation = 10;
+ md->md_v7.kt_ver_adie = rtwdev->hal.acv;
- if (module->rfe_type == 0)
- return;
+ if (md->md_v7.rfe_type == 0)
+ return;
- /* rfe_type 3*n+1: 1-Ant(shared),
- * 3*n+2: 2-Ant+Div(non-shared),
- * 3*n+3: 2-Ant+no-Div(non-shared)
- */
- module->ant.num = (module->rfe_type % 3 == 1) ? 1 : 2;
- /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
- module->ant.single_pos = RF_PATH_A;
- module->ant.btg_pos = RF_PATH_A;
- module->ant.stream_cnt = 1;
-
- if (module->ant.num == 1) {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
- module->wa_type = 1;
- module->ant.diversity = 0;
- } else { /* ant.num == 2 */
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
- module->switch_type = BTC_SWITCH_EXTERNAL;
- module->wa_type = 0;
- if (module->rfe_type % 3 == 2)
- module->ant.diversity = 1;
+ /* rfe_type 3*n+1: 1-Ant(shared),
+ * 3*n+2: 2-Ant+Div(non-shared),
+ * 3*n+3: 2-Ant+no-Div(non-shared)
+ */
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 3 == 1) ? 1 : 2;
+ /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
+ md->md_v7.ant.single_pos = RF_PATH_A;
+ md->md_v7.ant.btg_pos = RF_PATH_A;
+ md->md_v7.ant.stream_cnt = 1;
+
+ if (md->md_v7.ant.num == 1) {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.wa_type = 1;
+ md->md_v7.ant.diversity = 0;
+ } else { /* ant.num == 2 */
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ md->md_v7.switch_type = BTC_SWITCH_EXTERNAL;
+ md->md_v7.wa_type = 0;
+ if (md->md_v7.rfe_type % 3 == 2)
+ md->md_v7.ant.diversity = 1;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
+ } else {
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+ md->md.ant.isolation = 10;
+ md->md.kt_ver_adie = rtwdev->hal.acv;
+
+ if (md->md.rfe_type == 0)
+ return;
+
+ /* rfe_type 3*n+1: 1-Ant(shared),
+ * 3*n+2: 2-Ant+Div(non-shared),
+ * 3*n+3: 2-Ant+no-Div(non-shared)
+ */
+ md->md.ant.num = (md->md.rfe_type % 3 == 1) ? 1 : 2;
+ /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
+ md->md.ant.single_pos = RF_PATH_A;
+ md->md.ant.btg_pos = RF_PATH_A;
+ md->md.ant.stream_cnt = 1;
+
+ if (md->md.ant.num == 1) {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ md->md.wa_type = 1;
+ md->md.ant.diversity = 0;
+ } else { /* ant.num == 2 */
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ md->md.switch_type = BTC_SWITCH_EXTERNAL;
+ md->md.wa_type = 0;
+ if (md->md.rfe_type % 3 == 2)
+ md->md.ant.diversity = 1;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
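
Throughout this merge, struct rtw89_btc_module is replaced by union rtw89_btc_module_info, dispatched on ver->fcxinit: firmware coex-init version 7 uses the md_v7 layout (where cv is renamed kt_ver), older versions keep the legacy md layout, and the resolved ant_type/btg_pos are cached directly on struct rtw89_btc so later code such as rtw8851b_set_trx_mask() no longer reaches into mdinfo. A small, self-contained sketch of the dispatch idiom; the types and field names here are illustrative only:

	#include <stdint.h>

	/* Illustrative only: two incompatible module-info layouts behind
	 * one union, selected by a firmware feature version.
	 */
	union module_info {
		struct { uint8_t rfe_type; uint8_t cv; } md;        /* legacy */
		struct { uint8_t rfe_type; uint8_t kt_ver; } md_v7; /* fcxinit == 7 */
	};

	static uint8_t module_rfe_type(const union module_info *mi, int fcxinit)
	{
		return fcxinit == 7 ? mi->md_v7.rfe_type : mi->md.rfe_type;
	}
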
@@ -1965,7 +2005,7 @@ void rtw8851b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
if (group > BTC_BT_SS_GROUP)
group--; /* Tx-group=1, Rx-group=2 */
- if (rtwdev->btc.mdinfo.ant.type == BTC_ANT_SHARED) /* 1-Ant */
+ if (rtwdev->btc.ant_type == BTC_ANT_SHARED) /* 1-Ant */
group += 3;
rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
@@ -1980,9 +2020,9 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
};
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
- struct rtw89_btc_ant_info *ant = &module->ant;
- u8 path, path_min, path_max;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u8 path, path_min, path_max, str_cnt, ant_sing_pos;
/* PTA init */
rtw89_mac_coex_init(rtwdev, &coex_params);
@@ -1991,9 +2031,17 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+ if (ver->fcxinit == 7) {
+ str_cnt = md->md_v7.ant.stream_cnt;
+ ant_sing_pos = md->md_v7.ant.single_pos;
+ } else {
+ str_cnt = md->md.ant.stream_cnt;
+ ant_sing_pos = md->md.ant.single_pos;
+ }
+
/* for 1-Ant && 1-ss case: only 1-path */
- if (ant->stream_cnt == 1) {
- path_min = ant->single_pos;
+ if (str_cnt == 1) {
+ path_min = ant_sing_pos;
path_max = path_min;
} else {
path_min = RF_PATH_A;
@@ -2016,7 +2064,7 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
/* if GNT_WL = 0 && BT = Tx_group -->
* Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
*/
- if (ant->type == BTC_ANT_SHARED && ant->btg_pos == path)
+ if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
rtw8851b_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
else
rtw8851b_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
@@ -2148,19 +2196,18 @@ void rtw8851b_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
static void rtw8851b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_ant_info *ant = &btc->mdinfo.ant;
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWE, RFREG_MASK, 0x80000);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD1, RFREG_MASK, 0x110);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD1, RFREG_MASK, 0x110);
/* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
if (state)
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD0, RFREG_MASK, 0x179c);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD0, RFREG_MASK, 0x179c);
else
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD0, RFREG_MASK, 0x208);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD0, RFREG_MASK, 0x208);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWE, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWE, RFREG_MASK, 0x0);
}
#define LNA2_51B_MA 0x700
@@ -2175,7 +2222,6 @@ static void rtw8851b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
* level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
*/
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_ant_info *ant = &btc->mdinfo.ant;
const struct rtw89_reg2_def *rf;
u32 n, i, val;
@@ -2203,10 +2249,10 @@ static void rtw8851b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
for (i = 0; i < n; i++, rf++) {
val = rf->data;
/* bit[10] = 1 if non-shared-ant for 8851b */
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED)
+ if (btc->ant_type == BTC_ANT_DEDICATED)
val |= 0x4;
- rtw89_write_rf(rtwdev, ant->btg_pos, rf->addr, LNA2_51B_MA, val);
+ rtw89_write_rf(rtwdev, btc->btg_pos, rf->addr, LNA2_51B_MA, val);
}
}
@@ -2299,6 +2345,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.enable_bb_rf = rtw8851b_mac_enable_bb_rf,
.disable_bb_rf = rtw8851b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8851b_bb_reset,
.bb_sethw = rtw8851b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2309,7 +2356,9 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.read_phycap = rtw8851b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = rtw8851b_rfe_gpio,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8851b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8851b_rfk_channel,
.rfk_band_changed = rtw8851b_rfk_band_changed,
.rfk_scan = rtw8851b_rfk_scan,
@@ -2334,6 +2383,12 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8851b_btc_set_rfe,
.btc_init_cfg = rtw8851b_btc_init_cfg,
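
The chip-ops additions above (h2c_default_cmac_tbl, h2c_update_beacon, h2c_ba_cam, etc.) extend the per-chip vtable; this AX-generation chip points at the existing rtw89_fw_h2c_* helpers and leaves ops it lacks (h2c_ampdu_cmac_tbl, h2c_default_dmac_tbl) as NULL, which callers presumably treat as "not applicable". A generic sketch of that optional-op pattern, with hypothetical names:

	struct ops {
		int (*optional_op)(void *ctx);
	};

	/* Call through an optional op, treating a NULL op as a no-op success. */
	static int call_optional_op(const struct ops *ops, void *ctx)
	{
		if (!ops->optional_op)
			return 0;
		return ops->optional_op(ctx);
	}
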
@@ -2394,7 +2449,9 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
@@ -2449,6 +2506,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8851b_c2h_regs,
.page_regs = &rtw8851b_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
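
Also visible above: the boolean support_bw160 gives way to a support_bandwidths bitmap keyed by enum nl80211_chan_width, so per-width capability checks become bit tests. A sketch follows; the helper is hypothetical, while the enum and BIT() are standard kernel API:

	/* Hypothetical helper: test chip support for a given channel width. */
	static bool chip_supports_bw(const struct rtw89_chip_info *chip,
				     enum nl80211_chan_width width)
	{
		return chip->support_bandwidths & BIT(width);
	}

	/* e.g. chip_supports_bw(chip, NL80211_CHAN_WIDTH_160) is false for
	 * rtw8851b, whose bitmap covers 20/40/80 MHz only.
	 */
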
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
index 8cb5bde8f625..522883c8dfb9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
@@ -5345,7 +5345,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -5353,7 +5353,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -5361,7 +5361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -5793,7 +5793,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -5801,7 +5801,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 76,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 76,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -5809,7 +5809,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 76,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 76,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -6361,7 +6361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 84,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 84,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -6369,7 +6369,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 84,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 84,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -6649,7 +6649,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 74,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -7975,7 +7975,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 42,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -7983,7 +7983,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -7991,7 +7991,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 40,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -8423,7 +8423,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -8431,7 +8431,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -8439,7 +8439,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 52,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -8871,7 +8871,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 64,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -8879,7 +8879,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 64,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -8887,7 +8887,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
@@ -11055,7 +11055,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -11063,7 +11063,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -11071,7 +11071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -11503,7 +11503,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -11511,7 +11511,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 74,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 74,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -11519,7 +11519,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 74,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 74,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -12071,7 +12071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 80,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 80,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -12079,7 +12079,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 80,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 80,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -12359,7 +12359,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 72,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 72,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -13685,7 +13685,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 40,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 40,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -13693,7 +13693,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -13701,7 +13701,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 38,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 38,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -14133,7 +14133,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -14141,7 +14141,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -14149,7 +14149,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 50,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 50,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -14581,7 +14581,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 62,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 62,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -14589,7 +14589,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 62,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 62,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -14597,7 +14597,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
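
In these tx-power limit tables, 127 (the s8 maximum) evidently serves as a "no limit defined" sentinel; the hunks above give the RTW89_IC rows for the upper 5 GHz channels concrete limits equal to the FCC values. Consistently, the precomputed RTW89_WW rows in the rtw8852b tables further below drop as well (e.g. 78 to 76 on channel 48), matching a worldwide limit taken as the minimum over all bodies that define one. A sketch of that reduction, under the stated sentinel assumption:

	#include <stdint.h>

	/* Assumes 127 means "no limit defined"; since 127 is INT8_MAX, a
	 * plain minimum over the per-regulatory-body limits yields the
	 * worldwide (RTW89_WW) row.
	 */
	static int8_t ww_limit(const int8_t *lmt, int num_regd)
	{
		int8_t min = INT8_MAX;

		for (int i = 0; i < num_regd; i++)
			if (lmt[i] < min)
				min = lmt[i];
		return min;
	}
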
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ade69bd30fc8..ca1374a71727 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -25,6 +25,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 0c76c52ce22c..2deadec715cf 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1665,28 +1665,55 @@ static u8 rtw8852a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852a_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = (module->rfe_type % 2 ? 2 : 3);
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -1717,7 +1744,6 @@ static void rtw8852a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -1736,7 +1762,7 @@ static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, 0xfffff, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852a_set_trx_mask(rtwdev,
@@ -2043,6 +2069,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.enable_bb_rf = rtw89_mac_enable_bb_rf,
.disable_bb_rf = rtw89_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852a_bb_reset,
.bb_sethw = rtw8852a_bb_sethw,
.read_rf = rtw89_phy_read_rf,
@@ -2053,7 +2080,9 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.read_phycap = rtw8852a_read_phycap,
.fem_setup = rtw8852a_fem_setup,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852a_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852a_rfk_channel,
.rfk_band_changed = rtw8852a_rfk_band_changed,
.rfk_scan = rtw8852a_rfk_scan,
@@ -2078,6 +2107,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2130,7 +2165,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -2186,6 +2223,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.c2h_regs = rtw8852a_c2h_regs,
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.page_regs = &rtw8852a_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index f1e890bde049..7c6ffedb77e2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index de887a35f3fb..d025c4135e1c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -988,7 +988,7 @@ static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -1086,7 +1086,7 @@ next:
static
void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -2125,28 +2125,55 @@ static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = module->rfe_type % 2 ? 2 : 3;
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -2162,7 +2189,6 @@ void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -2181,7 +2207,7 @@ static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
/* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
@@ -2468,6 +2494,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.enable_bb_rf = rtw8852b_mac_enable_bb_rf,
.disable_bb_rf = rtw8852b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852b_bb_reset,
.bb_sethw = rtw8852b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2478,7 +2505,9 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.read_phycap = rtw8852b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852b_rfk_channel,
.rfk_band_changed = rtw8852b_rfk_band_changed,
.rfk_scan = rtw8852b_rfk_scan,
@@ -2503,6 +2532,12 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852b_btc_set_rfe,
.btc_init_cfg = rtw8852b_btc_init_cfg,
@@ -2564,7 +2599,9 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
@@ -2620,6 +2657,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852b_c2h_regs,
.page_regs = &rtw8852b_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
index d2ce16e98bac..07945d06dc59 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
@@ -16936,7 +16936,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][8] = 52,
[0][0][1][0][RTW89_WW][10] = 52,
[0][0][1][0][RTW89_WW][12] = 52,
- [0][0][1][0][RTW89_WW][14] = 1,
+ [0][0][1][0][RTW89_WW][14] = 52,
[0][0][1][0][RTW89_WW][15] = 52,
[0][0][1][0][RTW89_WW][17] = 52,
[0][0][1][0][RTW89_WW][19] = 52,
@@ -16954,10 +16954,10 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][42] = 28,
[0][0][1][0][RTW89_WW][44] = 28,
[0][0][1][0][RTW89_WW][46] = 28,
- [0][0][1][0][RTW89_WW][48] = 78,
- [0][0][1][0][RTW89_WW][50] = 78,
- [0][0][1][0][RTW89_WW][52] = 78,
- [0][1][1][0][RTW89_WW][0] = 1,
+ [0][0][1][0][RTW89_WW][48] = 76,
+ [0][0][1][0][RTW89_WW][50] = 76,
+ [0][0][1][0][RTW89_WW][52] = 76,
+ [0][1][1][0][RTW89_WW][0] = 30,
[0][1][1][0][RTW89_WW][2] = 32,
[0][1][1][0][RTW89_WW][4] = 30,
[0][1][1][0][RTW89_WW][6] = 30,
@@ -16982,9 +16982,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_WW][42] = 16,
[0][1][1][0][RTW89_WW][44] = 16,
[0][1][1][0][RTW89_WW][46] = 16,
- [0][1][1][0][RTW89_WW][48] = 56,
- [0][1][1][0][RTW89_WW][50] = 56,
- [0][1][1][0][RTW89_WW][52] = 56,
+ [0][1][1][0][RTW89_WW][48] = 50,
+ [0][1][1][0][RTW89_WW][50] = 50,
+ [0][1][1][0][RTW89_WW][52] = 50,
[0][0][2][0][RTW89_WW][0] = 42,
[0][0][2][0][RTW89_WW][2] = 42,
[0][0][2][0][RTW89_WW][4] = 42,
@@ -17038,9 +17038,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_WW][42] = 16,
[0][1][2][0][RTW89_WW][44] = 16,
[0][1][2][0][RTW89_WW][46] = 16,
- [0][1][2][0][RTW89_WW][48] = 58,
- [0][1][2][0][RTW89_WW][50] = 58,
- [0][1][2][0][RTW89_WW][52] = 58,
+ [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][50] = 52,
+ [0][1][2][0][RTW89_WW][52] = 52,
[0][1][2][1][RTW89_WW][0] = 14,
[0][1][2][1][RTW89_WW][2] = 14,
[0][1][2][1][RTW89_WW][4] = 14,
@@ -17066,9 +17066,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_WW][42] = 4,
[0][1][2][1][RTW89_WW][44] = 4,
[0][1][2][1][RTW89_WW][46] = 4,
- [0][1][2][1][RTW89_WW][48] = 58,
- [0][1][2][1][RTW89_WW][50] = 58,
- [0][1][2][1][RTW89_WW][52] = 58,
+ [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][50] = 52,
+ [0][1][2][1][RTW89_WW][52] = 52,
[1][0][2][0][RTW89_WW][1] = 42,
[1][0][2][0][RTW89_WW][5] = 42,
[1][0][2][0][RTW89_WW][9] = 52,
@@ -17095,8 +17095,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_WW][36] = 50,
[1][1][2][0][RTW89_WW][39] = 16,
[1][1][2][0][RTW89_WW][43] = 16,
- [1][1][2][0][RTW89_WW][47] = 68,
- [1][1][2][0][RTW89_WW][51] = 66,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 62,
[1][1][2][1][RTW89_WW][1] = 16,
[1][1][2][1][RTW89_WW][5] = 16,
[1][1][2][1][RTW89_WW][9] = 28,
@@ -17109,8 +17109,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][36] = 36,
[1][1][2][1][RTW89_WW][39] = 4,
[1][1][2][1][RTW89_WW][43] = 4,
- [1][1][2][1][RTW89_WW][47] = 68,
- [1][1][2][1][RTW89_WW][51] = 66,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 62,
[2][0][2][0][RTW89_WW][3] = 42,
[2][0][2][0][RTW89_WW][11] = 52,
[2][0][2][0][RTW89_WW][18] = 52,
@@ -17227,7 +17227,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][14] = 78,
[0][0][1][0][RTW89_CN][14] = 58,
[0][0][1][0][RTW89_QATAR][14] = 58,
- [0][0][1][0][RTW89_UK][14] = 1,
+ [0][0][1][0][RTW89_UK][14] = 58,
[0][0][1][0][RTW89_FCC][15] = 76,
[0][0][1][0][RTW89_ETSI][15] = 58,
[0][0][1][0][RTW89_MKK][15] = 76,
@@ -17435,7 +17435,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 78,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 76,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CHILE][48] = 127,
@@ -17447,7 +17447,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 78,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 76,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CHILE][50] = 127,
@@ -17459,7 +17459,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 78,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 76,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CHILE][52] = 127,
@@ -17479,7 +17479,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 50,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
- [0][1][1][0][RTW89_UK][0] = 1,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][2] = 68,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 48,
@@ -17771,7 +17771,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][48] = 56,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
- [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 50,
[0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
[0][1][1][0][RTW89_CHILE][48] = 127,
@@ -17783,7 +17783,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][50] = 56,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
- [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 50,
[0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
[0][1][1][0][RTW89_CHILE][50] = 127,
@@ -17795,7 +17795,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][52] = 56,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
- [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 50,
[0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
[0][1][1][0][RTW89_CHILE][52] = 127,
@@ -18107,7 +18107,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 78,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 78,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CHILE][48] = 127,
@@ -18119,7 +18119,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 78,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 78,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CHILE][50] = 127,
@@ -18131,7 +18131,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 78,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 78,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CHILE][52] = 127,
@@ -18443,7 +18443,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][48] = 58,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
- [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 50,
[0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
[0][1][2][0][RTW89_CHILE][48] = 127,
@@ -18455,7 +18455,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][50] = 58,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
- [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 52,
[0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
[0][1][2][0][RTW89_CHILE][50] = 127,
@@ -18467,7 +18467,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][52] = 58,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
- [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 52,
[0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
[0][1][2][0][RTW89_CHILE][52] = 127,
@@ -18779,7 +18779,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][48] = 58,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
- [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 50,
[0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
[0][1][2][1][RTW89_CHILE][48] = 127,
@@ -18791,7 +18791,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][50] = 58,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
- [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 52,
[0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
[0][1][2][1][RTW89_CHILE][50] = 127,
@@ -18803,7 +18803,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][52] = 58,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
- [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 52,
[0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
[0][1][2][1][RTW89_CHILE][52] = 127,
@@ -18959,7 +18959,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 78,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 78,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CHILE][47] = 127,
@@ -18971,7 +18971,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 70,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 78,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CHILE][51] = 127,
@@ -19127,7 +19127,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][47] = 68,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
- [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 62,
[1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
[1][1][2][0][RTW89_CHILE][47] = 127,
@@ -19139,7 +19139,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][51] = 66,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
- [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 62,
[1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
[1][1][2][0][RTW89_CHILE][51] = 127,
@@ -19295,7 +19295,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][47] = 68,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
- [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 62,
[1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
[1][1][2][1][RTW89_CHILE][47] = 127,
@@ -19307,7 +19307,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][51] = 66,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
- [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 62,
[1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
[1][1][2][1][RTW89_CHILE][51] = 127,
@@ -19391,7 +19391,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 64,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CHILE][49] = 127,
@@ -19475,7 +19475,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_FCC][49] = 58,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
- [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 66,
[2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
[2][1][2][0][RTW89_CHILE][49] = 127,
@@ -19559,7 +19559,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_FCC][49] = 58,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
- [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 66,
[2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
[2][1][2][1][RTW89_CHILE][49] = 127,
@@ -20723,9 +20723,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][42] = 14,
[0][1][RTW89_WW][44] = 14,
[0][1][RTW89_WW][46] = 14,
- [0][1][RTW89_WW][48] = 20,
- [0][1][RTW89_WW][50] = 20,
- [0][1][RTW89_WW][52] = 20,
+ [0][1][RTW89_WW][48] = 16,
+ [0][1][RTW89_WW][50] = 16,
+ [0][1][RTW89_WW][52] = 16,
[1][0][RTW89_WW][0] = 34,
[1][0][RTW89_WW][2] = 34,
[1][0][RTW89_WW][4] = 34,
@@ -20779,9 +20779,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][42] = 16,
[1][1][RTW89_WW][44] = 16,
[1][1][RTW89_WW][46] = 16,
- [1][1][RTW89_WW][48] = 32,
- [1][1][RTW89_WW][50] = 32,
- [1][1][RTW89_WW][52] = 32,
+ [1][1][RTW89_WW][48] = 28,
+ [1][1][RTW89_WW][50] = 30,
+ [1][1][RTW89_WW][52] = 30,
[2][0][RTW89_WW][0] = 44,
[2][0][RTW89_WW][2] = 44,
[2][0][RTW89_WW][4] = 44,
@@ -20835,9 +20835,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][42] = 16,
[2][1][RTW89_WW][44] = 16,
[2][1][RTW89_WW][46] = 16,
- [2][1][RTW89_WW][48] = 44,
- [2][1][RTW89_WW][50] = 44,
- [2][1][RTW89_WW][52] = 44,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
[0][0][RTW89_FCC][0] = 52,
[0][0][RTW89_ETSI][0] = 24,
[0][0][RTW89_MKK][0] = 26,
@@ -21141,7 +21141,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 32,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CHILE][48] = 127,
@@ -21153,7 +21153,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 32,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CHILE][50] = 127,
@@ -21165,7 +21165,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 32,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CHILE][52] = 127,
@@ -21477,7 +21477,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
- [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_IC][48] = 16,
[0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
[0][1][RTW89_CHILE][48] = 127,
@@ -21489,7 +21489,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
- [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_IC][50] = 16,
[0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
[0][1][RTW89_CHILE][50] = 127,
@@ -21501,7 +21501,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][52] = 20,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
- [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_IC][52] = 16,
[0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
[0][1][RTW89_CHILE][52] = 127,
@@ -21813,7 +21813,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 44,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 54,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CHILE][48] = 127,
@@ -21825,7 +21825,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 44,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 54,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CHILE][50] = 127,
@@ -21837,7 +21837,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 44,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CHILE][52] = 127,
@@ -22149,7 +22149,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][48] = 32,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
- [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_IC][48] = 28,
[1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
[1][1][RTW89_CHILE][48] = 127,
@@ -22161,7 +22161,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][50] = 32,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
- [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_IC][50] = 30,
[1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
[1][1][RTW89_CHILE][50] = 127,
@@ -22173,7 +22173,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][52] = 32,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
- [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_IC][52] = 30,
[1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
[1][1][RTW89_CHILE][52] = 127,
@@ -22486,7 +22486,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
- [2][0][RTW89_KCC][48] = 127,
+ [2][0][RTW89_KCC][48] = 66,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CHILE][48] = 127,
[2][0][RTW89_UKRAINE][48] = 127,
@@ -22498,7 +22498,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
- [2][0][RTW89_KCC][50] = 127,
+ [2][0][RTW89_KCC][50] = 66,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CHILE][50] = 127,
[2][0][RTW89_UKRAINE][50] = 127,
@@ -22510,7 +22510,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
- [2][0][RTW89_KCC][52] = 127,
+ [2][0][RTW89_KCC][52] = 66,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CHILE][52] = 127,
[2][0][RTW89_UKRAINE][52] = 127,
@@ -22821,7 +22821,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][48] = 44,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
- [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_IC][48] = 40,
[2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
[2][1][RTW89_CHILE][48] = 127,
@@ -22833,7 +22833,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][50] = 44,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
- [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_IC][50] = 40,
[2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
[2][1][RTW89_CHILE][50] = 127,
@@ -22845,7 +22845,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][52] = 44,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
- [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_IC][52] = 40,
[2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
[2][1][RTW89_CHILE][52] = 127,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 920b20bbcfb7..ed71364e6437 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 8618d0204f66..17e6164855fa 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -842,7 +842,7 @@ static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -2365,28 +2365,55 @@ static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = (module->rfe_type % 2 ? 2 : 3);
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -2449,7 +2476,6 @@ void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -2468,7 +2494,7 @@ static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852c_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852c_set_trx_mask(rtwdev,
@@ -2813,6 +2839,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.enable_bb_rf = rtw8852c_mac_enable_bb_rf,
.disable_bb_rf = rtw8852c_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852c_bb_reset,
.bb_sethw = rtw8852c_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2823,7 +2850,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.read_phycap = rtw8852c_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852c_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852c_rfk_channel,
.rfk_band_changed = rtw8852c_rfk_band_changed,
.rfk_scan = rtw8852c_rfk_scan,
@@ -2848,6 +2877,12 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
.resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
.h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852c_btc_set_rfe,
.btc_init_cfg = rtw8852c_btc_init_cfg,
@@ -2902,7 +2937,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
- .support_bw160 = true,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
@@ -2959,6 +2997,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852c_c2h_regs,
.page_regs = &rtw8852c_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 4592de3dbd94..583ea673a4f5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -35,6 +35,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 0e7300cc6d9e..367459bd1345 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2023 Realtek Corporation
*/
+#include "coex.h"
#include "debug.h"
#include "efuse.h"
#include "fw.h"
@@ -9,12 +10,16 @@
#include "phy.h"
#include "reg.h"
#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+#include "util.h"
#define RTW8922A_FW_FORMAT_MAX 0
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME ".bin"
+#define HE_N_USER_MAX_8922A 4
+
static const struct rtw89_hfc_ch_cfg rtw8922a_hfc_chcfg_pcie[] = {
{2, 1641, grp_0}, /* ACH 0 */
{2, 1641, grp_0}, /* ACH 1 */
@@ -43,6 +48,8 @@ static const struct rtw89_hfc_pub_cfg rtw8922a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8922a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
&rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
+ [RTW89_QTA_DBCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
[RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_prec_cfg_c2,
RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
@@ -54,6 +61,11 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
&rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
&rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
&rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
+ [RTW89_QTA_DBCC] = {RTW89_QTA_DBCC, &rtw89_mac_size.wde_size0_v1,
+ &rtw89_mac_size.ple_size0_v1, &rtw89_mac_size.wde_qt0_v1,
+ &rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
+ &rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
+ &rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4_v1,
&rtw89_mac_size.ple_size3_v1, &rtw89_mac_size.wde_qt4,
&rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt9,
@@ -63,6 +75,31 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
NULL},
};
+static const u32 rtw8922a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_BE_H2CREG_DATA0, R_BE_H2CREG_DATA1, R_BE_H2CREG_DATA2,
+ R_BE_H2CREG_DATA3
+};
+
+static const u32 rtw8922a_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_BE_C2HREG_DATA0, R_BE_C2HREG_DATA1, R_BE_C2HREG_DATA2,
+ R_BE_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8922a_page_regs = {
+ .hci_fc_ctrl = R_BE_HCI_FC_CTRL,
+ .ch_page_ctrl = R_BE_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_BE_CH0_PAGE_CTRL,
+ .ach_page_info = R_BE_CH0_PAGE_INFO,
+ .pub_page_info3 = R_BE_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_BE_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_BE_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_BE_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_BE_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_BE_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_BE_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_BE_WP_PAGE_INFO1,
+};
+
static const struct rtw89_reg_imr rtw8922a_imr_dmac_regs[] = {
{R_BE_DISP_HOST_IMR, B_BE_DISP_HOST_IMR_CLR, B_BE_DISP_HOST_IMR_SET},
{R_BE_DISP_CPU_IMR, B_BE_DISP_CPU_IMR_CLR, B_BE_DISP_CPU_IMR_SET},
@@ -119,6 +156,51 @@ static const struct rtw89_imr_table rtw8922a_imr_cmac_table = {
.n_regs = ARRAY_SIZE(rtw8922a_imr_cmac_regs),
};
+static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = {
+ .ref_rate = {R_BE_TRXPTCL_RESP_1, B_BE_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8922a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V2,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .bmode_pd_reg = R_BMODE_PDTH_EN_V2,
+ .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1,
+ .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V2,
+ .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_edcca_regs rtw8922a_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_BE,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_PPDU_LVL_BE,
+ .ppdu_mask = B_EDCCA_LVL_MSK1,
+ .rpt_a = R_EDCCA_RPT_A_BE,
+ .rpt_b = R_EDCCA_RPT_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .rpt_sel_be = R_EDCCA_RPTREG_SEL_BE,
+ .rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_BE_M,
+};
+
static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_SYS] = {.offset = 0x00000, .size = 0x310},
[RTW89_EFUSE_BLOCK_RF] = {.offset = 0x10000, .size = 0x240},
@@ -130,6 +212,36 @@ static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_ADIE] = {.offset = 0x70000, .size = 0x10},
};
+static void rtw8922a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
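+	/* en=true shares the RX chain with BT: BTG routed on path B,
+	 * LNA6/TIA0_B gains adjusted, GNT_BT weighting enabled
+	 */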
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x1, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x0, phy_idx);
+ }
+}
+
static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -273,6 +385,9 @@ static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN |
B_BE_FEN_BBPLAT_RSTB);
+ if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ rtw89_efuse_read_fw_secure_be(rtwdev);
+
return 0;
}
@@ -574,6 +689,32 @@ static void rtw8922a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG_V1, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA_V1, pabias_5g);
+ }
+}
+
static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
u8 *phycap_map)
{
@@ -591,6 +732,31 @@ static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pad_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pad_bias_2g, pad_bias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] no PG, do nothing\n");
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pad_bias_2g = u8_get_bits(info->pad_bias_trim[i], GENMASK(3, 0));
+ pad_bias_5g = u8_get_bits(info->pad_bias_trim[i], GENMASK(7, 4));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pad_bias_2g, pad_bias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXG_V1, pad_bias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXA_V1, pad_bias_5g);
+ }
+}
+
static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
{
rtw8922a_phycap_parsing_thermal_trim(rtwdev, phycap_map);
@@ -600,6 +766,1547 @@ static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
return 0;
}
+static void rtw8922a_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8922a_pa_bias_trim(rtwdev);
+ rtw8922a_pad_bias_trim(rtwdev);
+}
+
+static void rtw8922a_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_BE_TX_SUB_BAND_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_BE_TXRATE_CHK, mac_idx);
+ u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_RFMOD, mac_idx);
+ u8 txsb20 = 0, txsb40 = 0, txsb80 = 0;
+ u8 rf_mod_val, chk_rate_mask;
+ u32 txsb;
+ u32 reg;
+
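+	/* walk down from the configured bandwidth, recording the primary
+	 * sub-band index at each narrower width via the fallthroughs
+	 */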
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ txsb80 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_80);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_80:
+ txsb40 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb20 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_mod_val = BE_WMAC_RFMOD_160M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK) |
+ u32_encode_bits(txsb80, B_BE_TXSB_80M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_mod_val = BE_WMAC_RFMOD_80M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_mod_val = BE_WMAC_RFMOD_40M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ rf_mod_val = BE_WMAC_RFMOD_20M;
+ txsb = 0;
+ break;
+ }
+
+ if (txsb20 <= BE_PRI20_BITMAP_MAX)
+ txsb |= u32_encode_bits(BIT(txsb20), B_BE_PRI20_BITMAP_MASK);
+
+ rtw89_write8_mask(rtwdev, rf_mod, B_BE_WMAC_RFMOD_MASK, rf_mod_val);
+ rtw89_write32(rtwdev, sub_carr, txsb);
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ chk_rate_mask = B_BE_BAND_MODE;
+ break;
+ case RTW89_BAND_5G:
+ case RTW89_BAND_6G:
+ chk_rate_mask = B_BE_CHECK_CCK_EN | B_BE_RTS_LIMIT_IN_OFDM6;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
+ return;
+ }
+
+ rtw89_write8_clr(rtwdev, chk_rate, B_BE_BAND_MODE | B_BE_CHECK_CCK_EN |
+ B_BE_RTS_LIMIT_IN_OFDM6);
+ rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask);
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_320:
+ case RTW89_CHANNEL_WIDTH_160:
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_40:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x41);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x41);
+ break;
+ default:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x3f);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x3e);
+ break;
+ }
+}
+
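+/* per 2.4 GHz channel (1-14) thresholds programmed into the Barker/CCK
+ * FC0INV registers by rtw8922a_ctrl_sco_cck() below
+ */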
+static const u32 rtw8922a_sco_barker_threshold[14] = {
+ 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6,
+ 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a
+};
+
+static const u32 rtw8922a_sco_cck_threshold[14] = {
+ 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db,
+ 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e
+};
+
+static int rtw8922a_ctrl_sco_cck(struct rtw89_dev *rtwdev,
+ u8 primary_ch, enum rtw89_bandwidth bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ch_element;
+
+ if (primary_ch >= 14)
+ return -EINVAL;
+
+ ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_idx(rtwdev, R_BK_FC0INV, B_BK_FC0INV,
+ rtw8922a_sco_barker_threshold[ch_element],
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CCK_FC0INV, B_CCK_FC0INV,
+ rtw8922a_sco_cck_threshold[ch_element],
+ phy_idx);
+
+ return 0;
+}
+
+struct rtw8922a_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_g_mask;
+ u32 gain_a_mask;
+};
+
+static const struct rtw89_reg_def rpl_comp_bw160[RTW89_BW20_SC_160M] = {
+ { .addr = 0x41E8, .mask = 0xFF00},
+ { .addr = 0x41E8, .mask = 0xFF0000},
+ { .addr = 0x41E8, .mask = 0xFF000000},
+ { .addr = 0x41EC, .mask = 0xFF},
+ { .addr = 0x41EC, .mask = 0xFF00},
+ { .addr = 0x41EC, .mask = 0xFF0000},
+ { .addr = 0x41EC, .mask = 0xFF000000},
+ { .addr = 0x41F0, .mask = 0xFF}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw80[RTW89_BW20_SC_80M] = {
+ { .addr = 0x41F4, .mask = 0xFF},
+ { .addr = 0x41F4, .mask = 0xFF00},
+ { .addr = 0x41F4, .mask = 0xFF0000},
+ { .addr = 0x41F4, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw40[RTW89_BW20_SC_40M] = {
+ { .addr = 0x41F0, .mask = 0xFF0000},
+ { .addr = 0x41F0, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw20[RTW89_BW20_SC_20M] = {
+ { .addr = 0x41F0, .mask = 0xFF00}
+};
+
+static const struct rtw8922a_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a8, 0x44a8}, .gain_a = {0x4078, 0x4478},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+};
+
+static const struct rtw8922a_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4054, 0x4454}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x7FC0000, .gain_a_mask = 0x1FF},
+ { .gain_g = {0x4058, 0x4458}, .gain_a = {0x4054, 0x4454},
+	  .gain_g_mask = 0x1FF, .gain_a_mask = 0x3FE00},
+};
+
+struct rtw8922a_bb_gain_bypass {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_mask_g;
+ u32 gain_mask_a;
+};
+
+static void rtw8922a_set_rpl_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ u32 reg_path_ofst = 0;
+ u32 mask;
+ s32 val;
+ u32 reg;
+ int i;
+
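+	/* path B registers sit 0x400 above their path A counterparts */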
+ if (path == RF_PATH_B)
+ reg_path_ofst = 0x400;
+
+ for (i = 0; i < RTW89_BW20_SC_160M; i++) {
+ reg = rpl_comp_bw160[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw160[i].mask;
+ val = gain->rpl_ofst_160[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++) {
+ reg = rpl_comp_bw80[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw80[i].mask;
+ val = gain->rpl_ofst_80[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_40M; i++) {
+ reg = rpl_comp_bw40[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw40[i].mask;
+ val = gain->rpl_ofst_40[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_20M; i++) {
+ reg = rpl_comp_bw20[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw20[i].mask;
+ val = gain->rpl_ofst_20[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_lna_tia_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ enum rtw89_phy_bb_bw_be bw_type;
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ bw_type = chan->band_width <= RTW89_CHANNEL_WIDTH_40 ?
+ RTW89_BB_BW_20_40 : RTW89_BB_BW_80_160_320;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_lna[i].gain_g[path];
+ mask = bb_gain_lna[i].gain_g_mask;
+ } else {
+ reg = bb_gain_lna[i].gain_a[path];
+ mask = bb_gain_lna[i].gain_a_mask;
+ }
+ val = gain->lna_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_tia[i].gain_g[path];
+ mask = bb_gain_tia[i].gain_g_mask;
+ } else {
+ reg = bb_gain_tia[i].gain_a[path];
+ mask = bb_gain_tia[i].gain_a_mask;
+ }
+ val = gain->tia_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_lna_tia_gain(rtwdev, chan, path, phy_idx);
+ rtw8922a_set_rpl_gain(rtwdev, chan, path, phy_idx);
+}
+
+static void rtw8922a_set_rx_gain_normal_cck(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ s8 value = -gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK]; /* S(8,2) */
+ u8 fraction = value & 0x3;
+
+ if (fraction) {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20,
+ (0x4 - fraction) << 1);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40,
+ (0x4 - fraction) << 1);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 1 + 0xdc);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20, 0);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40, 0);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 0xdc);
+ }
+}
+
+static void rtw8922a_set_rx_gain_normal_ofdm(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ static const u32 rssi_tb_bias_comp[2] = {0x41f8, 0x45f8};
+ static const u32 rssi_tb_ext_comp[2] = {0x4208, 0x4608};
+ static const u32 rssi_ofst_addr[2] = {0x40c8, 0x44c8};
+ static const u32 rpl_bias_comp[2] = {0x41e8, 0x45e8};
+ static const u32 rpl_ext_comp[2] = {0x41f8, 0x45f8};
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_band;
+ s8 v1, v2, v3;
+ s32 value;
+
+ gain_band = rtw89_subband_to_gain_offset_band_of_ofdm(chan->subband_type);
+ value = gain->offset[path][gain_band];
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], 0xff000000, value + 0xF8);
+
+ value *= -4;
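+	/* spread the negated offset (0.25 dB units) across up to three
+	 * saturating s8 compensation fields
+	 */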
+ v1 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v1;
+ v2 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v2;
+ v3 = clamp_t(s32, value, S8_MIN, S8_MAX);
+
+ rtw89_phy_write32_mask(rtwdev, rpl_bias_comp[path], 0xff, v1);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff, v2);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff00, v3);
+
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_bias_comp[path], 0xff0000, v1);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff0000, v2);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff000000, v3);
+}
+
+static void rtw8922a_set_rx_gain_normal(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ if (!gain->offset_valid)
+ return;
+
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8922a_set_rx_gain_normal_cck(rtwdev, chan, path);
+
+ rtw8922a_set_rx_gain_normal_ofdm(rtwdev, chan, path);
+}
+
+static void rtw8922a_set_cck_parameters(struct rtw89_dev *rtwdev, u8 central_ch,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (central_ch == 14) {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3b13ff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x1c42de, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0xfdb0ad, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0xf60f6e, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfd8f92, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0x02d011, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0x01c02c, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xfff00a, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3a63ca, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x2a833f, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0x1491f8, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0x03c0b0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfccff1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0xfccfc3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0xfebfdc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xffdff7, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 band_sel[2] = {0x4160, 0x4560};
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ bool is_2g = band == RTW89_BAND_2G;
+ u8 chan_idx;
+ u8 path;
+ u8 sco;
+
+ if (!central_freq) {
+ rtw89_warn(rtwdev, "Invalid central_freq\n");
+ return;
+ }
+
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_B, phy_idx);
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8922A; path++)
+ rtw89_phy_write32_idx(rtwdev, band_sel[path], BIT((26)), is_2g, phy_idx);
+
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_A);
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_B);
+
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_FC0, central_freq, phy_idx);
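+	/* B_FC0_INV takes round(2^18 / fc), the inverse carrier frequency
+	 * in Q18 fixed point
+	 */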
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_FC0_INV, sco, phy_idx);
+
+ if (band == RTW89_BAND_2G)
+ rtw8922a_set_cck_parameters(rtwdev, central_ch, phy_idx);
+
+ chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
+}
+
+static void
+rtw8922a_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_sb, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
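+	/* per-bandwidth CR setup: channel BW field, small-BW (5/10 MHz)
+	 * mode, primary sub-band and gain-map enables
+	 */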
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri_sb:%d)\n", bw,
+ pri_sb);
+ break;
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 0, phy_idx);
+}
+
+static u32 rtw8922a_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
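+	/* no spur frequencies are tabulated for this chip; returning 0
+	 * makes the callers disable the CSI/NBI notch filters
+	 */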
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
+static void rtw8922a_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s32 freq_diff, csi_idx, csi_tone_idx;
+ u32 spur_freq;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN,
+ 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_TONE_IDX,
+ csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN, 1, phy_idx);
+}
+
+static const struct rtw89_nbi_reg_def rtw8922a_nbi_reg_def[] = {
+ [RF_PATH_A] = {
+ .notch1_idx = {0x41a0, 0xFF},
+ .notch1_frac_idx = {0x41a0, 0xC00},
+ .notch1_en = {0x41a0, 0x1000},
+ .notch2_idx = {0x41ac, 0xFF},
+ .notch2_frac_idx = {0x41ac, 0xC00},
+ .notch2_en = {0x41ac, 0x1000},
+ },
+ [RF_PATH_B] = {
+ .notch1_idx = {0x45a0, 0xFF},
+ .notch1_frac_idx = {0x45a0, 0xC00},
+ .notch1_en = {0x45a0, 0x1000},
+ .notch2_idx = {0x45ac, 0xFF},
+ .notch2_frac_idx = {0x45ac, 0xC00},
+ .notch2_en = {0x45ac, 0x1000},
+ },
+};
+
+static void rtw8922a_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_nbi_reg_def *nbi = &rtw8922a_nbi_reg_def[path];
+ s32 nbi_frac_idx, nbi_frac_tone_idx;
+ s32 nbi_idx, nbi_tone_idx;
+ bool notch2_chk = false;
+ u32 spur_freq, fc;
+ s32 freq_diff;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ return;
+ }
+
+ fc = chan->freq;
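+	/* 160 MHz: re-center on the 80 MHz half nearest the spur and pick
+	 * notch1/notch2 accordingly
+	 */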
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
+ fc = (spur_freq > fc) ? fc + 40 : fc - 40;
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
+ notch2_chk = true;
+ }
+
+ freq_diff = (spur_freq - fc) * 1000000;
+ nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5,
+ &nbi_frac_idx);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
+ s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
+ } else {
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
+
+ s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
+ }
+ nbi_frac_tone_idx =
+ s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_idx.addr,
+ nbi->notch2_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_frac_idx.addr,
+ nbi->notch2_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_idx.addr,
+ nbi->notch1_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_frac_idx.addr,
+ nbi->notch1_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ }
+}
+
+static void rtw8922a_spur_elimination(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_csi_tone_idx(rtwdev, chan, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B, phy_idx);
+}
+
+static void rtw8922a_ctrl_afe_dac(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ u32 cr_ofst = 0x0;
+
+ if (path == RF_PATH_B)
+ cr_ofst = 0x100;
+
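+	/* DAC clock select: 0xE/0x7 for bandwidths up to 80 MHz,
+	 * 0xD/0x6 for 160 MHz
+	 */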
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xE);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x7);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xD);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x6);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct rtw89_reg2_def bb_mcu0_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static const struct rtw89_reg2_def bb_mcu1_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static void rtw8922a_bbmcu_cr_init(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_reg2_def *reg;
+ int size;
+ int i;
+
+ if (phy_idx == RTW89_PHY_0) {
+ reg = bb_mcu0_init_reg;
+ size = ARRAY_SIZE(bb_mcu0_init_reg);
+ } else {
+ reg = bb_mcu1_init_reg;
+ size = ARRAY_SIZE(bb_mcu1_init_reg);
+ }
+
+ for (i = 0; i < size; i++, reg++)
+ rtw89_bbmcu_write32(rtwdev, reg->addr, reg->data, phy_idx);
+}
+
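+ /* Per-PHY register fields, indexed by enum rtw89_phy_idx */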
+static const u32 dmac_sys_mask[2] = {B_BE_DMAC_BB_PHY0_MASK, B_BE_DMAC_BB_PHY1_MASK};
+static const u32 bbrst_mask[2] = {B_BE_FEN_BBPLAT_RSTB, B_BE_FEN_BB1PLAT_RSTB};
+static const u32 glbrst_mask[2] = {B_BE_FEN_BB_IP_RSTN, B_BE_FEN_BB1_IP_RSTN};
+static const u32 mcu_bootrdy_mask[2] = {B_BE_BOOT_RDY0, B_BE_BOOT_RDY1};
+
+static void rtw8922a_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 rdy = 0;
+
+ if (phy_idx == RTW89_PHY_1)
+ rdy = 1;
+
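+ /* Pulse the global BB IP reset with the BB platform reset held, raise
+ * boot-ready (PHY1 only here; PHY0's is set in bb_postinit) and power
+ * up the BBMCU memory before loading the BBMCU CRs.
+ */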
+ rtw89_write32_mask(rtwdev, R_BE_DMAC_SYS_CR32B, dmac_sys_mask[phy_idx], 0x7FF9);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x1);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx], rdy);
+ rtw89_write32_mask(rtwdev, R_BE_MEM_PWR_CTRL, B_BE_MEM_BBMCU0_DS_V1, 0);
+
+ fsleep(1);
+ rtw8922a_bbmcu_cr_init(rtwdev, phy_idx);
+}
+
+static void rtw8922a_bb_postinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx == RTW89_PHY_0)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx]);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx]);
+
+ rtw89_phy_write32_set(rtwdev, R_BBCLK, B_CLK_640M);
+ rtw89_phy_write32_clr(rtwdev, R_TXSCALE, B_TXFCTR_EN);
+ rtw89_phy_set_phy_regs(rtwdev, R_TXFCTR, B_TXFCTR_THD, 0x200);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_EHT_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_HE_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_HT_VHT_TH, 0xAAA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_EHT_MCS14, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_EHT_MCS15, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_EHTTB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEERSU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEMU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_TB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SU_PUNC, B_SU_PUNC_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_HWGEN_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_PWROFST_COMP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_BY_SLOPE, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_A, B_MGA_AEND, 0xe0);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_MAG_AB, 0xe0c000);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_A, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_B, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SC_CORNER, B_SC_CORNER, 0x200);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x1, phy_idx);
+}
+
+static void rtw8922a_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ bool en, enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1,
+ B_RXCCA_BE1_DIS, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static int rtw8922a_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_reg2_def path_com_cr[] = {
+ {0x11A00, 0x21C86900},
+ {0x11A04, 0x00E4E433},
+ {0x11A08, 0x39390CC9},
+ {0x11A0C, 0x4E433240},
+ {0x11A10, 0x90CC900E},
+ {0x11A14, 0x00240393},
+ {0x11A18, 0x201C8600},
+ };
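+ /* The initializers above equal the RF_PATH_AB values, so even an
+ * invalid tx_path still writes a sane both-path configuration below.
+ */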
+ int ret = 0;
+ u32 reg;
+ int i;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL, 0x0, phy_idx);
+
+ if (phy_idx == RTW89_PHY_1 && !rtwdev->dbcc_en)
+ return 0;
+
+ if (tx_path == RF_PATH_A) {
+ path_com_cr[0].data = 0x21C82900;
+ path_com_cr[1].data = 0x00E4E431;
+ path_com_cr[2].data = 0x39390C49;
+ path_com_cr[3].data = 0x4E431240;
+ path_com_cr[4].data = 0x90C4900E;
+ path_com_cr[6].data = 0x201C8200;
+ } else if (tx_path == RF_PATH_B) {
+ path_com_cr[0].data = 0x21C04900;
+ path_com_cr[1].data = 0x00E4E032;
+ path_com_cr[2].data = 0x39380C89;
+ path_com_cr[3].data = 0x4E032240;
+ path_com_cr[4].data = 0x80C8900E;
+ path_com_cr[6].data = 0x201C0400;
+ } else if (tx_path == RF_PATH_AB) {
+ path_com_cr[0].data = 0x21C86900;
+ path_com_cr[1].data = 0x00E4E433;
+ path_com_cr[2].data = 0x39390CC9;
+ path_com_cr[3].data = 0x4E433240;
+ path_com_cr[4].data = 0x90CC900E;
+ path_com_cr[6].data = 0x201C8600;
+ } else {
+ ret = -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(path_com_cr); i++) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, path_com_cr[i].addr, phy_idx);
+ rtw89_write32(rtwdev, reg, path_com_cr[i].data);
+ }
+
+ return ret;
+}
+
+static void rtw8922a_bb_reset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+}
+
+static int rtw8922a_cfg_rx_nss_limit(struct rtw89_dev *rtwdev, u8 rx_nss,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rx_nss == 1) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else if (rx_nss == 2) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rtw8922a_tssi_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+}
+
+static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 rx_nss = (rx_path == RF_PATH_AB) ? 2 : 1;
+
+ /* Set to 0 first to avoid abnormal EDCCA report */
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x0, phy_idx);
+
+ if (rx_path == RF_PATH_A) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 1, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_B) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 2, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_AB) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 3, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x0);
+ } else if (mode == MLO_2_PLUS_0_1RF || mode == MLO_0_PLUS_2_1RF ||
+ mode == MLO_DBCC_NOT_SUPPORT) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x1);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_A);
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_B);
+ } else {
+ rtw89_warn(rtwdev, "unsupported MLO mode %d\n", mode);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+
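+ /* The EMLSR parameter is parked at a neutral value above and stepped
+ * through intermediate values per mode below; presumably the staged
+ * writes are needed for a clean hand-over, so keep the order intact.
+ */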
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else if (mode == MLO_0_PLUS_2_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ } else if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x7BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3AAB);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x0);
+ }
+
+ return 0;
+}
+
+static void rtw8922a_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ u32 reg;
+
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP, B_EN_SND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP_C1, B_EN_SND_WO_NDP);
+
+ rtw89_write32_mask(rtwdev, R_BE_PWR_BOOST, B_BE_PWR_CTRL_SEL, 0);
+ if (rtwdev->dbcc_en) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, reg, B_BE_PWR_CTRL_SEL, 0);
+ }
+
+ rtw8922a_ctrl_mlo(rtwdev, rtwdev->mlo_dbcc_mode);
+}
+
+static void rtw8922a_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (cck_en) {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 1, phy_idx);
+ }
+}
+
+static void rtw8922a_set_channel_bb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_sb = chan->pri_sb_idx;
+
+ if (cck_en)
+ rtw8922a_ctrl_sco_cck(rtwdev, chan->primary_channel,
+ chan->band_width, phy_idx);
+
+ rtw8922a_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8922a_ctrl_bw(rtwdev, pri_sb, chan->band_width, phy_idx);
+ rtw8922a_ctrl_cck_en(rtwdev, cck_en, phy_idx);
+ rtw8922a_spur_elimination(rtwdev, chan, phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, RF_PATH_AB, phy_idx);
+}
+
+static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ }
+}
+
+static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_mlo_dbcc_mode mode)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw8922a_ctrl_mlo(rtwdev, mode);
+}
+
+static void rtw8922a_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8922a_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8922a_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_dfs_en_idx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_rf_path path,
+ bool en)
+{
+ u32 path_ofst = (path == RF_PATH_B) ? 0x100 : 0x0;
+
+ if (en)
+ rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), 0,
+ phy_idx);
+}
+
+static void rtw8922a_dfs_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_A, en);
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_B, en);
+}
+
+static void rtw8922a_adc_en_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool en)
+{
+ u32 val;
+
+ val = rtw89_phy_read32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1);
+
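+ /* The FIFO enable field is an active-low per-path disable mask:
+ * clear bit 0/1 to enable path A/B, set it to disable.
+ */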
+ if (en) {
+ if (path == RF_PATH_A)
+ val &= ~0x1;
+ else
+ val &= ~0x2;
+ } else {
+ if (path == RF_PATH_A)
+ val |= 0x1;
+ else
+ val |= 0x2;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1, val);
+}
+
+static void rtw8922a_adc_en(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ else
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ } else {
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ }
+}
+
+static
+void rtw8922a_hal_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_mac_idx mac_idx,
+ enum rtw89_band band, u32 *tx_en, bool enter)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
+ rtw8922a_dfs_en(rtwdev, false, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8922a_adc_en(rtwdev, false, phy_idx);
+ fsleep(40);
+ rtw8922a_bb_reset_en(rtwdev, band, false, phy_idx);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
+ rtw8922a_adc_en(rtwdev, true, phy_idx);
+ rtw8922a_dfs_en(rtwdev, true, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8922a_bb_reset_en(rtwdev, band, true, phy_idx);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, *tx_en);
+ }
+}
+
+static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw8922a_pre_set_channel_bb(rtwdev, phy_idx);
+ rtw8922a_pre_set_channel_rf(rtwdev, phy_idx);
+ }
+
+ rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
+
+ if (!enter) {
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
+ }
+}
+
+static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
+}
+
+static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
+
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
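+ /* Poll the mode field of RF reg 0x00 until it leaves value 2, i.e.
+ * (per the debug text) the path has settled back into RX, waiting up
+ * to 5 ms per path before starting calibration.
+ */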
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, RF_AB);
+
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, 32);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
+}
+
+static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+}
+
+static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+}
+
+static void rtw8922a_rfk_track(struct rtw89_dev *rtwdev)
+{
+}
+
+static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_OFDM, ref_ofdm);
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_CCK, ref_cck);
+}
+
+static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ctrl = en ? 0x1 : 0x0;
+
+ rtw89_phy_write32_idx(rtwdev, R_BEDGE3, B_BEDGE_CFG, ctrl, phy_idx);
+}
+
+static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_tx_shape *tx_shape = &rfe_parms->tx_shape;
+ u8 tx_shape_idx;
+ u8 band, regd;
+
+ band = chan->band_type;
+ regd = rtw89_regd_get(rtwdev, band);
+ tx_shape_idx = (*tx_shape->lmt)[band][RTW89_RS_OFDM][regd];
+
+ if (tx_shape_idx == 0)
+ rtw8922a_bb_tx_triangular(rtwdev, false, phy_idx);
+ else
+ rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx);
+}
+
+static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8922a_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
+}
+
+static void rtw8922a_ctrl_trx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path, u8 tx_nss,
+ enum rtw89_rf_path rx_path, u8 rx_nss)
+{
+ enum rtw89_phy_idx phy_idx;
+
+ for (phy_idx = RTW89_PHY_0; phy_idx <= RTW89_PHY_1; phy_idx++) {
+ rtw8922a_ctrl_tx_path_tmac(rtwdev, tx_path, phy_idx);
+ rtw8922a_ctrl_rx_path_tmac(rtwdev, rx_path, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x34, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x2a2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x26, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x2a30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x26, phy_idx);
+ }
+}
+
+static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = RF_PATH_AB;
+ u32 tx_en0, tx_en1;
+
+ if (hal->antenna_tx == RF_A)
+ ntx_path = RF_PATH_A;
+ else if (hal->antenna_tx == RF_B)
+ ntx_path = RF_PATH_B;
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, true);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, true);
+
+ rtw8922a_ctrl_trx_path(rtwdev, ntx_path, 2, RF_PATH_AB, 2);
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, false);
+}
+
+static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ int th;
+
+ /* read thermal only if debugging */
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_CFO | RTW89_DBG_RFK_TRACK))
+ return 80;
+
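+ /* Pulse the trigger bit to start a fresh measurement and give the
+ * thermal sensor time to settle before reading it back.
+ */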
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ th = rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL_V1);
+ th += (s8)info->thermal_trim[rf_path];
+
+ return clamp_t(int, th, 0, U8_MAX);
+}
+
+static void rtw8922a_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
+ struct rtw89_btc_module_v7 *module = &md->md_v7;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->kt_ver = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+ module->wa_type = 0;
+
+ module->ant.type = BTC_ANT_SHARED;
+ module->ant.num = 2;
+ module->ant.isolation = 10;
+ module->ant.diversity = 0;
+ module->ant.single_pos = RF_PATH_A;
+ module->ant.btg_pos = RF_PATH_B;
+
+ if (module->kt_ver <= 1)
+ module->wa_type |= BTC_WA_HFP_ZB;
+
+ rtwdev->btc.cx.other.type = BTC_3CX_NONE;
+
+ if (module->rfe_type == 0) {
+ rtwdev->btc.dm.error.map.rfe_type0 = true;
+ return;
+ }
+
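+ /* Odd RFE types are 2-antenna (shared) layouts, even types carry a
+ * third, BT-dedicated antenna; kt_ver 0 parts are forced to 2.
+ */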
+ module->ant.num = (module->rfe_type % 2) ? 2 : 3;
+
+ if (module->kt_ver == 0)
+ module->ant.num = 2;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = module->ant.btg_pos;
+ rtwdev->btc.ant_type = module->ant.type;
+}
+
+static
+void rtw8922a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+}
+
+static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ant_info_v7 *ant = &btc->mdinfo.md_v7.ant;
+ u32 wl_pri, path_min, path_max;
+ u8 path;
+
+ /* for 1-Ant && 1-ss case: only 1-path */
+ if (ant->num == 1) {
+ path_min = ant->single_pos;
+ path_max = path_min;
+ } else {
+ path_min = RF_PATH_A;
+ path_max = RF_PATH_B;
+ }
+
+ for (path = path_min; path <= path_max; path++) {
+ /* set DEBUG_LUT_RFMODE_MASK = 1 to start trx-mask-setup */
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, BIT(17));
+
+ /* if GNT_WL=0 && BT=SS_group --> WL Tx/Rx = THRU */
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_SS_GROUP, 0x5ff);
+
+ /* if GNT_WL=0 && BT=Rx_group --> WL-Rx = THRU + WL-Tx = MASK */
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_RX_GROUP, 0x5df);
+
+ /* if GNT_WL = 0 && BT = Tx_group -->
+ * Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
+ */
+ if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
+ else
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0);
+ }
+
+ /* set WL PTA Hi-Pri: Ack-Tx, Beacon-Tx, Trig-frame-Tx, Null-Tx */
+ wl_pri = B_BTC_RSP_ACK_HI | B_BTC_TX_BCN_HI | B_BTC_TX_TRI_HI |
+ B_BTC_TX_NULL_HI;
+ rtw89_write32(rtwdev, R_BTC_COEX_WL_REQ_BE, wl_pri);
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BE_BT_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* ZB coex table init for HFP PTA req-cmd bit-4 define issue COEX-900 */
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_0, 0xda5a5a5a);
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_1, 0xda5a5a5a);
+
+ rtw89_write32(rtwdev, R_BTC_ZB_BREAK_TBL, 0xf0ffffff);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 chan_idx = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan_idx == 0)
+ return;
+
+ rtw89_decode_chan_idx(rtwdev, chan_idx, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal =
+ RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+ rtw89_write32(rtwdev, R_BE_DMAC_SYS_CR32B, 0x7FF97FF9);
+
+ return 0;
+}
+
+static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -610,10 +2317,56 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
#endif
static const struct rtw89_chip_ops rtw8922a_chip_ops = {
+ .enable_bb_rf = rtw8922a_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8922a_mac_disable_bb_rf,
+ .bb_preinit = rtw8922a_bb_preinit,
+ .bb_postinit = rtw8922a_bb_postinit,
+ .bb_reset = rtw8922a_bb_reset,
+ .bb_sethw = rtw8922a_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v2,
+ .write_rf = rtw89_phy_write_rf_v2,
+ .set_channel = rtw8922a_set_channel,
+ .set_channel_help = rtw8922a_set_channel_help,
.read_efuse = rtw8922a_read_efuse,
.read_phycap = rtw8922a_read_phycap,
+ .fem_setup = NULL,
+ .rfe_gpio = NULL,
+ .rfk_hw_init = rtw8922a_rfk_hw_init,
+ .rfk_init = rtw8922a_rfk_init,
+ .rfk_init_late = rtw8922a_rfk_init_late,
+ .rfk_channel = rtw8922a_rfk_channel,
+ .rfk_band_changed = rtw8922a_rfk_band_changed,
+ .rfk_scan = rtw8922a_rfk_scan,
+ .rfk_track = rtw8922a_rfk_track,
+ .power_trim = rtw8922a_power_trim,
+ .set_txpwr = rtw8922a_set_txpwr,
+ .set_txpwr_ctrl = rtw8922a_set_txpwr_ctrl,
+ .init_txpwr_unit = NULL,
+ .get_thermal = rtw8922a_get_thermal,
+ .ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8922a_query_ppdu,
+ .ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = NULL,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
+ .query_rxdesc = rtw89_core_query_rxdesc_v2,
+ .fill_txdesc = rtw89_core_fill_txdesc_v2,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt_v2,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v2,
+ .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v2,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
+ .h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
+ .h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
+
+ .btc_set_rfe = rtw8922a_btc_set_rfe,
+ .btc_init_cfg = rtw8922a_btc_init_cfg,
};
const struct rtw89_chip_info rtw8922a_chip_info = {
@@ -650,11 +2403,16 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
+ .dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
- .support_chanctx_num = 1,
+ .support_chanctx_num = 2,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -665,7 +2423,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.acam_num = 128,
.bcam_num = 20,
.scam_num = 32,
- .bacam_num = 8,
+ .bacam_num = 24,
.bacam_dynamic_num = 8,
.bacam_ver = RTW89_BACAM_V1,
.ppdu_max_usr = 16,
@@ -683,10 +2441,19 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_G7,
.hci_func_en_addr = R_BE_HCI_FUNC_EN,
.h2c_desc_size = sizeof(struct rtw89_rxdesc_short_v2),
.txwd_body_size = sizeof(struct rtw89_txwd_body_v2),
.txwd_info_size = sizeof(struct rtw89_txwd_info_v2),
+ .h2c_ctrl_reg = R_BE_H2CREG_CTRL,
+ .h2c_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8},
+ .h2c_regs = rtw8922a_h2c_regs,
+ .c2h_ctrl_reg = R_BE_C2HREG_CTRL,
+ .c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
+ .c2h_regs = rtw8922a_c2h_regs,
+ .page_regs = &rtw8922a_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = NULL,
@@ -694,9 +2461,11 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,
+ .rrsr_cfgs = &rtw8922a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
.dma_ch_mask = 0,
+ .edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8922a,
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
new file mode 100644
index 000000000000..2a371829268c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+
+static void rtw8922a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk_man[2] = {R_TSSI_PWR_P0, R_TSSI_PWR_P1};
+
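+ /* B_TSSI_CONT_EN is active-low: write 0 to enable continuous TSSI
+ * tracking, 1 to disable it.
+ */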
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 0);
+ else
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 1);
+}
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static
+void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band,
+ enum rtw89_bandwidth bw)
+{
+ const u32 rf_addr[2] = {RR_CFGCH, RR_CFGCH_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 rf_reg[RF_PATH_NUM_8922A][2];
+ u8 synpath;
+ u32 rf18;
+ u8 kpath;
+ u8 path;
+ u8 i;
+
+ rf_reg[RF_PATH_A][0] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_A][1] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[1], RFREG_MASK);
+ rf_reg[RF_PATH_B][0] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_B][1] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[1], RFREG_MASK);
+
+ kpath = rtw89_phy_get_kpath(rtwdev, phy);
+ synpath = rtw89_phy_get_syn_sel(rtwdev, phy);
+
+ rf18 = rtw89_read_rf(rtwdev, synpath, RR_CFGCH, RFREG_MASK);
+ if (rf18 == INV_RF_DATA) {
+ rtw89_warn(rtwdev, "[RFK] Invalid RF18 value\n");
+ return;
+ }
+
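+ /* Program both channel-config mirrors (RR_CFGCH and RR_CFGCH_V1) on
+ * every path selected for calibration.
+ */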
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ for (i = 0; i < 2; i++) {
+ if (rf_reg[path][i] == INV_RF_DATA) {
+ rtw89_warn(rtwdev,
+ "[RFK] Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+
+ rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW |
+ RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
+ else
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ default:
+ break;
+ case RTW89_BAND_5G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
+ break;
+ case RTW89_BAND_6G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
+ break;
+ }
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
+ break;
+ }
+
+ rtw89_write_rf(rtwdev, path, rf_addr[i],
+ RFREG_MASK, rf_reg[path][i]);
+ fsleep(100);
+ }
+ }
+
+ if (hal->cv != CHIP_CAV)
+ return;
+
+ if (band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c990);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ } else {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c190);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ }
+}
+
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
+}
+
+enum _rf_syn_pow {
+ RF_SYN_ON_OFF,
+ RF_SYN_OFF_ON,
+ RF_SYN_ALLON,
+ RF_SYN_ALLOFF,
+};
+
+static void rtw8922a_set_syn01_cav(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01_cbv(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "SYN config=%d\n", syn);
+
+ if (hal->cv == CHIP_CAV)
+ rtw8922a_set_syn01_cav(rtwdev, syn);
+ else
+ rtw8922a_set_syn01_cbv(rtwdev, syn);
+}
+
+static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
+{
+ u32 tmp;
+
+ if (idx > 2) {
+ rtw89_warn(rtwdev, "[DBCC][ERROR]indx is out of limit!! index(%d)", idx);
+ return;
+ }
+
+ if (kpath & RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G5, tmp);
+ }
+
+ if (kpath & RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G5, tmp);
+ }
+}
+
+static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ const struct rtw89_chan *chan;
+ enum rtw89_entity_mode mode;
+ u8 s0_tbl, s1_tbl;
+ u8 tbl_sel;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ switch (mode) {
+ case RTW89_ENTITY_MODE_MCC_PREPARE:
+ sub_entity_idx = RTW89_SUB_ENTITY_1;
+ tbl_sel = 1;
+ break;
+ default:
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ tbl_sel = 0;
+ break;
+ }
+
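+ /* MCC_PREPARE stages the upcoming channel in K-table 1; any other
+ * mode reloads K-table 0 from the current channel on both paths.
+ */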
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+
+ rfk_mcc->ch[tbl_sel] = chan->channel;
+ rfk_mcc->band[tbl_sel] = chan->band_type;
+ rfk_mcc->bw[tbl_sel] = chan->band_width;
+ rfk_mcc->table_idx = tbl_sel;
+
+ s0_tbl = tbl_sel;
+ s1_tbl = tbl_sel;
+
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_A, s0_tbl);
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_B, s1_tbl);
+}
+
+static void rtw8922a_rfk_mlo_ctrl(struct rtw89_dev *rtwdev)
+{
+ enum _rf_syn_pow syn_pow;
+
+ if (!rtwdev->dbcc_en)
+ goto set_rfk_reload;
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ syn_pow = RF_SYN_OFF_ON;
+ break;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case MLO_DBCC_NOT_SUPPORT:
+ default:
+ syn_pow = RF_SYN_ON_OFF;
+ break;
+ case MLO_1_PLUS_1_1RF:
+ case DBCC_LEGACY:
+ syn_pow = RF_SYN_ALLON;
+ break;
+ }
+
+ rtw8922a_set_syn01(rtwdev, syn_pow);
+
+set_rfk_reload:
+ rtw8922a_chlk_reload(rtwdev);
+}
+
+static void rtw8922a_rfk_pll_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+ u8 tmp;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_PLL_1, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL_1, tmp | 0xf8, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_APBT, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_APBT, tmp & ~0x60, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, tmp | 0x38, 0xFF);
+ if (ret)
+ return;
+}
+
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->dbcc_en)
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+
+ rtw8922a_rfk_pll_init(rtwdev);
+}
+
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ bool mlo_1_1;
+
+ if (!rtwdev->dbcc_en)
+ return;
+
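+ /* Park SYN0/1 in a safe state for the retune: both on for 1+1 MLO,
+ * otherwise only the synthesizer serving this PHY; the mode-specific
+ * setting is restored in rtw8922a_post_set_channel_rf().
+ */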
+ mlo_1_1 = rtw89_is_mlo_1_1(rtwdev);
+ if (mlo_1_1)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ALLON);
+ else if (phy_idx == RTW89_PHY_0)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ON_OFF);
+ else
+ rtw8922a_set_syn01(rtwdev, RF_SYN_OFF_ON);
+
+ fsleep(1000);
+}
+
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
new file mode 100644
index 000000000000..66bdd57c1eea
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8922A_RFK_H__
+#define __RTW89_8922A_RFK_H__
+
+#include "core.h"
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev);
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 7b3d98d2c402..4981b657bd7b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
.rx_ring_eq_is_full = true,
+ .check_rx_tag = true,
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -79,7 +80,7 @@ static struct pci_driver rtw89_8922ae_driver = {
.id_table = rtw89_8922ae_id_table,
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
+ .driver.pm = &rtw89_pm_ops_be,
};
module_pci_driver(rtw89_8922ae_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 5c7ca36c09b6..ccad026defb5 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -41,34 +41,8 @@ static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev)
static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- int ret;
-
- if (enable_wow) {
- ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
- if (ret) {
- rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
- return ret;
- }
- rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
- rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
- rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
- rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
- } else {
- ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
- if (ret) {
- rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
- return ret;
- }
- rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
- rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
- }
- return 0;
+ return mac->wow_config_mac(rtwdev, enable_wow);
}
static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
@@ -85,21 +59,14 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 wow_reason_reg = rtwdev->chip->wow_reason_reg;
struct cfg80211_wowlan_nd_info nd_info;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
- u32 wow_reason_reg;
u8 reason;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
- wow_reason_reg = R_AX_C2HREG_DATA3 + 3;
- else
- wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3;
-
reason = rtw89_read8(rtwdev, wow_reason_reg);
-
switch (reason) {
case RTW89_WOW_RSN_RX_DEAUTH:
wakeup.disconnect = true;
@@ -470,13 +437,14 @@ static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 polling;
int ret;
ret = read_poll_timeout_atomic(rtw89_read8_mask, polling,
wow_enable == !!polling,
50, 50000, false, rtwdev,
- R_AX_WOW_CTRL, B_AX_WOW_WOWEN);
+ mac->wow_ctrl.addr, mac->wow_ctrl.mask);
if (ret)
rtw89_err(rtwdev, "failed to check wow status %s\n",
wow_enable ? "enabled" : "disabled");
@@ -519,7 +487,7 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
return ret;
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c assoc cmac tbl\n");
return ret;
@@ -566,7 +534,7 @@ static int rtw89_wow_enable_trx_pre(struct rtw89_dev *rtwdev)
rtw89_mac_ptk_drop_by_band_and_wait(rtwdev, RTW89_MAC_0);
- ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ ret = rtw89_hci_poll_txdma_ch_idle(rtwdev);
if (ret) {
rtw89_err(rtwdev, "txdma ch busy\n");
return ret;
@@ -589,7 +557,7 @@ static int rtw89_wow_enable_trx_post(struct rtw89_dev *rtwdev)
rtw89_hci_disable_intr(rtwdev);
rtw89_hci_ctrl_trxhci(rtwdev, false);
- ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ ret = rtw89_hci_poll_txdma_ch_idle(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to poll txdma ch idle pcie\n");
return ret;
@@ -699,14 +667,14 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_cfg_wake(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable config wake\n");
goto out;
}
- rtw89_fw_release_general_pkt_list(rtwdev, true);
-
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 05890536e353..211fa25b9a78 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -740,7 +740,7 @@ u16 rsi_get_connected_channel(struct ieee80211_vif *vif)
return 0;
bss = &vif->bss_conf;
- channel = bss->chandef.chan;
+ channel = bss->chanreq.oper.chan;
if (!channel)
return 0;
@@ -759,7 +759,7 @@ static void rsi_switch_channel(struct rsi_hw *adapter,
if (!vif)
return;
- channel = vif->bss_conf.chandef.chan;
+ channel = vif->bss_conf.chanreq.oper.chan;
if (!channel)
return;
@@ -1957,6 +1957,10 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw)
#endif
static const struct ieee80211_ops mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rsi_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rsi_mac80211_start,
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 10a465686439..dccc139cabb2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -232,17 +232,17 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
if (!usb_reg_buf)
return status;
- usb_reg_buf[0] = (cpu_to_le32(value) & 0x00ff);
- usb_reg_buf[1] = (cpu_to_le32(value) & 0xff00) >> 8;
- usb_reg_buf[2] = (cpu_to_le32(value) & 0x00ff0000) >> 16;
- usb_reg_buf[3] = (cpu_to_le32(value) & 0xff000000) >> 24;
+ usb_reg_buf[0] = value & 0x00ff;
+ usb_reg_buf[1] = (value & 0xff00) >> 8;
+ usb_reg_buf[2] = (value & 0x00ff0000) >> 16;
+ usb_reg_buf[3] = (value & 0xff000000) >> 24;
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
RSI_USB_REQ_OUT,
- ((cpu_to_le32(reg) & 0xffff0000) >> 16),
- (cpu_to_le32(reg) & 0xffff),
+ (reg & 0xffff0000) >> 16,
+ reg & 0xffff,
(void *)usb_reg_buf,
len,
USB_CTRL_SET_TIMEOUT);
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 537caf9d914a..a904602f02ce 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -144,13 +144,13 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
- chan0 = vif_ch0->bss_conf.chandef.chan;
+ chan0 = vif_ch0->bss_conf.chanreq.oper.chan;
}
if (wdev_to_wvif(wvif->wdev, 1)) {
struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
- chan1 = vif_ch1->bss_conf.chandef.chan;
+ chan1 = vif_ch1->bss_conf.chanreq.oper.chan;
}
if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
if (chan0->hw_value == chan1->hw_value) {
@@ -344,6 +344,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
const int akm_suite_size = 4 / sizeof(u16);
+ int ret = -EINVAL;
const u16 *ptr;
if (unlikely(!skb))
@@ -352,22 +353,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
if (unlikely(!ptr))
- return -EINVAL;
+ goto free_skb;
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + pairwise_cipher_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + akm_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
- return 0;
+ ret = 0;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
}
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index 4c30b5772ce0..00c4731d8f8e 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
@@ -178,12 +178,15 @@ static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
return ret;
}
+/* Like the rest of the driver, this only supports one device per system */
+static struct gpio_desc *cw1200_reset;
+static struct gpio_desc *cw1200_powerup;
+
static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -196,16 +199,21 @@ static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
{
- /* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
+ /* Ensure I/Os are pulled low (reset is active low) */
+ cw1200_reset = devm_gpiod_get_optional(NULL, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cw1200_reset)) {
+ pr_err("could not get CW1200 SDIO reset GPIO\n");
+ return PTR_ERR(cw1200_reset);
}
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
+ gpiod_set_consumer_name(cw1200_reset, "cw1200_wlan_reset");
+ cw1200_powerup = devm_gpiod_get_optional(NULL, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(cw1200_powerup)) {
+ pr_err("could not get CW1200 SDIO powerup GPIO\n");
+ return PTR_ERR(cw1200_powerup);
}
- if (pdata->reset || pdata->powerup)
+ gpiod_set_consumer_name(cw1200_powerup, "cw1200_wlan_powerup");
+
+ if (cw1200_reset || cw1200_powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -226,13 +234,13 @@ static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (cw1200_powerup) {
+ gpiod_set_value(cw1200_powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(50); /* Or more..? */
}
return 0;
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index c82c0688b549..4f346fb977a9 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -11,7 +11,7 @@
*/
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -38,6 +38,8 @@ struct hwbus_priv {
const struct cw1200_platform_data_spi *pdata;
spinlock_t lock; /* Serialize all bus operations */
wait_queue_head_t wq;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerup;
int claimed;
};
@@ -80,7 +82,7 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
#endif
/* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
+ regaddr = (__force u16)cpu_to_le16(regaddr);
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
@@ -145,7 +147,7 @@ static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
#endif
/* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
+ regaddr = (__force u16)cpu_to_le16(regaddr);
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
@@ -275,12 +277,12 @@ static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
free_irq(self->func->irq, self);
}
-static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_off(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (self->reset) {
+ /* Assert RESET, note active low */
+ gpiod_set_value(self->reset, 1);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -291,18 +293,12 @@ static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
return 0;
}
-static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_on(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
/* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
- }
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
- }
- if (pdata->reset || pdata->powerup)
+ gpiod_direction_output(self->reset, 1); /* Active low */
+ gpiod_direction_output(self->powerup, 0);
+ if (self->reset || self->powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -323,13 +319,13 @@ static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (self->powerup) {
+ gpiod_set_value(self->powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (self->reset) {
+ gpiod_set_value(self->reset, 0);
msleep(50); /* Or more..? */
}
return 0;
@@ -381,20 +377,33 @@ static int cw1200_spi_probe(struct spi_device *func)
spi_get_chipselect(func, 0), func->mode, func->bits_per_word,
func->max_speed_hz);
- if (cw1200_spi_on(plat_data)) {
+ self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ pr_err("Can't allocate SPI hwbus_priv.");
+ return -ENOMEM;
+ }
+
+ /* Request reset asserted */
+ self->reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(self->reset))
+ return dev_err_probe(&func->dev, PTR_ERR(self->reset),
+ "could not get reset GPIO\n");
+ gpiod_set_consumer_name(self->reset, "cw1200_wlan_reset");
+
+ self->powerup = devm_gpiod_get_optional(&func->dev, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(self->powerup))
+ return dev_err_probe(&func->dev, PTR_ERR(self->powerup),
+ "could not get powerup GPIO\n");
+ gpiod_set_consumer_name(self->reset, "cw1200_wlan_powerup");
+
+ if (cw1200_spi_on(self, plat_data)) {
pr_err("spi_on() failed!\n");
- return -1;
+ return -ENODEV;
}
if (spi_setup(func)) {
pr_err("spi_setup() failed!\n");
- return -1;
- }
-
- self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
- if (!self) {
- pr_err("Can't allocate SPI hwbus_priv.");
- return -ENOMEM;
+ return -ENODEV;
}
self->pdata = plat_data;
@@ -416,7 +425,7 @@ static int cw1200_spi_probe(struct spi_device *func)
if (status) {
cw1200_spi_irq_unsubscribe(self);
- cw1200_spi_off(plat_data);
+ cw1200_spi_off(self, plat_data);
}
return status;
@@ -434,7 +443,7 @@ static void cw1200_spi_disconnect(struct spi_device *func)
self->core = NULL;
}
}
- cw1200_spi_off(dev_get_platdata(&func->dev));
+ cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
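Both cw1200 buses now use GPIO descriptors. The key semantic shift is that gpiod_set_value() takes logical levels: the active-low inversion of the RSTn line is described once in the board/firmware GPIO mapping, so requesting the line with GPIOD_OUT_HIGH asserts reset (physical low) and writing logical 0 releases it. A minimal sketch under those assumptions:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Sketch only: an active-low "reset" line whose polarity comes from the
 * GPIO description, not from the driver.
 */
static int example_reset_cycle(struct device *dev)
{
	struct gpio_desc *reset;

	/* Request with reset asserted: logical 1 == physical low here */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	msleep(10);			/* hold the part in reset */
	gpiod_set_value(reset, 0);	/* deassert: the device starts */
	return 0;
}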
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 381013e0db63..a54a7b86864f 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -203,6 +203,10 @@ static const unsigned long cw1200_ttl[] = {
};
static const struct ieee80211_ops cw1200_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = cw1200_start,
.stop = cw1200_stop,
.add_interface = cw1200_add_interface,
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd9a41f59f32..0da2d29dd7bd 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1351,6 +1351,10 @@ static struct ieee80211_supported_band wl1251_band_2ghz = {
};
static const struct ieee80211_ops wl1251_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wl1251_op_start,
.stop = wl1251_op_stop,
.add_interface = wl1251_op_add_interface,
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1e082d039b82..2499dc908305 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -233,7 +233,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
cancel_delayed_work(&wlvif->channel_switch_work);
} else {
set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 5736acb4d206..ef12169f8044 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2910,7 +2910,7 @@ static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int ret;
wlvif->aid = vif->cfg.aid;
- wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+ wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
@@ -4242,7 +4242,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
+ (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
@@ -4515,7 +4515,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* Handle new association with HT. Do this after join. */
if (sta_exists) {
bool enabled =
- bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+ bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
ret = wlcore_hw_set_peer_cap(wl,
&sta_ht_cap,
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index eb5482ed76ae..92fb5b8dcdae 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -16,7 +16,6 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>
#include <linux/of.h>
@@ -75,8 +74,8 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
sdio_release_host(func);
- if (WARN_ON(ret))
- dev_err(child->parent, "sdio read failed (%d)\n", ret);
+ if (ret)
+ dev_err_ratelimited(child->parent, "sdio read failed (%d)\n", ret);
if (unlikely(dump)) {
printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
@@ -120,8 +119,8 @@ static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
sdio_release_host(func);
- if (WARN_ON(ret))
- dev_err(child->parent, "sdio write failed (%d)\n", ret);
+ if (ret)
+ dev_err_ratelimited(child->parent, "sdio write failed (%d)\n", ret);
return ret;
}
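The wlcore logging change drops WARN_ON() for I/O errors that can legitimately recur in bursts (a flaky bus, a card going away), where a backtrace per failed transfer is pure noise. A hedged sketch of the resulting pattern, using a hypothetical wrapper:

#include <linux/device.h>
#include <linux/mmc/sdio_func.h>

/* Hypothetical helper: log runtime I/O failures rate-limited and without
 * a WARN backtrace; reserve WARN_ON() for "cannot happen" conditions.
 */
static int example_sdio_read(struct sdio_func *func, struct device *dev,
			     void *dst, unsigned int addr, int len)
{
	int ret = sdio_memcpy_fromio(func, dst, addr, len);

	if (ret)
		dev_err_ratelimited(dev, "sdio read failed (%d)\n", ret);
	return ret;
}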
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index a84340c2075f..b55fe320633c 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
/*
@@ -196,8 +196,11 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_04 = {
.reg_rules = {
REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
- REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, NL80211_RRF_AUTO_BW),
REG_RULE(5260 - 10, 5320 + 10, 80, 0, 30,
+ NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS |
+ NL80211_RRF_AUTO_BW),
+ REG_RULE(5500 - 10, 5720 + 10, 160, 0, 30,
NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS),
REG_RULE(5745 - 10, 5825 + 10, 80, 0, 30, 0),
REG_RULE(5855 - 10, 5925 + 10, 80, 0, 33, 0),
@@ -213,6 +216,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
struct hwsim_vif_priv {
u32 magic;
+ u32 skip_beacons;
u8 bssid[ETH_ALEN];
bool assoc;
bool bcn_en;
@@ -2128,6 +2132,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+ debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
+ &vp->skip_beacons);
+}
+#endif
static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2193,12 +2207,19 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct ieee80211_tx_info *info;
struct ieee80211_rate *txrate;
struct ieee80211_mgmt *mgmt;
/* TODO: get MCS */
int bitrate = 100;
+ if (vp->skip_beacons) {
+ vp->skip_beacons--;
+ dev_kfree_skb(skb);
+ return;
+ }
+
info = IEEE80211_SKB_CB(skb);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(vif, NULL, skb,
@@ -2284,8 +2305,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(link_conf->chanctx_conf)->def.chan);
}
- if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id))
+ ieee80211_csa_finish(vif, link_id);
}
static enum hrtimer_restart
@@ -2462,7 +2483,7 @@ static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION &&
- changed & BSS_CHANGED_MLD_VALID_LINKS) {
+ changed & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM)) {
u16 usable_links = ieee80211_vif_usable_links(vif);
if (vif->active_links != usable_links)
@@ -2653,10 +2674,11 @@ static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
return mac80211_hwsim_sta_add(hw, vif, sta);
/*
- * when client is authorized (AP station marked as such),
- * enable all links
+ * in an MLO connection, when client is authorized
+ * (AP station marked as such), enable all links
*/
- if (vif->type == NL80211_IFTYPE_STATION &&
+ if (ieee80211_vif_is_mld(vif) &&
+ vif->type == NL80211_IFTYPE_STATION &&
new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
ieee80211_set_active_links_async(vif,
ieee80211_vif_usable_links(vif));
@@ -2738,6 +2760,24 @@ static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static enum ieee80211_neg_ttlm_res
+mac80211_hwsim_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u32 i;
+
+ /* For testing purposes, accept if all TIDs are mapped to the same links
+ * set, otherwise reject.
+ */
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->downlink[i] != neg_ttlm->downlink[0])
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -3175,6 +3215,47 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ int i;
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context mode: %u\n", mode);
+
+ for (i = 0; i < n_vifs; i++) {
+ hwsim_check_chanctx_magic(vifs[i].old_ctx);
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context: %d MHz/width: %d/cfreqs:%d/%d MHz -> %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].old_ctx->def.center_freq1,
+ vifs[i].old_ctx->def.center_freq2,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.width,
+ vifs[i].new_ctx->def.center_freq1,
+ vifs[i].new_ctx->def.center_freq2);
+
+ switch (mode) {
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ hwsim_check_chanctx_magic(vifs[i].new_ctx);
+ break;
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ hwsim_set_chanctx_magic(vifs[i].new_ctx);
+ hwsim_clear_chanctx_magic(vifs[i].old_ctx);
+ break;
+ default:
+ WARN_ON("Invalid mode");
+ }
+ }
+ return 0;
+}
+
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
@@ -3839,6 +3920,13 @@ out:
return err;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+#define HWSIM_DEBUGFS_OPS \
+ .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs,
+#else
+#define HWSIM_DEBUGFS_OPS
+#endif
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.wake_tx_queue = ieee80211_handle_wake_tx_queue, \
@@ -3863,7 +3951,8 @@ out:
.get_et_stats = mac80211_hwsim_get_et_stats, \
.get_et_strings = mac80211_hwsim_get_et_strings, \
.start_pmsr = mac80211_hwsim_start_pmsr, \
- .abort_pmsr = mac80211_hwsim_abort_pmsr,
+ .abort_pmsr = mac80211_hwsim_abort_pmsr, \
+ HWSIM_DEBUGFS_OPS
#define HWSIM_NON_MLO_OPS \
.sta_add = mac80211_hwsim_sta_add, \
@@ -3877,6 +3966,10 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
HWSIM_NON_MLO_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
};
#define HWSIM_CHANCTX_OPS \
@@ -3888,7 +3981,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
.remove_chanctx = mac80211_hwsim_remove_chanctx, \
.change_chanctx = mac80211_hwsim_change_chanctx, \
.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\
- .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, \
+ .switch_vif_chanctx = mac80211_hwsim_switch_vif_chanctx,
static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
HWSIM_COMMON_OPS
@@ -3903,6 +3997,7 @@ static const struct ieee80211_ops mac80211_hwsim_mlo_ops = {
.change_vif_links = mac80211_hwsim_change_vif_links,
.change_sta_links = mac80211_hwsim_change_sta_links,
.sta_state = mac80211_hwsim_sta_state,
+ .can_neg_ttlm = mac80211_hwsim_can_neg_ttlm,
};
struct hwsim_new_radio_params {
@@ -4965,6 +5060,33 @@ static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_MESH_POINT) | \
BIT(NL80211_IFTYPE_OCB))
+static const u8 iftypes_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+#define MAC80211_HWSIM_MLD_CAPA_OPS \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS, \
+ IEEE80211_MLD_MAX_NUM_LINKS - 1)
+
+static const struct wiphy_iftype_ext_capab mac80211_hwsim_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = iftypes_ext_capa_ap,
+ .extended_capabilities_mask = iftypes_ext_capa_ap,
+ .extended_capabilities_len = sizeof(iftypes_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_EMLMR_SUPPORT,
+ .mld_capa_and_ops = MAC80211_HWSIM_MLD_CAPA_OPS,
+ },
+};
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -5159,6 +5281,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, AP_LINK_PS);
+
+ hw->wiphy->iftype_ext_capab = mac80211_hwsim_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(mac80211_hwsim_iftypes_ext_capa);
} else {
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
@@ -5309,7 +5435,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
schedule_timeout_interruptible(1);
}
- /* TODO: Add param */
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_DFS_CONCURRENT);
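The new skip_beacons knob shows how little code a per-vif debugfs control needs once the vif_add_debugfs hook is wired up: debugfs_create_u32() binds a file straight to a u32, with no read/write handlers. A sketch, assuming the backing variable outlives the dentry (in hwsim it lives in the vif's drv_priv):

#include <linux/debugfs.h>

static u32 example_skip; /* must outlive the debugfs file */

static void example_add_debugfs(struct dentry *parent)
{
	/* rw for root only; reads and writes go directly to example_skip */
	debugfs_create_u32("skip_beacons", 0600, parent, &example_skip);
}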
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index 4676cdaf4cfd..21b1afd83dc1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2020, 2022-2024 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -84,6 +84,8 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_START_PMSR: request to start peer measurement with the
* %HWSIM_ATTR_PMSR_REQUEST. Result will be sent back asynchronously
* with %HWSIM_CMD_REPORT_PMSR.
+ * @HWSIM_CMD_ABORT_PMSR: Abort previously started peer measurement.
+ * @HWSIM_CMD_REPORT_PMSR: Report peer measurement data.
* @__HWSIM_CMD_MAX: enum limit
*/
enum hwsim_commands {
@@ -298,6 +300,7 @@ enum hwsim_vqs {
* Information about a receiving or transmitting bitrate
* that can be mapped to struct rate_info
*
+ * @__HWSIM_RATE_INFO_ATTR_INVALID: reserved, netlink attribute 0 is invalid
* @HWSIM_RATE_INFO_ATTR_FLAGS: bitflag of flags from &enum rate_info_flags
* @HWSIM_RATE_INFO_ATTR_MCS: mcs index if struct describes an HT/VHT/HE rate
* @HWSIM_RATE_INFO_ATTR_LEGACY: bitrate in 100kbit/s for 802.11abg
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index ba14d83353a4..6a84ec58d618 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -453,7 +453,7 @@ static int virt_wifi_net_device_get_iflink(const struct net_device *dev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
- return priv->lowerdev->ifindex;
+ return READ_ONCE(priv->lowerdev->ifindex);
}
static const struct net_device_ops virt_wifi_ops = {
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_def.h b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
index 8ca2d0aab170..2f55e8deee82 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
@@ -12,7 +12,7 @@
#include <linux/stringify.h>
#include <linux/device.h>
-typedef u16 __nocast zd_addr_t;
+typedef u16 zd_addr_t;
#define dev_printk_f(level, dev, fmt, args...) \
dev_printk(level, dev, "%s() " fmt, __func__, ##args)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 5d534e15a844..900c063bd724 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1343,6 +1343,10 @@ static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops zd_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = zd_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = zd_op_start,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 8505d84eeed6..f3b567a13ded 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -380,7 +380,7 @@ static inline void handle_regs_int(struct urb *urb)
spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
- if (int_num == CR_INTERRUPT) {
+ if (int_num == (u16)CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
@@ -416,7 +416,8 @@ out:
spin_unlock_irqrestore(&intr->lock, flags);
/* CR_INTERRUPT might override read_reg too. */
- if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
+ if (int_num == (u16)CR_INTERRUPT &&
+ atomic_read(&intr->read_regs_enabled))
handle_regs_int_override(urb);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index cc70360364b7..abc41a7089fa 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -57,8 +57,6 @@
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000
-#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
-
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
@@ -161,7 +159,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
skb_reset_tail_pointer(skb);
skb_put(skb, le16_to_cpu(gpd->data_buff_len));
- ret = md_ctrl->recv_skb(queue, skb);
+ ret = queue->recv_skb(queue, skb);
/* Break processing, will try again later */
if (ret < 0)
return ret;
@@ -897,13 +895,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
/**
* t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
* @recv_skb: Receiving skb callback.
*/
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
- md_ctrl->recv_skb = recv_skb;
+ queue->recv_skb = recv_skb;
}
/**
@@ -993,6 +991,28 @@ allow_sleep:
return ret;
}
+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+ int qno;
+
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+ md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+ }
+
+ md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+ md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+ if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+ md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+ t7xx_port_proxy_recv_skb_from_dedicated_queue);
+ }
+}
+
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
char dma_pool_name[32];
@@ -1018,16 +1038,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
-
- md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
- md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
- if (j == CLDMA_RXQ_NUM - 1)
- md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
if (ret) {
dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1094,6 +1107,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct cldma_ctrl *md_ctrl;
+ int qno;
md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
if (!md_ctrl)
@@ -1102,7 +1116,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
md_ctrl->t7xx_dev = t7xx_dev;
md_ctrl->dev = dev;
md_ctrl->hif_id = hif_id;
- md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+ md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
t7xx_hw_info_init(md_ctrl);
t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
return 0;
@@ -1332,9 +1348,10 @@ err_workqueue:
return -ENOMEM;
}
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_adjust_config(md_ctrl, cfg_id);
t7xx_cldma_late_init(md_ctrl);
}
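Moving recv_skb from struct cldma_ctrl to struct cldma_queue is what makes the dedicated dump-queue configuration possible: each RX queue now carries its own handler, so CLDMA_Q_IDX_DUMP can be rebound to the dedicated-queue receive path while the other queues keep the shared proxy handler. A type-level sketch of the dispatch change, with names abbreviated:

#include <linux/skbuff.h>

/* Before: one controller-wide callback. After: the callback hangs off
 * each queue, so queues can differ per configuration.
 */
struct example_queue {
	int (*recv_skb)(struct example_queue *q, struct sk_buff *skb);
};

static int example_rx(struct example_queue *q, struct sk_buff *skb)
{
	return q->recv_skb(q, skb);	/* was md_ctrl->recv_skb(q, skb) */
}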
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index 4410bac6993a..f2d9941be9c8 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -31,6 +31,10 @@
#include "t7xx_cldma.h"
#include "t7xx_pci.h"
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
+#define CLDMA_SHARED_Q_BUFF_SZ 3584
+#define CLDMA_DEDICATED_Q_BUFF_SZ 2048
+
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,11 @@ struct cldma_gpd {
__le16 not_used2;
};
+enum cldma_cfg {
+ CLDMA_SHARED_Q_CFG,
+ CLDMA_DEDICATED_Q_CFG,
+};
+
struct cldma_request {
struct cldma_gpd *gpd; /* Virtual address for CPU */
dma_addr_t gpd_addr; /* Physical address for DMA */
@@ -82,6 +91,7 @@ struct cldma_queue {
wait_queue_head_t req_wq; /* Only for TX */
struct workqueue_struct *worker;
struct work_struct cldma_work;
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
struct cldma_ctrl {
@@ -101,24 +111,22 @@ struct cldma_ctrl {
struct md_pm_entity *pm_entity;
struct t7xx_cldma_hw hw_info;
bool is_late_init;
- int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
+#define CLDMA_Q_IDX_DUMP 1
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_IOC BIT(7)
#define GPD_DMAPOOL_ALIGN 16
-#define CLDMA_MTU 3584 /* 3.5kB */
-
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 24e7d491468e..8d864d4ed77f 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -177,6 +177,11 @@ int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
return t7xx_acpi_reset(t7xx_dev, "_RST");
}
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+}
+
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
u32 val;
@@ -192,6 +197,7 @@ static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
@@ -529,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in core_reset() */
t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
@@ -544,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
md->core_ap.handshake_ongoing = true;
t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
@@ -758,6 +764,7 @@ err_destroy_hswq:
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
struct t7xx_modem *md = t7xx_dev->md;
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
@@ -765,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
if (!md->md_init_finish)
return;
- t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index abe633cf7adc..b39e945a92e0 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -85,6 +85,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 91256e005b84..e0b1e7a616ca 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -52,6 +52,81 @@
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
#define PM_RESOURCE_POLL_STEP_US 100
+static const char * const t7xx_mode_names[] = {
+ [T7XX_UNKNOWN] = "unknown",
+ [T7XX_READY] = "ready",
+ [T7XX_RESET] = "reset",
+ [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
+ [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
+ [T7XX_FASTBOOT_DUMP] = "fastboot_dump",
+};
+
+static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);
+
+static ssize_t t7xx_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+ int index = 0;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == T7XX_FASTBOOT_SWITCHING) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ } else if (index == T7XX_RESET) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
+ t7xx_acpi_pldr_func(t7xx_dev);
+ }
+
+ return count;
+}
+
+static ssize_t t7xx_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum t7xx_mode mode = T7XX_UNKNOWN;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ mode = READ_ONCE(t7xx_dev->mode);
+ if (mode < T7XX_MODE_LAST)
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);
+
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
+}
+
+static DEVICE_ATTR_RW(t7xx_mode);
+
+static struct attribute *t7xx_mode_attr[] = {
+ &dev_attr_t7xx_mode.attr,
+ NULL
+};
+
+static const struct attribute_group t7xx_mode_attribute_group = {
+ .attrs = t7xx_mode_attr,
+};
+
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
+{
+ if (!t7xx_dev)
+ return;
+
+ WRITE_ONCE(t7xx_dev->mode, mode);
+ sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
+}
+
enum t7xx_pm_state {
MTK_PM_EXCEPTION,
MTK_PM_INIT, /* Device initialized, but handshake not completed */
@@ -108,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(&pdev->dev);
- return t7xx_wait_pm_config(t7xx_dev);
+ return 0;
}
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
@@ -279,7 +354,8 @@ static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
int ret;
t7xx_dev = pci_get_drvdata(pdev);
- if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
+ READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
return -EFAULT;
}
@@ -729,16 +805,28 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+ ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+ if (ret)
+ goto err_md_exit;
+
ret = t7xx_interrupt_init(t7xx_dev);
- if (ret) {
- t7xx_md_exit(t7xx_dev);
- return ret;
- }
+ if (ret)
+ goto err_remove_group;
+
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
t7xx_pcie_mac_interrupts_en(t7xx_dev);
return 0;
+
+err_remove_group:
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+
+err_md_exit:
+ t7xx_md_exit(t7xx_dev);
+ return ret;
}
static void t7xx_pci_remove(struct pci_dev *pdev)
@@ -747,6 +835,9 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
int i;
t7xx_dev = pci_get_drvdata(pdev);
+
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
t7xx_md_exit(t7xx_dev);
for (i = 0; i < EXT_INT_NUM; i++) {
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index f08f1ab74469..49a11586d8d8 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -43,6 +43,16 @@ struct t7xx_addr_base {
typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
+enum t7xx_mode {
+ T7XX_UNKNOWN,
+ T7XX_READY,
+ T7XX_RESET,
+ T7XX_FASTBOOT_SWITCHING,
+ T7XX_FASTBOOT_DOWNLOAD,
+ T7XX_FASTBOOT_DUMP,
+ T7XX_MODE_LAST, /* must always be last */
+};
+
/* struct t7xx_pci_dev - MTK device context structure
* @intr_handler: array of handler function for request_threaded_irq
* @intr_thread: array of thread_fn for request_threaded_irq
@@ -59,6 +69,7 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
* @md_pm_lock: protects PCIe sleep lock
* @sleep_disable_count: PCIe L1.2 lock counter
* @sleep_lock_acquire: indicates that sleep has been disabled
+ * @mode: indicates the device mode
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -82,6 +93,7 @@ struct t7xx_pci_dev {
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *debugfs_dir;
#endif
+ u32 mode;
};
enum t7xx_pm_id {
@@ -120,5 +132,5 @@ int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_enti
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
-
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
index 4ae8a00a8532..f74d3bab810d 100644
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -75,6 +75,8 @@ enum port_ch {
PORT_CH_DSS6_TX = 0x20df,
PORT_CH_DSS7_RX = 0x20e0,
PORT_CH_DSS7_TX = 0x20e1,
+
+ PORT_CH_UNIMPORTANT = 0xffff,
};
struct t7xx_port;
@@ -135,11 +137,13 @@ struct t7xx_port {
};
};
+int t7xx_get_port_mtu(struct t7xx_port *port);
struct sk_buff *t7xx_port_alloc_skb(int payload);
struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg);
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
unsigned int ex_msg);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 274846d39fbf..7d6388bf1d7c 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,6 +48,9 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
+#define T7XX_MAX_POSSIBLE_PORTS_NUM \
+ (max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf)))
+
static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
@@ -100,6 +103,21 @@ static const struct t7xx_port_conf t7xx_port_conf[] = {
},
};
+static const struct t7xx_port_conf t7xx_early_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UNIMPORTANT,
+ .rx_ch = PORT_CH_UNIMPORTANT,
+ .txq_index = CLDMA_Q_IDX_DUMP,
+ .rxq_index = CLDMA_Q_IDX_DUMP,
+ .txq_exp_index = CLDMA_Q_IDX_DUMP,
+ .rxq_exp_index = CLDMA_Q_IDX_DUMP,
+ .path_id = CLDMA_ID_AP,
+ .ops = &wwan_sub_port_ops,
+ .name = "fastboot",
+ .port_type = WWAN_PORT_FASTBOOT,
+ },
+};
+
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
const struct t7xx_port_conf *port_conf;
@@ -214,7 +232,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
return 0;
}
-static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+int t7xx_get_port_mtu(struct t7xx_port *port)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ int tx_qno = t7xx_port_get_queue_no(port);
+ struct cldma_ctrl *md_ctrl;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ return md_ctrl->tx_ring[tx_qno].pkt_size;
+}
+
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
enum cldma_id path_id = port->port_conf->path_id;
struct cldma_ctrl *md_ctrl;
@@ -329,6 +357,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
}
}
+/**
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch early port received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0 - Packet consumed.
+ ** -ERROR - Failed to process skb.
+ */
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int ret;
+
+ port = &port_prox->ports[0];
+ if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ port_conf = port->port_conf;
+ ret = port_conf->ops->recv_skb(port, skb);
+ if (ret < 0 && ret != -ENOBUFS) {
+ dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
+ dev_kfree_skb_any(skb);
+ }
+
+ return ret;
+}
+
static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
struct cldma_queue *queue, u16 channel)
{
@@ -359,7 +420,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
** 0 - Packet consumed.
** -ERROR - Failed to process skb.
*/
-static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
@@ -444,33 +505,56 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
spin_lock_init(&port->port_update_lock);
port->chan_enable = false;
- if (port_conf->ops->init)
+ if (port_conf->ops && port_conf->ops->init)
port_conf->ops->init(port);
}
t7xx_proxy_setup_ch_mapping(port_prox);
}
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ u32 port_count;
+ int i;
+
+ t7xx_port_proxy_uninit(port_prox);
+
+ if (cfg_id == PORT_CFG_ID_EARLY) {
+ port_conf = t7xx_early_port_conf;
+ port_count = ARRAY_SIZE(t7xx_early_port_conf);
+ } else {
+ port_conf = t7xx_port_conf;
+ port_count = ARRAY_SIZE(t7xx_port_conf);
+ }
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &port_conf[i];
+
+ port_prox->cfg_id = cfg_id;
+ port_prox->port_count = port_count;
+
+ t7xx_proxy_init_all_ports(md);
+}
+
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
- int i;
- port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ port_prox = devm_kzalloc(dev,
+ struct_size(port_prox,
+ ports,
+ T7XX_MAX_POSSIBLE_PORTS_NUM),
GFP_KERNEL);
if (!port_prox)
return -ENOMEM;
md->port_prox = port_prox;
port_prox->dev = dev;
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
- for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_port_conf[i];
-
- port_prox->port_count = port_count;
- t7xx_proxy_init_all_ports(md);
return 0;
}
@@ -492,8 +576,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
if (ret)
return ret;
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
@@ -505,7 +587,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
- if (port_conf->ops->uninit)
+ if (port_conf->ops && port_conf->ops->uninit)
port_conf->ops->uninit(port);
}
}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
index 81d059fbc0fb..7f5706811445 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -31,11 +31,18 @@
#define RX_QUEUE_MAXLEN 32
#define CTRL_QUEUE_MAXLEN 16
+enum port_cfg_id {
+ PORT_CFG_ID_INVALID,
+ PORT_CFG_ID_NORMAL,
+ PORT_CFG_ID_EARLY,
+};
+
struct port_proxy {
int port_count;
struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
struct device *dev;
+ enum port_cfg_id cfg_id;
struct t7xx_port ports[];
};
@@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int
int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag);
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb);
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb);
#endif /* __T7XX_PORT_PROXY_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 17389c8f6600..4b23ba693f3f 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
+ * Copyright (c) 2024, Fibocom Wireless Inc.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
@@ -15,6 +16,7 @@
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
+ * Jinjian Song <jinjian.song@fibocom.com>
*/
#include <linux/atomic.h>
@@ -33,7 +35,7 @@
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
-static int t7xx_port_ctrl_start(struct wwan_port *port)
+static int t7xx_port_wwan_start(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
@@ -44,30 +46,60 @@ static int t7xx_port_ctrl_start(struct wwan_port *port)
return 0;
}
-static void t7xx_port_ctrl_stop(struct wwan_port *port)
+static void t7xx_port_wwan_stop(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
atomic_dec(&port_mtk->usage_cnt);
}
-static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+static int t7xx_port_fastboot_tx(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct sk_buff *cur = skb, *tx_skb;
+ size_t actual, len, offset = 0;
+ int txq_mtu;
+ int ret;
+
+ txq_mtu = t7xx_get_port_mtu(port);
+ if (txq_mtu < 0)
+ return -EINVAL;
+
+ actual = cur->len;
+ while (actual) {
+ len = min_t(size_t, actual, txq_mtu);
+ tx_skb = __dev_alloc_skb(len, GFP_KERNEL);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ skb_put_data(tx_skb, cur->data + offset, len);
+
+ ret = t7xx_port_send_raw_skb(port, tx_skb);
+ if (ret) {
+ dev_kfree_skb(tx_skb);
+ dev_err(port->dev, "Write error on fastboot port, %d\n", ret);
+ break;
+ }
+ offset += len;
+ actual -= len;
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int t7xx_port_ctrl_tx(struct t7xx_port *port, struct sk_buff *skb)
{
- struct t7xx_port *port_private = wwan_port_get_drvdata(port);
const struct t7xx_port_conf *port_conf;
struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
int cnt = 0, ret;
- if (!port_private->chan_enable)
- return -EINVAL;
-
- port_conf = port_private->port_conf;
- ctl = port_private->t7xx_dev->md->fsm_ctl;
+ port_conf = port->port_conf;
+ ctl = port->t7xx_dev->md->fsm_ctl;
md_state = t7xx_fsm_get_md_state(ctl);
if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
- dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ dev_warn(port->dev, "Cannot write to %s port when md_state=%d\n",
port_conf->name, md_state);
return -ENODEV;
}
@@ -75,10 +107,10 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
while (cur) {
cloned = skb_clone(cur, GFP_KERNEL);
cloned->len = skb_headlen(cur);
- ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
+ ret = t7xx_port_send_skb(port, cloned, 0, 0);
if (ret) {
dev_kfree_skb(cloned);
- dev_err(port_private->dev, "Write error on %s port, %d\n",
+ dev_err(port->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
return cnt ? cnt + ret : ret;
}
@@ -93,14 +125,53 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
return 0;
}
+static int t7xx_port_wwan_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ const struct t7xx_port_conf *port_conf = port_private->port_conf;
+ int ret;
+
+ if (!port_private->chan_enable)
+ return -EINVAL;
+
+ if (port_conf->port_type != WWAN_PORT_FASTBOOT)
+ ret = t7xx_port_ctrl_tx(port_private, skb);
+ else
+ ret = t7xx_port_fastboot_tx(port_private, skb);
+
+ return ret;
+}
+
static const struct wwan_port_ops wwan_ops = {
- .start = t7xx_port_ctrl_start,
- .stop = t7xx_port_ctrl_stop,
- .tx = t7xx_port_ctrl_tx,
+ .start = t7xx_port_wwan_start,
+ .stop = t7xx_port_wwan_stop,
+ .tx = t7xx_port_wwan_tx,
};
+static void t7xx_port_wwan_create(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header), mtu;
+ struct wwan_port_caps caps;
+
+ if (!port->wwan.wwan_port) {
+ mtu = t7xx_get_port_mtu(port);
+ caps.frag_len = mtu - header_len;
+ caps.headroom_len = header_len;
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, &caps, port);
+ if (IS_ERR(port->wwan.wwan_port))
+ dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name);
+ }
+}
+
static int t7xx_port_wwan_init(struct t7xx_port *port)
{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ t7xx_port_wwan_create(port);
+
port->rx_length_th = RX_QUEUE_MAXLEN;
return 0;
}
@@ -152,20 +223,14 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
- unsigned int header_len = sizeof(struct ccci_header);
- struct wwan_port_caps caps;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ return;
if (state != MD_STATE_READY)
return;
- if (!port->wwan.wwan_port) {
- caps.frag_len = CLDMA_MTU - header_len;
- caps.headroom_len = header_len;
- port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, &caps, port);
- if (IS_ERR(port->wwan.wwan_port))
- dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
- }
+ t7xx_port_wwan_create(port);
}
struct port_ops wwan_sub_port_ops = {
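t7xx_port_fastboot_tx() has to respect the dedicated queue's small buffers (CLDMA_DEDICATED_Q_BUFF_SZ is 2048 bytes), so it copies the payload out in MTU-sized pieces instead of queueing the original skb. The shape of that loop as a standalone sketch; send() is a hypothetical callback that consumes its skb on success:

#include <linux/skbuff.h>

static int example_tx_chunked(struct sk_buff *skb, size_t txq_mtu,
			      int (*send)(struct sk_buff *part))
{
	size_t remaining = skb->len, offset = 0;

	while (remaining) {
		size_t len = min_t(size_t, remaining, txq_mtu);
		struct sk_buff *part = __dev_alloc_skb(len, GFP_KERNEL);

		if (!part)
			return -ENOMEM;	/* original skb stays with caller */

		skb_put_data(part, skb->data + offset, len);
		if (send(part)) {
			dev_kfree_skb(part);
			break;		/* stop on first error, as above */
		}
		offset += len;
		remaining -= len;
	}

	dev_kfree_skb(skb);
	return 0;
}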
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
index c41d7d094c08..9c7dc72ac6f6 100644
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -101,11 +101,33 @@ enum t7xx_pm_resume_state {
PM_RESUME_REG_STATE_L2_EXP,
};
+enum host_event_e {
+ HOST_EVENT_INIT = 0,
+ FASTBOOT_DL_NOTIFY = 0x3,
+};
+
#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c
#define MISC_STAGE_MASK GENMASK(2, 0)
#define MISC_RESET_TYPE_PLDR BIT(26)
#define MISC_RESET_TYPE_FLDR BIT(27)
-#define LINUX_STAGE 4
+#define MISC_LK_EVENT_MASK GENMASK(11, 8)
+#define HOST_EVENT_MASK GENMASK(31, 28)
+
+enum lk_event_id {
+ LK_EVENT_NORMAL = 0,
+ LK_EVENT_CREATE_PD_PORT = 1,
+ LK_EVENT_CREATE_POST_DL_PORT = 2,
+ LK_EVENT_RESET = 7,
+};
+
+enum t7xx_device_stage {
+ T7XX_DEV_STAGE_INIT = 0,
+ T7XX_DEV_STAGE_BROM_PRE = 1,
+ T7XX_DEV_STAGE_BROM_POST = 2,
+ T7XX_DEV_STAGE_LK = 3,
+ T7XX_DEV_STAGE_LINUX = 4,
+};
#define T7XX_PCIE_RESOURCE_STATUS 0x0d28
#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 0bc97430211b..9889ca4621cf 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -47,6 +47,13 @@
#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
#define FSM_CMD_TIMEOUT_MS 2000
+#define wait_for_expected_dev_stage(status) \
+ read_poll_timeout(ioread32, status, \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) || \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000, \
+ 20000000, false, IREG_BASE(md->t7xx_dev) + \
+ T7XX_PCIE_MISC_DEV_STATUS)
+
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
@@ -206,6 +213,55 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
+static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
+{
+ u32 value;
+
+ value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+}
+
+static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
+{
+ struct t7xx_modem *md = ctl->md;
+ struct cldma_ctrl *md_ctrl;
+ enum lk_event_id lk_event;
+ struct device *dev;
+ struct t7xx_port *port;
+
+ dev = &md->t7xx_dev->pdev->dev;
+ lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
+ switch (lk_event) {
+ case LK_EVENT_NORMAL:
+ case LK_EVENT_RESET:
+ break;
+
+ case LK_EVENT_CREATE_PD_PORT:
+ case LK_EVENT_CREATE_POST_DL_PORT:
+ md_ctrl = md->md_ctrl[CLDMA_ID_AP];
+ t7xx_cldma_hif_hw_init(md_ctrl);
+ t7xx_cldma_stop(md_ctrl);
+ t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
+
+ port = &ctl->md->port_prox->ports[0];
+ port->port_conf->ops->enable_chl(port);
+
+ t7xx_cldma_start(md_ctrl);
+
+ if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
+ else
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
+ break;
+
+ default:
+ dev_err(dev, "Invalid LK event %d\n", lk_event);
+ break;
+ }
+}
+
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
ctl->curr_state = FSM_STATE_STOPPED;
@@ -226,8 +282,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
- struct t7xx_pci_dev *t7xx_dev;
- struct cldma_ctrl *md_ctrl;
+ struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+ struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
@@ -235,18 +292,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
return;
}
- md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
- t7xx_dev = ctl->md->t7xx_dev;
-
ctl->curr_state = FSM_STATE_STOPPING;
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (!ctl->md->rgu_irq_asserted) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
- /* Wait for the DRM disable to take effect */
- msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING)
+ t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+ /* Wait for the DRM disable to take effect */
+ msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING) {
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ } else {
err = t7xx_acpi_fldr_func(t7xx_dev);
if (err)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
@@ -272,6 +331,7 @@ static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
ctl->curr_state = FSM_STATE_READY;
t7xx_fsm_broadcast_ready_state(ctl);
+ t7xx_mode_update(md->t7xx_dev, T7XX_READY);
t7xx_md_event_notify(md, FSM_READY);
}
@@ -317,7 +377,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
struct t7xx_modem *md = ctl->md;
- u32 dev_status;
+ struct device *dev;
+ u32 status;
int ret;
if (!md)
@@ -329,23 +390,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
return;
}
+ dev = &md->t7xx_dev->pdev->dev;
ctl->curr_state = FSM_STATE_PRE_START;
t7xx_md_event_notify(md, FSM_PRE_START);
- ret = read_poll_timeout(ioread32, dev_status,
- (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
- false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ ret = wait_for_expected_dev_stage(status);
+
if (ret) {
- struct device *dev = &md->t7xx_dev->pdev->dev;
+ dev_err(dev, "read poll timeout %d\n", ret);
+ goto finish_command;
+ }
- fsm_finish_command(ctl, cmd, -ETIMEDOUT);
- dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
- return;
+ if (status != ctl->status || cmd->flag != 0) {
+ u32 stage = FIELD_GET(MISC_STAGE_MASK, status);
+
+ switch (stage) {
+ case T7XX_DEV_STAGE_INIT:
+ case T7XX_DEV_STAGE_BROM_PRE:
+ case T7XX_DEV_STAGE_BROM_POST:
+ dev_dbg(dev, "BROM_STAGE Entered\n");
+ ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
+ break;
+
+ case T7XX_DEV_STAGE_LK:
+ dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_lk_stage_event_handling(ctl, status);
+ break;
+
+ case T7XX_DEV_STAGE_LINUX:
+ dev_dbg(dev, "LINUX_STAGE Entered\n");
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
+ D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
+ if (cmd->flag == 0)
+ break;
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
+ ret = fsm_routine_starting(ctl);
+ break;
+
+ default:
+ break;
+ }
+ ctl->status = status;
}
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
- fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+finish_command:
+ fsm_finish_command(ctl, cmd, ret);
}
static int fsm_main_thread(void *data)
@@ -517,6 +608,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md)
fsm_flush_event_cmd_qs(ctl);
ctl->curr_state = FSM_STATE_STOPPED;
ctl->exp_flg = false;
+ ctl->status = T7XX_DEV_STAGE_INIT;
}
int t7xx_fsm_init(struct t7xx_modem *md)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index b0b3662ae6d7..7b0a9baf488c 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -96,6 +96,7 @@ struct t7xx_fsm_ctl {
bool exp_flg;
spinlock_t notifier_lock; /* Protects notifier list */
struct list_head notifier_list;
+ u32 status; /* Device boot stage */
};
struct t7xx_fsm_event {
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 72e01e550a16..17431f1b1a0c 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -26,7 +26,9 @@
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
-static struct class *wwan_class;
+static const struct class wwan_class = {
+ .name = "wwan",
+};
static int wwan_major;
static struct dentry *wwan_debugfs_dir;
@@ -130,7 +132,7 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ dev = class_find_device(&wwan_class, NULL, parent, wwan_dev_parent_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -147,7 +149,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ dev = class_find_device(&wwan_class, NULL, name, wwan_dev_name_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -183,7 +185,7 @@ static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ dev = class_find_device(&wwan_class, NULL, dir, wwan_dev_debugfs_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -239,7 +241,7 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
}
wwandev->dev.parent = parent;
- wwandev->dev.class = wwan_class;
+ wwandev->dev.class = &wwan_class;
wwandev->dev.type = &wwan_dev_type;
wwandev->id = id;
dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
@@ -265,7 +267,7 @@ done_unlock:
static int is_wwan_child(struct device *dev, void *data)
{
- return dev->class == wwan_class;
+ return dev->class == &wwan_class;
}
static void wwan_remove_dev(struct wwan_device *wwandev)
@@ -328,6 +330,10 @@ static const struct {
.name = "XMMRPC",
.devsuf = "xmmrpc",
},
+ [WWAN_PORT_FASTBOOT] = {
+ .name = "FASTBOOT",
+ .devsuf = "fastboot",
+ },
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -371,7 +377,7 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ dev = class_find_device(&wwan_class, NULL, &minor, wwan_port_minor_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -401,7 +407,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
return -ENOMEM;
/* Collect ids of same name format ports */
- class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type);
while ((dev = class_dev_iter_next(&iter))) {
if (dev->parent != &wwandev->dev)
continue;
@@ -473,7 +479,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
- port->dev.class = wwan_class;
+ port->dev.class = &wwan_class;
port->dev.type = &wwan_port_dev_type;
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
@@ -916,7 +922,7 @@ static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static struct device_type wwan_type = { .name = "wwan" };
+static const struct device_type wwan_type = { .name = "wwan" };
static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
const char *ifname,
@@ -1208,11 +1214,9 @@ static int __init wwan_init(void)
if (err)
return err;
- wwan_class = class_create("wwan");
- if (IS_ERR(wwan_class)) {
- err = PTR_ERR(wwan_class);
+ err = class_register(&wwan_class);
+ if (err)
goto unregister;
- }
/* chrdev used for wwan ports */
wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
@@ -1229,7 +1233,7 @@ static int __init wwan_init(void)
return 0;
destroy:
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
unregister:
rtnl_link_unregister(&wwan_rtnl_link_ops);
return err;
@@ -1240,7 +1244,7 @@ static void __exit wwan_exit(void)
debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
}
module_init(wwan_init);
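The wwan_core.c changes follow the tree-wide pattern of this cycle: class_create(), which heap-allocates a struct class, is replaced by a file-scope const structure registered with class_register(), so the class can live in read-only memory. A minimal sketch of the same conversion in module form:

    #include <linux/device/class.h>
    #include <linux/module.h>

    static const struct class my_class = {
    	.name = "my_class",	/* appears as /sys/class/my_class */
    };

    static int __init my_init(void)
    {
    	return class_register(&my_class);
    }

    static void __exit my_exit(void)
    {
    	class_unregister(&my_class);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");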
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index ff3dd24ddb33..b02befd1b6fb 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -25,7 +25,9 @@ static int wwan_hwsim_devsnum = 2;
module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
MODULE_PARM_DESC(devices, "Number of simulated devices");
-static struct class *wwan_hwsim_class;
+static const struct class wwan_hwsim_class = {
+ .name = "wwan_hwsim",
+};
static struct dentry *wwan_hwsim_debugfs_topdir;
static struct dentry *wwan_hwsim_debugfs_devcreate;
@@ -277,7 +279,7 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
spin_unlock(&wwan_hwsim_devs_lock);
dev->dev.release = wwan_hwsim_dev_release;
- dev->dev.class = wwan_hwsim_class;
+ dev->dev.class = &wwan_hwsim_class;
dev_set_name(&dev->dev, "hwsim%u", dev->id);
spin_lock_init(&dev->ports_lock);
@@ -511,11 +513,9 @@ static int __init wwan_hwsim_init(void)
if (!wwan_wq)
return -ENOMEM;
- wwan_hwsim_class = class_create("wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class)) {
- err = PTR_ERR(wwan_hwsim_class);
+ err = class_register(&wwan_hwsim_class);
+ if (err)
goto err_wq_destroy;
- }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -534,7 +534,7 @@ err_clean_devs:

wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
err_wq_destroy:
destroy_workqueue(wwan_wq);
@@ -547,7 +547,7 @@ static void __exit wwan_hwsim_exit(void)
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
destroy_workqueue(wwan_wq);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index a6d596e05602..3692b56cb58d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
- struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
- if (queue->pf_cache.va) {
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
- queue->pf_cache.va = NULL;
- }
+ page_frag_cache_drain(&queue->pf_cache);
noreclaim_flag = memalloc_noreclaim_save();
/* ->sock will be released by fput() */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c8655fc5aa5b..2aa5762e9f50 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1591,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
- struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1615,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ page_frag_cache_drain(&queue->pf_cache);
kfree(queue);
}
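Both NVMe hunks above replace the open-coded drain (virt_to_head_page() plus __page_frag_cache_drain(), with the caller tracking the refcount bias) with the new page_frag_cache_drain() helper, which also handles an empty cache. A hedged sketch of the allocate/drain lifecycle, with invented context names:

    #include <linux/gfp.h>
    #include <linux/mm_types.h>

    struct my_queue {
    	struct page_frag_cache pf_cache;	/* must start zeroed */
    };

    static void *my_queue_get_buf(struct my_queue *q, unsigned int len)
    {
    	/* Carves len bytes out of a cached (possibly compound) page. */
    	return page_frag_alloc(&q->pf_cache, len, GFP_KERNEL);
    }

    static void my_queue_teardown(struct my_queue *q)
    {
    	/* Drops the cached page and any remaining refcount bias. */
    	page_frag_cache_drain(&q->pf_cache);
    }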
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c3585229c12a..ccee56615f78 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4353,8 +4353,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
- pgprot_device(PAGE_KERNEL));
+ return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
#else
/*
* This architecture does not have memory mapped I/O space,
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5dd5f188e14f..604541dcb320 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -155,6 +155,18 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_FC3W
+ tristate "RENESAS FemtoClock3 Wireless as PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using Renesas FemtoClock3 Wireless
+ as a PTP clock. This clock is only useful if your time stamping
+ MAC is connected to the Renesas chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_fc3.
+
config PTP_1588_CLOCK_MOCK
tristate "Mock-up PTP clock"
depends on PTP_1588_CLOCK
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index dea0cebd2303..68bf02078053 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_FC3W) += ptp_fc3.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
obj-$(CONFIG_PTP_1588_CLOCK_MOCK) += ptp_mock.o
obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 15b804ba4868..c56cd0f63909 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
-#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -16,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <linux/xarray.h>
#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
@@ -25,13 +25,16 @@
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
-struct class *ptp_class;
+const struct class ptp_class = {
+ .name = "ptp",
+ .dev_groups = ptp_groups
+};
/* private globals */
static dev_t ptp_devt;
-static DEFINE_IDA(ptp_clocks_map);
+static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
/* time stamp event queue operations */
@@ -44,18 +47,31 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
struct ptp_clock_event *src)
{
struct ptp_extts_event *dst;
+ struct timespec64 offset_ts;
unsigned long flags;
s64 seconds;
u32 remainder;
- seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ if (src->type == PTP_CLOCK_EXTTS) {
+ seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ } else if (src->type == PTP_CLOCK_EXTOFF) {
+ offset_ts = ns_to_timespec64(src->offset);
+ seconds = offset_ts.tv_sec;
+ remainder = offset_ts.tv_nsec;
+ } else {
+ WARN(1, "%s: unknown type %d\n", __func__, src->type);
+ return;
+ }
spin_lock_irqsave(&queue->lock, flags);
dst = &queue->buf[queue->tail];
dst->index = src->index;
+ dst->flags = PTP_EXTTS_EVENT_VALID;
dst->t.sec = seconds;
dst->t.nsec = remainder;
+ if (src->type == PTP_CLOCK_EXTOFF)
+ dst->flags |= PTP_EXT_OFFSET;
/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
@@ -188,7 +204,7 @@ static void ptp_clock_release(struct device *dev)
bitmap_free(tsevq->mask);
kfree(tsevq);
debugfs_remove(ptp->debugfs_root);
- ida_free(&ptp_clocks_map, ptp->index);
+ xa_erase(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -220,7 +236,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
{
struct ptp_clock *ptp;
struct timestamp_event_queue *queue = NULL;
- int err = 0, index, major = MAJOR(ptp_devt);
+ int err, index, major = MAJOR(ptp_devt);
char debugfsname[16];
size_t size;
@@ -228,16 +244,16 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
return ERR_PTR(-EINVAL);
/* Initialize a clock structure. */
- err = -ENOMEM;
ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
- if (ptp == NULL)
+ if (!ptp) {
+ err = -ENOMEM;
goto no_memory;
+ }
- index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
- if (index < 0) {
- err = index;
+ err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
+ GFP_KERNEL);
+ if (err)
goto no_slot;
- }
ptp->clock.ops = ptp_clock_ops;
ptp->info = info;
@@ -245,13 +261,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->index = index;
INIT_LIST_HEAD(&ptp->tsevqs);
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue)
+ if (!queue) {
+ err = -ENOMEM;
goto no_memory_queue;
+ }
list_add_tail(&queue->qlist, &ptp->tsevqs);
spin_lock_init(&ptp->tsevqs_lock);
queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
- if (!queue->mask)
+ if (!queue->mask) {
+ err = -ENOMEM;
goto no_memory_bitmap;
+ }
bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
spin_lock_init(&queue->lock);
mutex_init(&ptp->pincfg_mux);
@@ -322,7 +342,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Initialize a new device of our class in our clock structure. */
device_initialize(&ptp->dev);
ptp->dev.devt = ptp->devid;
- ptp->dev.class = ptp_class;
+ ptp->dev.class = &ptp_class;
ptp->dev.parent = parent;
ptp->dev.groups = ptp->pin_attr_groups;
ptp->dev.release = ptp_clock_release;
@@ -365,7 +385,7 @@ no_memory_bitmap:
list_del(&queue->qlist);
kfree(queue);
no_memory_queue:
- ida_free(&ptp_clocks_map, index);
+ xa_erase(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
@@ -417,6 +437,7 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
break;
case PTP_CLOCK_EXTTS:
+ case PTP_CLOCK_EXTOFF:
/* Enqueue timestamp on selected queues */
spin_lock_irqsave(&ptp->tsevqs_lock, flags);
list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
@@ -495,19 +516,19 @@ EXPORT_SYMBOL(ptp_cancel_worker_sync);
static void __exit ptp_exit(void)
{
- class_destroy(ptp_class);
+ class_unregister(&ptp_class);
unregister_chrdev_region(ptp_devt, MINORMASK + 1);
- ida_destroy(&ptp_clocks_map);
+ xa_destroy(&ptp_clocks_map);
}
static int __init ptp_init(void)
{
int err;
- ptp_class = class_create("ptp");
- if (IS_ERR(ptp_class)) {
+ err = class_register(&ptp_class);
+ if (err) {
pr_err("ptp: failed to allocate class\n");
- return PTR_ERR(ptp_class);
+ return err;
}
err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
@@ -516,12 +537,11 @@ static int __init ptp_init(void)
goto no_region;
}
- ptp_class->dev_groups = ptp_groups;
pr_info("PTP clock support registered\n");
return 0;
no_region:
- class_destroy(ptp_class);
+ class_unregister(&ptp_class);
return err;
}
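The IDA-to-xarray move means the map now stores the ptp_clock pointers themselves rather than only reserving indices: xa_alloc() picks a free index and stores the entry in one step. A small sketch of the allocate/erase cycle with illustrative names:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(my_map);	/* xarray with ID allocation */

    struct my_obj;

    static int my_track(struct my_obj *obj, u32 *out_id)
    {
    	/* Stores obj at the first free index in [0, INT_MAX]. */
    	return xa_alloc(&my_map, out_id, obj, xa_limit_31b, GFP_KERNEL);
    }

    static void my_untrack(u32 id)
    {
    	xa_erase(&my_map, id);	/* index becomes reusable */
    }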
diff --git a/drivers/ptp/ptp_fc3.c b/drivers/ptp/ptp_fc3.c
new file mode 100644
index 000000000000..6ef982862e27
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idtRC38xxx_reg.h>
+#include <asm/unaligned.h>
+
+#include "ptp_private.h"
+#include "ptp_fc3.h"
+
+MODULE_DESCRIPTION("Driver for IDT FemtoClock3(TM) family");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the firmware file to be loaded
+ * over-rides any automatic selection
+ */
+static char *firmware;
+module_param(firmware, charp, 0);
+
+static s64 ns2counters(struct idtfc3 *idtfc3, s64 nsec, u32 *sub_ns)
+{
+ s64 sync;
+ s32 rem;
+
+ if (likely(nsec >= 0)) {
+ sync = div_u64_rem(nsec, idtfc3->ns_per_sync, &rem);
+ *sub_ns = rem;
+ } else {
+ sync = -div_u64_rem(-nsec - 1, idtfc3->ns_per_sync, &rem) - 1;
+ *sub_ns = idtfc3->ns_per_sync - rem - 1;
+ }
+
+ return sync * idtfc3->ns_per_sync;
+}
+
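The negative branch of ns2counters() implements floored division (round toward minus infinity) on top of div_u64_rem(), which only accepts unsigned operands. A standalone userspace check of the identity, assuming an illustrative ns_per_sync of 1000:

    #include <assert.h>

    int main(void)
    {
    	long long ns_per_sync = 1000;	/* illustrative value */
    	long long nsec = -1;

    	/* Mirrors the nsec < 0 branch above. */
    	long long rem = (-nsec - 1) % ns_per_sync;		/* 0 */
    	long long sync = -((-nsec - 1) / ns_per_sync) - 1;	/* -1 */
    	long long sub_ns = ns_per_sync - rem - 1;		/* 999 */

    	/* Return value plus sub-nanoseconds reconstructs the input. */
    	assert(sync * ns_per_sync + sub_ns == nsec);	/* -1000 + 999 == -1 */
    	return 0;
    }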
+static s64 tdc_meas2offset(struct idtfc3 *idtfc3, u64 meas_read)
+{
+ s64 coarse, fine;
+
+ fine = sign_extend64(FIELD_GET(FINE_MEAS_MASK, meas_read), 12);
+ coarse = sign_extend64(FIELD_GET(COARSE_MEAS_MASK, meas_read), (39 - 13));
+
+ fine = div64_s64(fine * NSEC_PER_SEC, idtfc3->tdc_apll_freq * 62LL);
+ coarse = div64_s64(coarse * NSEC_PER_SEC, idtfc3->time_ref_freq);
+
+ return coarse + fine;
+}
+
+static s64 tdc_offset2phase(struct idtfc3 *idtfc3, s64 offset_ns)
+{
+ if (offset_ns > idtfc3->ns_per_sync / 2)
+ offset_ns -= idtfc3->ns_per_sync;
+
+ return offset_ns * idtfc3->tdc_offset_sign;
+}
+
+static int idtfc3_set_lpf_mode(struct idtfc3 *idtfc3, u8 mode)
+{
+ int err;
+
+ if (mode >= LPF_INVALID)
+ return -EINVAL;
+
+ if (idtfc3->lpf_mode == mode)
+ return 0;
+
+ err = regmap_bulk_write(idtfc3->regmap, LPF_MODE_CNFG, &mode, sizeof(mode));
+ if (err)
+ return err;
+
+ idtfc3->lpf_mode = mode;
+
+ return 0;
+}
+
+static int idtfc3_enable_lpf(struct idtfc3 *idtfc3, bool enable)
+{
+ u8 val;
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable)
+ val |= LPF_EN;
+ else
+ val &= ~LPF_EN;
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+}
+
+static int idtfc3_get_time_ref_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u8 time_ref_div;
+ u8 time_clk_div;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_MEAS_DIV_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+ time_ref_div = FIELD_GET(TIME_REF_DIV_MASK, get_unaligned_le32(buf)) + 1;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_COUNT, buf, 1);
+ if (err)
+ return err;
+ time_clk_div = (buf[0] & TIME_CLOCK_COUNT_MASK) + 1;
+ idtfc3->time_ref_freq = idtfc3->hw_param.time_clk_freq *
+ time_clk_div / time_ref_div;
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_offset_sign(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u32 val;
+ u8 sig1, sig2;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_TDC_FANOUT_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ val = get_unaligned_le32(buf);
+ if ((val & TIME_SYNC_TO_TDC_EN) != TIME_SYNC_TO_TDC_EN) {
+ dev_err(idtfc3->dev, "TIME_SYNC_TO_TDC_EN is off !!!");
+ return -EINVAL;
+ }
+
+ sig1 = FIELD_GET(SIG1_MUX_SEL_MASK, val);
+ sig2 = FIELD_GET(SIG2_MUX_SEL_MASK, val);
+
+ if ((sig1 == sig2) || ((sig1 != TIME_SYNC) && (sig2 != TIME_SYNC))) {
+ dev_err(idtfc3->dev, "Invalid tdc_mux_sel sig1=%d sig2=%d", sig1, sig2);
+ return -EINVAL;
+ } else if (sig1 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = 1;
+ } else if (sig2 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = -1;
+ }
+
+ return 0;
+}
+
+static int idtfc3_lpf_bw(struct idtfc3 *idtfc3, u8 shift, u8 mult)
+{
+ u8 val = FIELD_PREP(LPF_BW_SHIFT, shift) | FIELD_PREP(LPF_BW_MULT, mult);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_BW_CNFG, &val, sizeof(val));
+}
+
+static int idtfc3_enable_tdc(struct idtfc3 *idtfc3, bool enable, u8 meas_mode)
+{
+ int err;
+ u8 val = 0;
+
+ /* Disable TDC first */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (!enable)
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_DEFAULT, LPF_BW_MULT_DEFAULT);
+
+ if (meas_mode >= MEAS_MODE_INVALID)
+ return -EINVAL;
+
+ /* Change TDC meas mode */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CNFG,
+ &meas_mode, sizeof(meas_mode));
+ if (err)
+ return err;
+
+ /* Enable TDC */
+ val = TDC_MEAS_EN;
+ if (meas_mode == CONTINUOUS)
+ val |= TDC_MEAS_START;
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_1PPS, LPF_BW_MULT_DEFAULT);
+}
+
+static bool get_tdc_meas(struct idtfc3 *idtfc3, s64 *offset_ns)
+{
+ bool valid = false;
+ u8 buf[9];
+ u8 val;
+ int err;
+
+ while (true) {
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS,
+ &val, sizeof(val));
+ if (err)
+ return false;
+
+ if (val & FIFO_EMPTY)
+ break;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return false;
+
+ valid = true;
+ }
+
+ if (valid)
+ *offset_ns = tdc_meas2offset(idtfc3, get_unaligned_le64(&buf[1]));
+
+ return valid;
+}
+
+static int check_tdc_fifo_overrun(struct idtfc3 *idtfc3)
+{
+ u8 val;
+ int err;
+
+ /* Check if FIFO is overrun */
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (!(val & FIFO_FULL))
+ return 0;
+
+ dev_warn(idtfc3->dev, "TDC FIFO overrun !!!");
+
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int get_tdc_meas_continuous(struct idtfc3 *idtfc3)
+{
+ int err;
+ s64 offset_ns;
+ struct ptp_clock_event event;
+
+ err = check_tdc_fifo_overrun(idtfc3);
+ if (err)
+ return err;
+
+ if (get_tdc_meas(idtfc3, &offset_ns) && offset_ns >= 0) {
+ event.index = 0;
+ event.offset = tdc_offset2phase(idtfc3, offset_ns);
+ event.type = PTP_CLOCK_EXTOFF;
+ ptp_clock_event(idtfc3->ptp_clock, &event);
+ }
+
+ return 0;
+}
+
+static int idtfc3_read_subcounter(struct idtfc3 *idtfc3)
+{
+ u8 buf[5] = {0};
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TOD_COUNTER_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* sync_counter_value is [31:82] and sub_sync_counter_value is [0:30] */
+ return get_unaligned_le32(&buf[1]) & SUB_SYNC_COUNTER_MASK;
+}
+
+static int idtfc3_tod_update_is_done(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 req;
+
+ err = read_poll_timeout_atomic(regmap_bulk_read, err, !req, USEC_PER_MSEC,
+ idtfc3->tc_write_timeout, true, idtfc3->regmap,
+ TOD_SYNC_LOAD_REQ_CTRL, &req, 1);
+ if (err)
+ dev_err(idtfc3->dev, "TOD counter write timeout !!!");
+
+ return err;
+}
+
+static int idtfc3_write_subcounter(struct idtfc3 *idtfc3, u32 counter)
+{
+ u8 buf[18] = {0};
+ int err;
+
+ /* sync_counter_value is [31:82] and sub_sync_counter_value is [0:30] */
+ put_unaligned_le32(counter & SUB_SYNC_COUNTER_MASK, &buf[0]);
+
+ buf[16] = SUB_SYNC_LOAD_ENABLE | SYNC_LOAD_ENABLE;
+ buf[17] = SYNC_LOAD_REQ;
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ return idtfc3_tod_update_is_done(idtfc3);
+}
+
+static int idtfc3_timecounter_update(struct idtfc3 *idtfc3, u32 counter, s64 ns)
+{
+ int err;
+
+ err = idtfc3_write_subcounter(idtfc3, counter);
+ if (err)
+ return err;
+
+ /* Update time counter */
+ idtfc3->ns = ns;
+ idtfc3->last_counter = counter;
+
+ return 0;
+}
+
+static int idtfc3_timecounter_read(struct idtfc3 *idtfc3)
+{
+ int now, delta;
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ /* calculate the delta since the last idtfc3_timecounter_read(): */
+ if (now >= idtfc3->last_counter)
+ delta = now - idtfc3->last_counter;
+ else
+ delta = idtfc3->sub_sync_count - idtfc3->last_counter + now;
+
+ /* Update time counter */
+ idtfc3->ns += delta * idtfc3->ns_per_counter;
+ idtfc3->last_counter = now;
+
+ return 0;
+}
+
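idtfc3_timecounter_read() extends a wrapping hardware counter into the monotonically growing 64-bit idtfc3->ns; the else branch reconstructs the distance across one wrap, which is only safe if the function runs at least once per wrap period (the aux worker below guarantees that). The wrap arithmetic as a standalone check, with an illustrative counter size:

    #include <assert.h>

    int main(void)
    {
    	unsigned int count = 100;		/* counter wraps back to 0 here */
    	unsigned int last = 90, now = 15;	/* one wrap occurred in between */
    	unsigned int delta;

    	if (now >= last)
    		delta = now - last;
    	else
    		delta = count - last + now;	/* 100 - 90 + 15 = 25 ticks */

    	assert(delta == 25);
    	return 0;
    }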
+static int _idtfc3_gettime(struct idtfc3 *idtfc3, struct timespec64 *ts)
+{
+ int err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ *ts = ns_to_timespec64(idtfc3->ns);
+
+ return 0;
+}
+
+static int idtfc3_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_gettime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_settime(struct idtfc3 *idtfc3, const struct timespec64 *ts)
+{
+ s64 offset_ns, now_ns;
+ u32 counter, sub_ns;
+ int now;
+
+ if (!timespec64_valid(ts)) {
+ dev_err(idtfc3->dev, "%s: invalid timespec", __func__);
+ return -EINVAL;
+ }
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ offset_ns = (idtfc3->sub_sync_count - now) * idtfc3->ns_per_counter;
+ now_ns = timespec64_to_ns(ts);
+ (void)ns2counters(idtfc3, offset_ns + now_ns, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, now_ns);
+}
+
+static int idtfc3_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_settime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjtime(struct idtfc3 *idtfc3, s64 delta)
+{
+ /*
+ * The TOD counter can be synchronously loaded with any value,
+ * to be loaded on the next Time Sync pulse
+ */
+ s64 sync_ns;
+ u32 sub_ns;
+ u32 counter;
+
+ if (idtfc3->ns + delta < 0) {
+ dev_err(idtfc3->dev, "%lld ns adj is too large", delta);
+ return -EINVAL;
+ }
+
+ sync_ns = ns2counters(idtfc3, delta + idtfc3->ns_per_sync, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, idtfc3->ns + sync_ns +
+ counter * idtfc3->ns_per_counter);
+}
+
+static int idtfc3_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjtime(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjphase(struct idtfc3 *idtfc3, s32 delta)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 pcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WP);
+ if (err)
+ return err;
+
+ /*
+ * Phase Control Word unit is: 10^9 / (TDC_APLL_FREQ * 124)
+ *
+ * delta * TDC_APLL_FREQ * 124
+ * PCW = ---------------------------
+ * 10^9
+ *
+ */
+ pcw = div_s64((s64)delta * idtfc3->tdc_apll_freq * 124, NSEC_PER_SEC);
+
+ put_unaligned_le64(pcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_PHASE_CTRL, buf, sizeof(buf));
+}
+
+static int idtfc3_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjphase(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjfine(struct idtfc3 *idtfc3, long scaled_ppm)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 fcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WF);
+ if (err)
+ return err;
+
+ /*
+ * Frequency Control Word unit is: 2^-44 * 10^6 ppm
+ *
+ * adjfreq:
+ * ppb * 2^44
+ * FCW = ----------
+ * 10^9
+ *
+ * adjfine:
+ * ppm_16 * 2^28
+ * FCW = -------------
+ * 10^6
+ */
+ fcw = scaled_ppm * BIT(28);
+ fcw = div_s64(fcw, 1000000);
+
+ put_unaligned_le64(fcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_FREQ_CTRL, buf, sizeof(buf));
+}
+
+static int idtfc3_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjfine(idtfc3, scaled_ppm);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
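To make the FCW comment concrete: scaled_ppm carries ppm with a 16-bit binary fraction, so scaled_ppm * 2^28 / 10^6 equals ppm * 2^44 / 10^6, matching the register unit of 2^-44 * 10^6 ppm. A worked value as a standalone check:

    #include <assert.h>

    int main(void)
    {
    	long long scaled_ppm = 65536;	/* exactly +1 ppm (ppm << 16) */
    	long long fcw = (scaled_ppm << 28) / 1000000;

    	/* 2^44 / 10^6 = 17592186044416 / 1000000, truncated */
    	assert(fcw == 17592186);
    	return 0;
    }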
+static int idtfc3_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtfc3->lock);
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ err = 0;
+ /* Only accept a 1-PPS aligned to the second. */
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = 0;
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (on) {
+ /* Only accept requests for external phase offset */
+ if ((rq->extts.flags & PTP_EXT_OFFSET) != (PTP_EXT_OFFSET))
+ err = -EOPNOTSUPP;
+ else
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ } else {
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(idtfc3->lock);
+
+ if (err)
+ dev_err(idtfc3->dev, "Failed in %s with err %d!", __func__, err);
+
+ return err;
+}
+
+static long idtfc3_aux_work(struct ptp_clock_info *ptp)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ static int tdc_get;
+
+ mutex_lock(idtfc3->lock);
+ tdc_get %= TDC_GET_PERIOD;
+ if ((tdc_get == 0) || (tdc_get == TDC_GET_PERIOD / 2))
+ idtfc3_timecounter_read(idtfc3);
+ get_tdc_meas_continuous(idtfc3);
+ tdc_get++;
+ mutex_unlock(idtfc3->lock);
+
+ return idtfc3->tc_update_period;
+}
+
+static const struct ptp_clock_info idtfc3_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = MAX_FFO_PPB,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+ .adjphase = &idtfc3_adjphase,
+ .adjfine = &idtfc3_adjfine,
+ .adjtime = &idtfc3_adjtime,
+ .gettime64 = &idtfc3_gettime,
+ .settime64 = &idtfc3_settime,
+ .enable = &idtfc3_enable,
+ .do_aux_work = &idtfc3_aux_work,
+};
+
+static int idtfc3_hw_calibrate(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+ u8 val;
+
+ mdelay(10);
+ /*
+ * Toggle TDC_DAC_RECAL_REQ:
+ * (1) set tdc_en to 1
+ * (2) set tdc_dac_recal_req to 0
+ * (3) set tdc_dac_recal_req to 1
+ */
+ val = TDC_EN;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = TDC_EN | TDC_DAC_RECAL_REQ;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ /*
+ * Toggle APLL_REINIT:
+ * (1) set apll_reinit to 0
+ * (2) set apll_reinit to 1
+ */
+ val = 0;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = APLL_REINIT;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ return err;
+}
+
+static int idtfc3_init_timecounter(struct idtfc3 *idtfc3)
+{
+ int err;
+ u32 period_ms;
+
+ period_ms = idtfc3->sub_sync_count * MSEC_PER_SEC /
+ idtfc3->hw_param.time_clk_freq;
+
+ idtfc3->tc_update_period = msecs_to_jiffies(period_ms / TDC_GET_PERIOD);
+ idtfc3->tc_write_timeout = period_ms * USEC_PER_MSEC;
+
+ err = idtfc3_timecounter_update(idtfc3, 0, 0);
+ if (err)
+ return err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ ptp_schedule_worker(idtfc3->ptp_clock, idtfc3->tc_update_period);
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_apll_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 tdc_fb_div_int;
+ u8 tdc_ref_div;
+ struct idtfc3_hw_param *param = &idtfc3->hw_param;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_REF_DIV_CNFG,
+ &tdc_ref_div, sizeof(tdc_ref_div));
+ if (err)
+ return err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FB_DIV_INT_CNFG,
+ &tdc_fb_div_int, sizeof(tdc_fb_div_int));
+ if (err)
+ return err;
+
+ tdc_fb_div_int &= TDC_FB_DIV_INT_MASK;
+ tdc_ref_div &= TDC_REF_DIV_CONFIG_MASK;
+
+ idtfc3->tdc_apll_freq = div_u64(param->xtal_freq * (u64)tdc_fb_div_int,
+ 1 << tdc_ref_div);
+
+ return 0;
+}
+
+static int idtfc3_get_fod(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 fod;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_SRC, &fod, sizeof(fod));
+ if (err)
+ return err;
+
+ switch (fod) {
+ case 0:
+ idtfc3->fod_n = FOD_0;
+ break;
+ case 1:
+ idtfc3->fod_n = FOD_1;
+ break;
+ case 2:
+ idtfc3->fod_n = FOD_2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_get_sync_count(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+
+ err = regmap_bulk_read(idtfc3->regmap, SUB_SYNC_GEN_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ idtfc3->sub_sync_count = (get_unaligned_le32(buf) & SUB_SYNC_COUNTER_MASK) + 1;
+ idtfc3->ns_per_counter = NSEC_PER_SEC / idtfc3->hw_param.time_clk_freq;
+ idtfc3->ns_per_sync = idtfc3->sub_sync_count * idtfc3->ns_per_counter;
+
+ return 0;
+}
+
+static int idtfc3_setup_hw_param(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ err = idtfc3_get_fod(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_sync_count(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_time_ref_freq(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_get_tdc_apll_freq(idtfc3);
+}
+
+static int idtfc3_configure_hw(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+
+ err = idtfc3_hw_calibrate(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_lpf(idtfc3, true);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ if (err)
+ return err;
+
+ err = idtfc3_get_tdc_offset_sign(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_setup_hw_param(idtfc3);
+}
+
+static int idtfc3_set_overhead(struct idtfc3 *idtfc3)
+{
+ s64 current_ns = 0;
+ s64 lowest_ns = 0;
+ int err;
+ u8 i;
+ ktime_t start;
+ ktime_t stop;
+ ktime_t diff;
+ char buf[18] = {0};
+
+ for (i = 0; i < 5; i++) {
+ start = ktime_get_raw();
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ diff = ktime_sub(stop, start);
+
+ current_ns = ktime_to_ns(diff);
+
+ if (i == 0) {
+ lowest_ns = current_ns;
+ } else {
+ if (current_ns < lowest_ns)
+ lowest_ns = current_ns;
+ }
+ }
+
+ idtfc3->tod_write_overhead = lowest_ns;
+
+ return err;
+}
+
+static int idtfc3_enable_ptp(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ idtfc3->caps = idtfc3_caps;
+ snprintf(idtfc3->caps.name, sizeof(idtfc3->caps.name), "IDT FC3W");
+ idtfc3->ptp_clock = ptp_clock_register(&idtfc3->caps, NULL);
+
+ if (IS_ERR(idtfc3->ptp_clock)) {
+ err = PTR_ERR(idtfc3->ptp_clock);
+ idtfc3->ptp_clock = NULL;
+ return err;
+ }
+
+ err = idtfc3_set_overhead(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_init_timecounter(idtfc3);
+ if (err)
+ return err;
+
+ dev_info(idtfc3->dev, "TIME_SYNC_CHANNEL registered as ptp%d",
+ idtfc3->ptp_clock->index);
+
+ return 0;
+}
+
+static int idtfc3_load_firmware(struct idtfc3 *idtfc3)
+{
+ char fname[128] = FW_FILENAME;
+ const struct firmware *fw;
+ struct idtfc3_fwrc *rec;
+ u16 addr;
+ u8 val;
+ int err;
+ s32 len;
+
+ idtfc3_default_hw_param(&idtfc3->hw_param);
+
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
+
+ dev_info(idtfc3->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, idtfc3->dev);
+
+ if (err) {
+ dev_err(idtfc3->dev,
+ "requesting firmware failed with err %d!\n", err);
+ return err;
+ }
+
+ dev_dbg(idtfc3->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtfc3_fwrc *)fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ if (rec->reserved) {
+ dev_err(idtfc3->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ addr = rec->hiaddr << 8 | rec->loaddr;
+
+ rec++;
+
+ err = idtfc3_set_hw_param(&idtfc3->hw_param, addr, val);
+ }
+
+ if (err != -EINVAL) {
+ err = 0;
+
+ /* Max register */
+ if (addr >= 0xE88)
+ continue;
+
+ err = regmap_bulk_write(idtfc3->regmap, addr,
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ err = idtfc3_configure_hw(idtfc3);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtfc3_read_device_id(struct idtfc3 *idtfc3, u16 *device_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = regmap_bulk_read(idtfc3->regmap, DEVICE_ID,
+ &buf, sizeof(buf));
+ if (err) {
+ dev_err(idtfc3->dev, "%s failed with %d", __func__, err);
+ return err;
+ }
+
+ *device_id = get_unaligned_le16(buf);
+
+ return 0;
+}
+
+static int idtfc3_check_device_compatibility(struct idtfc3 *idtfc3)
+{
+ int err;
+ u16 device_id;
+
+ err = idtfc3_read_device_id(idtfc3, &device_id);
+ if (err)
+ return err;
+
+ if ((device_id & DEVICE_ID_MASK) == 0) {
+ dev_err(idtfc3->dev, "invalid device");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_probe(struct platform_device *pdev)
+{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct idtfc3 *idtfc3;
+ int err;
+
+ idtfc3 = devm_kzalloc(&pdev->dev, sizeof(struct idtfc3), GFP_KERNEL);
+
+ if (!idtfc3)
+ return -ENOMEM;
+
+ idtfc3->dev = &pdev->dev;
+ idtfc3->mfd = pdev->dev.parent;
+ idtfc3->lock = &ddata->lock;
+ idtfc3->regmap = ddata->regmap;
+
+ mutex_lock(idtfc3->lock);
+
+ err = idtfc3_check_device_compatibility(idtfc3);
+ if (err) {
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ err = idtfc3_load_firmware(idtfc3);
+ if (err) {
+ if (err == -ENOENT) {
+ mutex_unlock(idtfc3->lock);
+ return -EPROBE_DEFER;
+ }
+ dev_warn(idtfc3->dev, "loading firmware failed with %d", err);
+ }
+
+ err = idtfc3_enable_ptp(idtfc3);
+ if (err) {
+ dev_err(idtfc3->dev, "idtfc3_enable_ptp failed with %d", err);
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ mutex_unlock(idtfc3->lock);
+
+ platform_set_drvdata(pdev, idtfc3);
+
+ return 0;
+}
+
+static void idtfc3_remove(struct platform_device *pdev)
+{
+ struct idtfc3 *idtfc3 = platform_get_drvdata(pdev);
+
+ ptp_clock_unregister(idtfc3->ptp_clock);
+}
+
+static struct platform_driver idtfc3_driver = {
+ .driver = {
+ .name = "rc38xxx-phc",
+ },
+ .probe = idtfc3_probe,
+ .remove_new = idtfc3_remove,
+};
+
+module_platform_driver(idtfc3_driver);
diff --git a/drivers/ptp/ptp_fc3.h b/drivers/ptp/ptp_fc3.h
new file mode 100644
index 000000000000..897101579207
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTFC3_H
+#define PTP_IDTFC3_H
+
+#include <linux/ktime.h>
+#include <linux/ptp_clock.h>
+#include <linux/regmap.h>
+
+#define FW_FILENAME "idtfc3.bin"
+
+#define MAX_FFO_PPB (244000)
+#define TDC_GET_PERIOD (10)
+
+struct idtfc3 {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct device *dev;
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
+ struct idtfc3_hw_param hw_param;
+ u32 sub_sync_count;
+ u32 ns_per_sync;
+ int tdc_offset_sign;
+ u64 tdc_apll_freq;
+ u32 time_ref_freq;
+ u16 fod_n;
+ u8 lpf_mode;
+ /* Time counter */
+ u32 last_counter;
+ s64 ns;
+ u32 ns_per_counter;
+ u32 tc_update_period;
+ u32 tc_write_timeout;
+ s64 tod_write_overhead;
+};
+
+#endif /* PTP_IDTFC3_H */
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5f858e426bbd..6506cfb89aa9 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -34,6 +34,9 @@
#define PCI_VENDOR_ID_OROLIA 0x1ad7
#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000
+#define PCI_VENDOR_ID_ADVA 0xad5a
+#define PCI_DEVICE_ID_ADVA_TIMECARD 0x0400
+
static struct class timecard_class = {
.name = "timecard",
};
@@ -63,6 +66,13 @@ struct ocp_reg {
u32 status_drift;
};
+struct ptp_ocp_servo_conf {
+ u32 servo_offset_p;
+ u32 servo_offset_i;
+ u32 servo_drift_p;
+ u32 servo_drift_i;
+};
+
#define OCP_CTRL_ENABLE BIT(0)
#define OCP_CTRL_ADJUST_TIME BIT(1)
#define OCP_CTRL_ADJUST_OFFSET BIT(2)
@@ -397,10 +407,14 @@ static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+
static const struct ocp_attr_group fb_timecard_groups[];
static const struct ocp_attr_group art_timecard_groups[];
+static const struct ocp_attr_group adva_timecard_groups[];
+
struct ptp_ocp_eeprom_map {
u16 off;
u16 len;
@@ -700,6 +714,12 @@ static struct ocp_resource ocp_fb_resource[] = {
},
{
.setup = ptp_ocp_fb_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -831,6 +851,170 @@ static struct ocp_resource ocp_art_resource[] = {
},
{
.setup = ptp_ocp_art_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
+ },
+ { }
+};
+
+static struct ocp_resource ocp_adva_resource[] = {
+ {
+ OCP_MEM_RESOURCE(reg),
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+ OCP_EXT_RESOURCE(ts0),
+ .offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 0,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts1),
+ .offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ /* Timestamp for PHC and/or PPS generator */
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 5,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[0]),
+ .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[1]),
+ .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_ext),
+ .offset = 0x01030000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_clk),
+ .offset = 0x01040000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(tod),
+ .offset = 0x01050000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(image),
+ .offset = 0x00020000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_select),
+ .offset = 0x00130000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map1),
+ .offset = 0x00140000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map2),
+ .offset = 0x00220000, .size = 0x1000,
+ },
+ {
+ OCP_SERIAL_RESOURCE(gnss_port),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+ },
+ },
+ {
+ OCP_SERIAL_RESOURCE(mac_port),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[0]),
+ .offset = 0x01200000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[1]),
+ .offset = 0x01210000, .size = 0x10000,
+ },
+ {
+ OCP_SPI_RESOURCE(spi_flash),
+ .offset = 0x00310400, .size = 0x10000, .irq_vec = 9,
+ .extra = &(struct ptp_ocp_flash_info) {
+ .name = "spi_altera", .pci_offset = 0,
+ .data_size = sizeof(struct altera_spi_platform_data),
+ .data = &(struct altera_spi_platform_data) {
+ .num_chipselect = 1,
+ .num_devices = 1,
+ .devices = &(struct spi_board_info) {
+ .modalias = "spi-nor",
+ },
+ },
+ },
+ },
+ {
+ OCP_I2C_RESOURCE(i2c_ctrl),
+ .offset = 0x150000, .size = 0x100, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "ocores-i2c",
+ .fixed_rate = 50000000,
+ .data_size = sizeof(struct ocores_i2c_platform_data),
+ .data = &(struct ocores_i2c_platform_data) {
+ .clock_khz = 50000,
+ .bus_khz = 100,
+ .reg_io_width = 4, // 32-bit/4-byte
+ .reg_shift = 2, // 32-bit addressing
+ .num_devices = 2,
+ .devices = (struct i2c_board_info[]) {
+ { I2C_BOARD_INFO("24c02", 0x50) },
+ { I2C_BOARD_INFO("24mac402", 0x58),
+ .platform_data = "mac" },
+ },
+ },
+ },
+ },
+ {
+ .setup = ptp_ocp_adva_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0xc000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -839,6 +1023,7 @@ static const struct pci_device_id ptp_ocp_pcidev_id[] = {
{ PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
+ { PCI_DEVICE_DATA(ADVA, TIMECARD, &ocp_adva_resource) },
{ }
};
MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
@@ -917,6 +1102,30 @@ static const struct ocp_selector ptp_ocp_art_sma_out[] = {
{ }
};
+static const struct ocp_selector ptp_ocp_adva_sma_in[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PPS1", .value = 0x0001, .frequency = 1 },
+ { .name = "PPS2", .value = 0x0002, .frequency = 1 },
+ { .name = "TS1", .value = 0x0004, .frequency = 0 },
+ { .name = "TS2", .value = 0x0008, .frequency = 0 },
+ { .name = "FREQ1", .value = 0x0100, .frequency = 0 },
+ { .name = "FREQ2", .value = 0x0200, .frequency = 0 },
+ { .name = "None", .value = SMA_DISABLE, .frequency = 0 },
+ { }
+};
+
+static const struct ocp_selector ptp_ocp_adva_sma_out[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PHC", .value = 0x0001, .frequency = 1 },
+ { .name = "MAC", .value = 0x0002, .frequency = 1 },
+ { .name = "GNSS1", .value = 0x0004, .frequency = 1 },
+ { .name = "GEN1", .value = 0x0040 },
+ { .name = "GEN2", .value = 0x0080 },
+ { .name = "GND", .value = 0x2000 },
+ { .name = "VCC", .value = 0x4000 },
+ { }
+};
+
struct ocp_sma_op {
const struct ocp_selector *tbl[2];
void (*init)(struct ptp_ocp *bp);
@@ -1363,7 +1572,7 @@ ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
}
static int
-ptp_ocp_init_clock(struct ptp_ocp *bp)
+ptp_ocp_init_clock(struct ptp_ocp *bp, struct ptp_ocp_servo_conf *servo_conf)
{
struct timespec64 ts;
u32 ctrl;
@@ -1371,12 +1580,11 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
- /* NO DRIFT Correction */
- /* offset_p:i 1/8, offset_i: 1/16, drift_p: 0, drift_i: 0 */
- iowrite32(0x2000, &bp->reg->servo_offset_p);
- iowrite32(0x1000, &bp->reg->servo_offset_i);
- iowrite32(0, &bp->reg->servo_drift_p);
- iowrite32(0, &bp->reg->servo_drift_i);
+ /* servo configuration */
+ iowrite32(servo_conf->servo_offset_p, &bp->reg->servo_offset_p);
+ iowrite32(servo_conf->servo_offset_i, &bp->reg->servo_offset_i);
+ iowrite32(servo_conf->servo_drift_p, &bp->reg->servo_drift_p);
+ iowrite32(servo_conf->servo_drift_i, &bp->reg->servo_drift_i);
/* latch servo values */
ctrl |= OCP_CTRL_ADJUST_SERVO;
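The values moved into struct ptp_ocp_servo_conf keep the encoding the deleted comment documented: the servo gain registers appear to be Q16 fixed point (value / 2^16), so 0x2000 is the 1/8 offset P gain and 0x1000 the 1/16 offset I gain, while the ADVA board asks for a stiffer 0xc000 = 3/4 P term. A quick check of that reading:

    #include <assert.h>

    int main(void)
    {
    	/* Assumes Q16 encoding, inferred from the removed comment. */
    	assert(0x2000 / 65536.0 == 0.125);	/* offset P = 1/8  */
    	assert(0x1000 / 65536.0 == 0.0625);	/* offset I = 1/16 */
    	assert(0xc000 / 65536.0 == 0.75);	/* ADVA offset P   */
    	return 0;
    }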
@@ -2348,6 +2556,14 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
.set_output = ptp_ocp_sma_fb_set_output,
};
+static const struct ocp_sma_op ocp_adva_sma_op = {
+ .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
+ .init = ptp_ocp_sma_fb_init,
+ .get = ptp_ocp_sma_fb_get,
+ .set_inputs = ptp_ocp_sma_fb_set_inputs,
+ .set_output = ptp_ocp_sma_fb_set_output,
+};
+
static int
ptp_ocp_set_pins(struct ptp_ocp *bp)
{
@@ -2427,7 +2643,7 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
return err;
ptp_ocp_sma_init(bp);
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
}
static bool
@@ -2589,7 +2805,44 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
if (err)
return err;
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
+}
+
+/* ADVA specific board initializers; last "resource" registered. */
+static int
+ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ int err;
+ u32 version;
+
+ bp->flash_start = 0xA00000;
+ bp->eeprom_map = fb_eeprom_map;
+ bp->sma_op = &ocp_adva_sma_op;
+
+ version = ioread32(&bp->image->version);
+ /* if lower 16 bits are empty, this is the fw loader. */
+ if ((version & 0xffff) == 0) {
+ version = version >> 16;
+ bp->fw_loader = true;
+ }
+ bp->fw_tag = 3;
+ bp->fw_version = version & 0xffff;
+ bp->fw_cap = OCP_CAP_BASIC | OCP_CAP_SIGNAL | OCP_CAP_FREQ;
+
+ ptp_ocp_tod_init(bp);
+ ptp_ocp_nmea_out_init(bp);
+ ptp_ocp_signal_init(bp);
+
+ err = ptp_ocp_attr_group_add(bp, adva_timecard_groups);
+ if (err)
+ return err;
+
+ err = ptp_ocp_set_pins(bp);
+ if (err)
+ return err;
+ ptp_ocp_sma_init(bp);
+
+ return ptp_ocp_init_clock(bp, r->extra);
}
static ssize_t
@@ -3564,6 +3817,37 @@ static const struct ocp_attr_group art_timecard_groups[] = {
{ },
};
+static struct attribute *adva_timecard_attrs[] = {
+ &dev_attr_serialnum.attr,
+ &dev_attr_gnss_sync.attr,
+ &dev_attr_clock_source.attr,
+ &dev_attr_available_clock_sources.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ &dev_attr_clock_status_drift.attr,
+ &dev_attr_clock_status_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
+ &dev_attr_tod_correction.attr,
+ NULL,
+};
+
+static const struct attribute_group adva_timecard_group = {
+ .attrs = adva_timecard_attrs,
+};
+
+static const struct ocp_attr_group adva_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &adva_timecard_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group },
+ { },
+};
+
static void
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
const char *def)
@@ -4209,10 +4493,11 @@ ptp_ocp_detach(struct ptp_ocp *bp)
device_unregister(&bp->dev);
}
-static int ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct ptp_ocp *bp = priv;
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 45f9002a5dca..18934e28469e 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -120,7 +120,7 @@ static inline bool ptp_clock_freerun(struct ptp_clock *ptp)
return ptp_vclock_in_use(ptp);
}
-extern struct class *ptp_class;
+extern const struct class ptp_class;
/*
* see ptp_chardev.c
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index f7a499a1bd39..a15460aaa03b 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -24,8 +24,7 @@ static ssize_t max_phase_adjustment_show(struct device *dev,
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
- return snprintf(page, PAGE_SIZE - 1, "%d\n",
- ptp->info->getmaxphase(ptp->info));
+ return sysfs_emit(page, "%d\n", ptp->info->getmaxphase(ptp->info));
}
static DEVICE_ATTR_RO(max_phase_adjustment);
@@ -34,7 +33,7 @@ static ssize_t var##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
- return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
+ return sysfs_emit(page, "%d\n", ptp->info->var); \
} \
static DEVICE_ATTR(name, 0444, var##_show, NULL);
@@ -102,8 +101,8 @@ static ssize_t extts_fifo_show(struct device *dev,
if (!qcnt)
goto out;
- cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
- event.index, event.t.sec, event.t.nsec);
+ cnt = sysfs_emit(page, "%u %lld %u\n",
+ event.index, event.t.sec, event.t.nsec);
out:
return cnt;
}
@@ -194,7 +193,7 @@ static ssize_t n_vclocks_show(struct device *dev,
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->n_vclocks);
mutex_unlock(&ptp->n_vclocks_mux);
@@ -270,7 +269,7 @@ static ssize_t max_vclocks_show(struct device *dev,
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->max_vclocks);
return size;
}
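All the ptp_sysfs.c hunks swap hand-rolled snprintf(page, PAGE_SIZE - 1, ...) for sysfs_emit(), which knows show() buffers are exactly one page and checks the pointer alignment, removing the off-by-one-prone size arithmetic. A minimal attribute sketch with an invented name:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t my_value_show(struct device *dev,
    			     struct device_attribute *attr, char *page)
    {
    	int value = 42;	/* stand-in for real driver state */

    	return sysfs_emit(page, "%d\n", value);
    }
    static DEVICE_ATTR_RO(my_value);	/* read-only 0444 attribute */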
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index dcf752c9e045..7febfdcbde8b 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -241,7 +241,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
return num;
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
- dev = class_find_device_by_name(ptp_class, name);
+ dev = class_find_device_by_name(&ptp_class, name);
if (!dev)
return num;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index b9934b9c2d70..9f30e0edadfe 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -384,7 +384,7 @@ static struct attribute *ssb_device_attrs[] = {
};
ATTRIBUTE_GROUPS(ssb_device);
-static struct bus_type ssb_bustype = {
+static const struct bus_type ssb_bustype = {
.name = "ssb",
.match = ssb_bus_match,
.probe = ssb_device_probe,
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0b262de6480..283804b49e91 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1515,7 +1515,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER)
RFbSetPower(priv, priv->wCurrentRate,
- conf->chandef.chan->hw_value);
+ conf->chanreq.oper.chan->hw_value);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->pcid->dev,
@@ -1684,6 +1684,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2abae90f3f52..7bbed462f062 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -794,7 +794,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_set_bss_mode(priv);
if (changed & (BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH))
- vnt_rf_setpower(priv, conf->chandef.chan);
+ vnt_rf_setpower(priv, conf->chanreq.oper.chan);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->usb->dev,
@@ -956,6 +956,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f2ed7167c848..4b2fcb228a0a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,10 +141,8 @@ struct vhost_net {
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
- /* Private page frag */
- struct page_frag page_frag;
- /* Refcount bias of page frag */
- int refcnt_bias;
+ /* Private page frag cache */
+ struct page_frag_cache pf_cache;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
- struct page_frag *pfrag, gfp_t gfp)
-{
- if (pfrag->page) {
- if (pfrag->offset + sz <= pfrag->size)
- return true;
- __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
- }
-
- pfrag->offset = 0;
- net->refcnt_bias = 0;
- if (SKB_FRAG_PAGE_ORDER) {
- /* Avoid direct reclaim but allow kswapd to wake */
- pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
- __GFP_COMP | __GFP_NOWARN |
- __GFP_NORETRY,
- SKB_FRAG_PAGE_ORDER);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
- goto done;
- }
- }
- pfrag->page = alloc_page(gfp);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE;
- goto done;
- }
- return false;
-
-done:
- net->refcnt_bias = USHRT_MAX;
- page_ref_add(pfrag->page, USHRT_MAX - 1);
- return true;
-}
-
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
struct socket *sock = vhost_vq_get_backend(vq);
- struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
int sock_hlen = nvq->sock_hlen;
void *buf;
int copied;
+ int ret;
if (unlikely(len < nvq->sock_hlen))
return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
return -ENOSPC;
buflen += SKB_DATA_ALIGN(len + pad);
- alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
- if (unlikely(!vhost_net_page_frag_refill(net, buflen,
- alloc_frag, GFP_KERNEL)))
+ buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+ SMP_CACHE_BYTES);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset +
- offsetof(struct tun_xdp_hdr, gso),
- sock_hlen, from);
- if (copied != sock_hlen)
- return -EFAULT;
+ copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+ sock_hlen, from);
+ if (copied != sock_hlen) {
+ ret = -EFAULT;
+ goto err;
+ }
hdr = buf;
gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2);
- if (vhost16_to_cpu(vq, gso->hdr_len) > len)
- return -EINVAL;
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+ ret = -EINVAL;
+ goto err;
+ }
}
len -= sock_hlen;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset + pad,
- len, from);
- if (copied != len)
- return -EFAULT;
+ copied = copy_from_iter(buf + pad, len, from);
+ if (copied != len) {
+ ret = -EFAULT;
+ goto err;
+ }
xdp_init_buff(xdp, buflen, NULL);
xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
- --net->refcnt_bias;
- alloc_frag->offset += buflen;
-
++nvq->batched_xdp;
return 0;
+
+err:
+ page_frag_free(buf);
+ return ret;
}
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
- n->page_frag.page = NULL;
- n->refcnt_bias = 0;
+ n->pf_cache.va = NULL;
return 0;
}
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
- if (n->page_frag.page)
- __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+ page_frag_cache_drain(&n->pf_cache);
kvfree(n);
return 0;
}
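
The vhost-net hunks above drop the driver-private page_frag refill and refcount-bias bookkeeping in favor of the generic page_frag_cache API (extended in the include/linux/gfp.h hunk further down). A minimal sketch of the resulting lifecycle, assuming kernel context; buflen is illustrative:

    struct page_frag_cache cache = { .va = NULL }; /* filled lazily */
    unsigned int buflen = 2048;
    void *buf;

    /* fragment aligned to SMP_CACHE_BYTES; NULL on allocation failure */
    buf = page_frag_alloc_align(&cache, buflen, GFP_KERNEL, SMP_CACHE_BYTES);
    if (!buf)
            return -ENOMEM;

    page_frag_free(buf);           /* error path: drop one fragment */
    page_frag_cache_drain(&cache); /* teardown: release the whole cache */
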
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 39ac6fdf8bca..882b89edc52a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -37,6 +37,7 @@
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
+#include <linux/capability.h>
#include <net/busy_poll.h>
/*
@@ -227,6 +228,11 @@ struct eventpoll {
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
unsigned int napi_id;
+ /* busy poll timeout */
+ u32 busy_poll_usecs;
+ /* busy poll packet budget */
+ u16 busy_poll_budget;
+ bool prefer_busy_poll;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -387,11 +393,41 @@ static inline int ep_events_available(struct eventpoll *ep)
}
#ifdef CONFIG_NET_RX_BUSY_POLL
+/**
+ * busy_loop_ep_timeout - check if busy poll has timed out
+ * @start_time: The start time used to compute the remaining time until timeout.
+ * @ep: Pointer to the eventpoll context.
+ *
+ * The timeout value from the epoll instance @ep is preferred; if it is not
+ * set, fall back to the system-wide global via busy_loop_timeout().
+ *
+ * Return: true if the timeout has expired, false otherwise.
+ */
+static bool busy_loop_ep_timeout(unsigned long start_time,
+ struct eventpoll *ep)
+{
+ unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);
+
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
+
+ return time_after(now, end_time);
+ } else {
+ return busy_loop_timeout(start_time);
+ }
+}
+
+static bool ep_busy_loop_on(struct eventpoll *ep)
+{
+ return !!ep->busy_poll_usecs || net_busy_loop_on();
+}
+
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
struct eventpoll *ep = p;
- return ep_events_available(ep) || busy_loop_timeout(start_time);
+ return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);
}
/*
@@ -403,10 +439,15 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
+ u16 budget = READ_ONCE(ep->busy_poll_budget);
+ bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
+
+ if (!budget)
+ budget = BUSY_POLL_BUDGET;
- if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
- napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
- BUSY_POLL_BUDGET);
+ if (napi_id >= MIN_NAPI_ID && ep_busy_loop_on(ep)) {
+ napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end,
+ ep, prefer_busy_poll, budget);
if (ep_events_available(ep))
return true;
/*
@@ -425,12 +466,12 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
*/
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
- struct eventpoll *ep;
+ struct eventpoll *ep = epi->ep;
unsigned int napi_id;
struct socket *sock;
struct sock *sk;
- if (!net_busy_loop_on())
+ if (!ep_busy_loop_on(ep))
return;
sock = sock_from_file(epi->ffd.file);
@@ -442,7 +483,6 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
return;
napi_id = READ_ONCE(sk->sk_napi_id);
- ep = epi->ep;
/* Non-NAPI IDs can be rejected
* or
@@ -455,6 +495,49 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
ep->napi_id = napi_id;
}
+static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct eventpoll *ep = file->private_data;
+ void __user *uarg = (void __user *)arg;
+ struct epoll_params epoll_params;
+
+ switch (cmd) {
+ case EPIOCSPARAMS:
+ if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))
+ return -EFAULT;
+
+ /* pad byte must be zero */
+ if (epoll_params.__pad)
+ return -EINVAL;
+
+ if (epoll_params.busy_poll_usecs > S32_MAX)
+ return -EINVAL;
+
+ if (epoll_params.prefer_busy_poll > 1)
+ return -EINVAL;
+
+ if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);
+ WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);
+ WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);
+ return 0;
+ case EPIOCGPARAMS:
+ memset(&epoll_params, 0, sizeof(epoll_params));
+ epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);
+ epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);
+ epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
+ if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
#else
static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
@@ -466,6 +549,12 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}
+static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
@@ -819,6 +908,27 @@ static void ep_clear_and_put(struct eventpoll *ep)
ep_free(ep);
}
+static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ if (!is_file_epoll(file))
+ return -EINVAL;
+
+ switch (cmd) {
+ case EPIOCSPARAMS:
+ case EPIOCGPARAMS:
+ ret = ep_eventpoll_bp_ioctl(file, cmd, arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
struct eventpoll *ep = file->private_data;
@@ -925,6 +1035,8 @@ static const struct file_operations eventpoll_fops = {
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
+ .unlocked_ioctl = ep_eventpoll_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/*
@@ -2052,6 +2164,11 @@ static int do_epoll_create(int flags)
error = PTR_ERR(file);
goto out_free_fd;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ep->busy_poll_usecs = 0;
+ ep->busy_poll_budget = 0;
+ ep->prefer_busy_poll = false;
+#endif
ep->file = file;
fd_install(fd, file);
return fd;
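
The EPIOCSPARAMS/EPIOCGPARAMS pair added above makes busy polling tunable per epoll context rather than only through the global sysctls. A hedged user-space sketch, assuming struct epoll_params and the ioctl numbers come from the uapi <linux/eventpoll.h>; the values are illustrative:

    #include <sys/epoll.h>
    #include <sys/ioctl.h>
    #include <linux/eventpoll.h>

    struct epoll_params params = {
            .busy_poll_usecs  = 100, /* 0 falls back to the sysctl; max S32_MAX */
            .busy_poll_budget = 16,  /* above NAPI_POLL_WEIGHT needs CAP_NET_ADMIN */
            .prefer_busy_poll = 1,   /* only 0 or 1 is accepted */
    };                               /* static init zeroes the __pad byte */
    int epfd = epoll_create1(0);

    if (ioctl(epfd, EPIOCSPARAMS, &params) == -1)
            perror("EPIOCSPARAMS");
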
diff --git a/fs/verity/measure.c b/fs/verity/measure.c
index bf7a5f4cccaf..3969d54158d1 100644
--- a/fs/verity/measure.c
+++ b/fs/verity/measure.c
@@ -159,9 +159,9 @@ __bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_ker
__bpf_kfunc_end_defs();
-BTF_SET8_START(fsverity_set_ids)
+BTF_KFUNCS_START(fsverity_set_ids)
BTF_ID_FLAGS(func, bpf_get_fsverity_digest, KF_TRUSTED_ARGS)
-BTF_SET8_END(fsverity_set_ids)
+BTF_KFUNCS_END(fsverity_set_ids)
static int bpf_get_fsverity_digest_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index ebfa12f69501..63928f173223 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -66,7 +66,8 @@
_pfx "mask is not constant"); \
BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
- ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ ~((_mask) >> __bf_shf(_mask)) & \
+ (0 + (_val)) : 0, \
_pfx "value too large for the field"); \
BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
__bf_cast_unsigned(_reg, ~0ull), \
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index df24c8fb1009..fb3a9c93ac86 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -55,6 +55,7 @@ struct device;
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
+ * bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
@@ -63,6 +64,8 @@ struct device;
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
* bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
+ * bitmap_scatter(dst, src, mask, nbits) *dst = map(dense, sparse)(src)
+ * bitmap_gather(dst, src, mask, nbits) *dst = map(sparse, dense)(src)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
@@ -172,6 +175,8 @@ bool __bitmap_subset(const unsigned long *bitmap1,
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -428,6 +433,15 @@ unsigned long bitmap_weight_and(const unsigned long *src1,
return __bitmap_weight_and(src1, src2, nbits);
}
+static __always_inline
+unsigned long bitmap_weight_andnot(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_andnot(src1, src2, nbits);
+}
+
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
@@ -490,6 +504,105 @@ static inline void bitmap_replace(unsigned long *dst,
__bitmap_replace(dst, old, new, mask, nbits);
}
+/**
+ * bitmap_scatter - Scatter a bitmap according to the given mask
+ * @dst: scattered bitmap
+ * @src: gathered bitmap
+ * @mask: mask representing bits to assign to in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Scatters bitmap with sequential bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
+ *
+ * Or in binary form
+ * @src @mask @dst
+ * 0000000001011010 0001001100010011 0000001100000010
+ *
+ * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
+ *
+ * A more 'visual' description of the operation:
+ * src: 0000000001011010
+ * ||||||
+ * +------+|||||
+ * | +----+||||
+ * | |+----+|||
+ * | || +-+||
+ * | || | ||
+ * mask: ...v..vv...v..vv
+ * ...0..11...0..10
+ * dst: 0000001100000010
+ *
+ * A relationship exists between bitmap_scatter() and bitmap_gather().
+ * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
+ * See bitmap_gather() for details related to this relationship.
+ */
+static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(bit, dst, test_bit(n++, src));
+}
+
+/**
+ * bitmap_gather - Gather a bitmap according to given mask
+ * @dst: gathered bitmap
+ * @src: scattered bitmap
+ * @mask: mask representing bits to extract from in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Gathers bitmap with sparse bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
+ *
+ * Or in binary form
+ * @src @mask @dst
+ * 0000001100000010 0001001100010011 0000000000011010
+ *
+ * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
+ *
+ * A more 'visual' description of the operation:
+ * mask: ...v..vv...v..vv
+ * src: 0000001100000010
+ * ^ ^^ ^ 0
+ * | || | 10
+ * | || > 010
+ * | |+--> 1010
+ * | +--> 11010
+ * +----> 011010
+ * dst: 0000000000011010
+ *
+ * A relationship exists between bitmap_gather() and bitmap_scatter(). See
+ * bitmap_scatter() for the details of the scatter operation.
+ * Suppose scattered is computed using bitmap_scatter(scattered, src, mask, n).
+ * The operation bitmap_gather(result, scattered, mask, n) leads to a result
+ * equal or equivalent to src.
+ *
+ * The result can be 'equivalent' because bitmap_scatter() and bitmap_gather()
+ * are not bijective.
+ * The result and src values are equivalent in the sense that a call to
+ * bitmap_scatter(res, src, mask, n) and a call to
+ * bitmap_scatter(res, result, mask, n) will lead to the same res value.
+ */
+static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(n++, dst, test_bit(bit, src));
+}
+
static inline void bitmap_next_set_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
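
The round-trip relationship spelled out in the bitmap_gather() kernel-doc can be checked directly with the documented example values. A short sketch, with 16-bit bitmaps for brevity:

    DECLARE_BITMAP(src, 16)  = { 0x005a };
    DECLARE_BITMAP(mask, 16) = { 0x1313 };
    DECLARE_BITMAP(tmp, 16);
    DECLARE_BITMAP(out, 16);

    bitmap_scatter(tmp, src, mask, 16); /* tmp == 0x0302 */
    bitmap_gather(out, tmp, mask, 16);  /* out == 0x001a: src truncated to
                                         * the 6 bits that mask selects */
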
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a789266feac3..fb3c3e7181e6 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -196,7 +196,8 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
- cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk && \
+ sk_fullsock(sk)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
CGROUP_INET_INGRESS); \
\
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e30100597d0a..4f20f62f9d63 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -37,6 +37,7 @@ struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
+struct bpf_arena;
struct sock;
struct seq_file;
struct btf;
@@ -52,6 +53,10 @@ struct module;
struct bpf_func_state;
struct ftrace_ops;
struct cgroup;
+struct bpf_token;
+struct user_namespace;
+struct super_block;
+struct inode;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -135,6 +140,9 @@ struct bpf_map_ops {
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
struct poll_table_struct *pts);
+ unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
/* Functions called by bpf_local_storage maps */
int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
@@ -247,10 +255,7 @@ struct bpf_list_node_kern {
} __attribute__((aligned(8)));
struct bpf_map {
- /* The first two cachelines with read-mostly members of which some
- * are also accessed in fast-path (e.g. ops, max_entries).
- */
- const struct bpf_map_ops *ops ____cacheline_aligned;
+ const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
void *security;
@@ -272,17 +277,14 @@ struct bpf_map {
struct obj_cgroup *objcg;
#endif
char name[BPF_OBJ_NAME_LEN];
- /* The 3rd and 4th cacheline with misc members to avoid false sharing
- * particularly with refcounting.
- */
- atomic64_t refcnt ____cacheline_aligned;
+ struct mutex freeze_mutex;
+ atomic64_t refcnt;
atomic64_t usercnt;
/* rcu is used before freeing and work is only used during freeing */
union {
struct work_struct work;
struct rcu_head rcu;
};
- struct mutex freeze_mutex;
atomic64_t writecnt;
/* 'Ownership' of program-containing map is claimed by the first program
* that is going to use this map or by the first program which FD is
@@ -527,8 +529,8 @@ void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
struct bpf_spin_lock *spin_lock);
-
-
+u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
+u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
@@ -710,6 +712,7 @@ enum bpf_arg_type {
* on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
+ ARG_PTR_TO_ARENA,
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
@@ -881,6 +884,7 @@ enum bpf_reg_type {
* an explicit null check is required for this struct.
*/
PTR_TO_MEM, /* reg points to valid memory region */
+ PTR_TO_ARENA,
PTR_TO_BUF, /* reg points to a read/write buffer */
PTR_TO_FUNC, /* reg points to a bpf program function */
CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
@@ -1185,7 +1189,6 @@ struct bpf_trampoline {
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
struct bpf_tramp_image *cur_image;
- struct module *mod;
};
struct bpf_attach_target_info {
@@ -1412,6 +1415,7 @@ struct bpf_jit_poke_descriptor {
struct bpf_ctx_arg_aux {
u32 offset;
enum bpf_reg_type reg_type;
+ struct btf *btf;
u32 btf_id;
};
@@ -1451,11 +1455,11 @@ struct bpf_prog_aux {
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
bool attach_tracing_prog; /* true if tracing another tracing program */
bool func_proto_unreliable;
- bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
bool exception_cb;
bool exception_boundary;
+ struct bpf_arena *arena;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -1485,6 +1489,7 @@ struct bpf_prog_aux {
#ifdef CONFIG_SECURITY
void *security;
#endif
+ struct bpf_token *token;
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
@@ -1535,7 +1540,8 @@ struct bpf_prog {
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1, /* Do we call get_func_ip() */
- tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
+ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
+ sleepable:1; /* BPF program is sleepable */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@@ -1609,6 +1615,31 @@ struct bpf_link_primer {
u32 id;
};
+struct bpf_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+
+ /* BPF token-related delegation options */
+ u64 delegate_cmds;
+ u64 delegate_maps;
+ u64 delegate_progs;
+ u64 delegate_attachs;
+};
+
+struct bpf_token {
+ struct work_struct work;
+ atomic64_t refcnt;
+ struct user_namespace *userns;
+ u64 allowed_cmds;
+ u64 allowed_maps;
+ u64 allowed_progs;
+ u64 allowed_attachs;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+};
+
struct bpf_struct_ops_value;
struct btf_member;
@@ -1673,19 +1704,64 @@ struct bpf_struct_ops {
void (*unreg)(void *kdata);
int (*update)(void *kdata, void *old_kdata);
int (*validate)(void *kdata);
- const struct btf_type *type;
- const struct btf_type *value_type;
+ void *cfi_stubs;
+ struct module *owner;
const char *name;
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+};
+
+/* Every member of a struct_ops type has an instance, even if the member is
+ * not an operator (function pointer). The "info" field will be assigned to
+ * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
+ * argument information required by the verifier to verify the program.
+ *
+ * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
+ * corresponding entry for a given argument.
+ */
+struct bpf_struct_ops_arg_info {
+ struct bpf_ctx_arg_aux *info;
+ u32 cnt;
+};
+
+struct bpf_struct_ops_desc {
+ struct bpf_struct_ops *st_ops;
+
+ const struct btf_type *type;
+ const struct btf_type *value_type;
u32 type_id;
u32 value_id;
- void *cfi_stubs;
+
+ /* Collection of argument information for each member */
+ struct bpf_struct_ops_arg_info *arg_info;
+};
+
+enum bpf_struct_ops_state {
+ BPF_STRUCT_OPS_STATE_INIT,
+ BPF_STRUCT_OPS_STATE_INUSE,
+ BPF_STRUCT_OPS_STATE_TOBEFREE,
+ BPF_STRUCT_OPS_STATE_READY,
+};
+
+struct bpf_struct_ops_common_value {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+/* This macro helps developers register a struct_ops type and generate its
+ * type information correctly. Use it instead of calling
+ * __register_bpf_struct_ops() directly.
+ */
+#define register_bpf_struct_ops(st_ops, type) \
+ ({ \
+ struct bpf_struct_ops_##type { \
+ struct bpf_struct_ops_common_value common; \
+ struct type data ____cacheline_aligned_in_smp; \
+ }; \
+ BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \
+ __register_bpf_struct_ops(st_ops); \
+ })
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
-const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
-void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
@@ -1694,7 +1770,9 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
struct bpf_tramp_link *link,
const struct btf_func_model *model,
void *stub_func,
- void *image, void *image_end);
+ void **image, u32 *image_off,
+ bool allow_alloc);
+void bpf_struct_ops_image_free(void *image);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
@@ -1727,15 +1805,13 @@ struct bpf_dummy_ops {
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
#endif
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log);
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
#else
-static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
-{
- return NULL;
-}
-static inline void bpf_struct_ops_init(struct btf *btf,
- struct bpf_verifier_log *log)
-{
-}
+#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
return try_module_get(owner);
@@ -1754,6 +1830,13 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
+static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+}
+
+static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+}
#endif
@@ -2029,14 +2112,14 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
- if (!prog->aux->sleepable)
+ if (!prog->sleepable)
rcu_read_lock();
run_ctx.bpf_cookie = item->bpf_cookie;
ret &= run_prog(prog, ctx);
item++;
- if (!prog->aux->sleepable)
+ if (!prog->sleepable)
rcu_read_unlock();
}
bpf_reset_run_ctx(old_run_ctx);
@@ -2068,6 +2151,7 @@ static inline void bpf_enable_instrumentation(void)
migrate_enable();
}
+extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
@@ -2135,6 +2219,8 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+ unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
@@ -2202,24 +2288,26 @@ static inline void bpf_map_dec_elem_count(struct bpf_map *map)
extern int sysctl_unprivileged_bpf_disabled;
-static inline bool bpf_allow_ptr_leaks(void)
+bool bpf_token_capable(const struct bpf_token *token, int cap);
+
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_allow_uninit_stack(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
- return cpu_mitigations_off() || perfmon_capable();
+ return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return cpu_mitigations_off() || perfmon_capable();
+ return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
@@ -2236,8 +2324,21 @@ int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+void bpf_token_inc(struct bpf_token *token);
+void bpf_token_put(struct bpf_token *token);
+int bpf_token_create(union bpf_attr *attr);
+struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type);
+
int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
+struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
+ umode_t mode);
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...) \
@@ -2472,11 +2573,14 @@ int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *pr
struct btf *btf, const struct btf_type *t);
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
int comp_idx, const char *tag_key);
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
-const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
void bpf_task_storage_free(struct task_struct *task);
void bpf_cgrp_storage_free(struct cgroup *cgroup);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
@@ -2595,6 +2699,24 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
return -EOPNOTSUPP;
}
+static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
+}
+
+static inline void bpf_token_inc(struct bpf_token *token)
+{
+}
+
+static inline void bpf_token_put(struct bpf_token *token)
+{
+}
+
+static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline void __dev_flush(void)
{
}
@@ -2718,7 +2840,7 @@ static inline int btf_struct_access(struct bpf_verifier_log *log,
}
static inline const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
return NULL;
}
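
register_bpf_struct_ops() above emits BTF for a wrapping value type (the common refcount/state header plus the ops struct) before forwarding to __register_bpf_struct_ops(). A hedged sketch of a registration using a hypothetical ops type; a real user also fills in the verifier callbacks, cfi_stubs and the other struct bpf_struct_ops fields:

    struct my_dummy_ops {                   /* hypothetical subsystem ops */
            int (*run)(void *ctx);
    };

    static struct bpf_struct_ops bpf_my_dummy_ops = {
            .name  = "my_dummy_ops",
            .owner = THIS_MODULE,
            /* .verifier_ops, .init, .reg, .unreg, .cfi_stubs, ... */
    };

    static int __init my_dummy_ops_init(void)
    {
            /* also emits BTF for struct bpf_struct_ops_my_dummy_ops */
            return register_bpf_struct_ops(&bpf_my_dummy_ops, my_dummy_ops);
    }
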
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 173ec7f43ed1..dcddb0aef7d8 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -129,10 +129,36 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
struct bpf_local_storage_cache *cache,
bool bpf_ma);
-struct bpf_local_storage_data *
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+/* If cacheit_lockit is false, this lookup function is lockless */
+static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
struct bpf_local_storage_map *smap,
- bool cacheit_lockit);
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage_data *sdata;
+ struct bpf_local_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
+ bpf_rcu_lock_held());
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
+ rcu_read_lock_trace_held())
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+ if (cacheit_lockit)
+ __bpf_local_storage_insert_cache(local_storage, smap, selem);
+ return SDATA(selem);
+}
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 94baced5a1ad..9f2a6b83b49e 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -132,6 +132,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d07d857ca67f..7cb1b75eee38 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -449,11 +449,12 @@ struct bpf_verifier_state {
u32 jmp_history_cnt;
u32 dfs_depth;
u32 callback_unroll_depth;
+ u32 may_goto_depth;
};
#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
- ((1 << frame->stack[slot].slot_type[0]) & (mask))) \
+ ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
@@ -547,6 +548,7 @@ struct bpf_insn_aux_data {
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
bool zext_dst; /* this insn zero extends dst reg */
+ bool needs_zext; /* alu op needs to clear upper bits */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
@@ -610,6 +612,7 @@ struct bpf_subprog_arg_info {
enum bpf_arg_type arg_type;
union {
u32 mem_size;
+ u32 btf_id;
};
};
@@ -618,6 +621,7 @@ struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
+ u16 stack_extra;
bool has_tail_call: 1;
bool tail_call_reachable: 1;
bool has_ld_abs: 1;
@@ -662,6 +666,7 @@ struct bpf_verifier_env {
u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
+ struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
@@ -917,6 +922,15 @@ static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
env->scratched_stack_slots = ~0ULL;
}
+static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
+{
+#ifdef __BIG_ENDIAN
+ off -= spill_size - fill_size;
+#endif
+
+ return !(off % BPF_REG_SIZE);
+}
+
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
diff --git a/include/linux/btf.h b/include/linux/btf.h
index cf5c6ff48981..f9e56fd12a9f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -137,6 +137,7 @@ struct btf_struct_metas {
extern const struct file_operations btf_fops;
+const char *btf_get_name(const struct btf *btf);
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
@@ -494,8 +495,26 @@ static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
}
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix);
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no);
+
struct bpf_verifier_log;
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_struct_ops;
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id);
+#else
+static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
@@ -512,10 +531,9 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
-const struct btf_type *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, enum bpf_prog_type prog_type,
- int arg);
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg);
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
@@ -555,12 +573,12 @@ static inline struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf
{
return NULL;
}
-static inline const struct btf_member *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, enum bpf_prog_type prog_type,
- int arg)
+static inline bool
+btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
{
- return NULL;
+ return false;
}
static inline int get_kern_ctx_btf_id(struct bpf_verifier_log *log,
enum bpf_prog_type prog_type) {
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index a9cb10b0e2e9..e24aabfe8ecc 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -8,6 +8,9 @@ struct btf_id_set {
u32 ids[];
};
+/* This flag implies BTF_SET8 holds kfunc(s) */
+#define BTF_SET8_KFUNCS (1 << 0)
+
struct btf_id_set8 {
u32 cnt;
u32 flags;
@@ -21,6 +24,7 @@ struct btf_id_set8 {
#include <linux/compiler.h> /* for __PASTE */
#include <linux/compiler_attributes.h> /* for __maybe_unused */
+#include <linux/stringify.h>
/*
* Following macros help to define lists of BTF IDs placed
@@ -183,17 +187,18 @@ extern struct btf_id_set name;
* .word (1 << 3) | (1 << 1) | (1 << 2)
*
*/
-#define __BTF_SET8_START(name, scope) \
+#define __BTF_SET8_START(name, scope, flags) \
+__BTF_ID_LIST(name, local) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " __BTF_ID__set8__" #name "; \n" \
"__BTF_ID__set8__" #name ":; \n" \
-".zero 8 \n" \
+".zero 4 \n" \
+".long " __stringify(flags) "\n" \
".popsection; \n");
#define BTF_SET8_START(name) \
-__BTF_ID_LIST(name, local) \
-__BTF_SET8_START(name, local)
+__BTF_SET8_START(name, local, 0)
#define BTF_SET8_END(name) \
asm( \
@@ -202,6 +207,12 @@ asm( \
".popsection; \n"); \
extern struct btf_id_set8 name;
+#define BTF_KFUNCS_START(name) \
+__BTF_SET8_START(name, local, BTF_SET8_KFUNCS)
+
+#define BTF_KFUNCS_END(name) \
+BTF_SET8_END(name)
+
#else
#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
@@ -216,6 +227,8 @@ extern struct btf_id_set8 name;
#define BTF_SET_END(name)
#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
#define BTF_SET8_END(name)
+#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS };
+#define BTF_KFUNCS_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
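
BTF_KFUNCS_START/END are the kfunc-flavored variants of BTF_SET8_START/END, tagging the set with BTF_SET8_KFUNCS exactly as the fs/verity hunk earlier in this series does. A sketch of defining and registering such a set around a hypothetical kfunc; register_btf_kfunc_id_set() is the usual registration entry point:

    __bpf_kfunc int bpf_my_kfunc(u64 arg); /* hypothetical kfunc */

    BTF_KFUNCS_START(my_kfunc_ids)
    BTF_ID_FLAGS(func, bpf_my_kfunc, KF_TRUSTED_ARGS)
    BTF_KFUNCS_END(my_kfunc_ids)

    static const struct btf_kfunc_id_set my_kfunc_set = {
            .owner = THIS_MODULE,
            .set   = &my_kfunc_ids,
    };

    /* typically from an initcall */
    err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
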
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index cfb545841a2c..1c29947db848 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -7,6 +7,7 @@
* set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
@@ -720,6 +721,19 @@ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
}
/**
+ * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask whose set bits are excluded from the count.
+ *
+ * Return: count of bits set in *srcp1 and cleared in *srcp2
+ */
+static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
* cpumask_shift_right - *dstp = *srcp >> n
* @dstp: the cpumask result
* @srcp: the input to shift
@@ -977,6 +991,8 @@ static inline bool cpumask_available(cpumask_var_t mask)
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
+DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
+
/* It's common to want to use cpu_all_mask in struct member initializers,
* so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
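
cpumask_weight_andnot() mirrors the bitmap_weight_andnot() helper added earlier in this series: one popcount pass over *srcp1 with *srcp2's bits masked off, and no temporary cpumask. An illustrative call, where the 'isolated' mask is hypothetical:

    /* count CPUs that are online but not isolated, in a single pass;
     * semantically cpumask_andnot(tmp, ...) + cpumask_weight(tmp),
     * without materializing tmp
     */
    unsigned int nr = cpumask_weight_andnot(cpu_online_mask, isolated);
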
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index e37344f6a231..d275736230b3 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -21,6 +21,7 @@ struct dpll_device_ops {
enum dpll_mode *mode, struct netlink_ext_ack *extack);
int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack);
int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
s32 *temp, struct netlink_ext_ack *extack);
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 407c2f281b64..5693a4be0d9a 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,14 +38,22 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
#include <asm/bug.h>
+#define DQL_HIST_LEN 4
+#define DQL_HIST_ENT(dql, idx) ((dql)->history[(idx) % DQL_HIST_LEN])
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
unsigned int adj_limit; /* limit + num_completed */
unsigned int last_obj_cnt; /* Count at last queuing */
+ unsigned long history_head; /* top 58 bits of jiffies */
+ /* stall entries, a bit per entry */
+ unsigned long history[DQL_HIST_LEN];
+
/* Fields accessed only by completion path (dql_completed) */
unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
@@ -62,6 +70,13 @@ struct dql {
unsigned int max_limit; /* Max limit */
unsigned int min_limit; /* Minimum limit */
unsigned int slack_hold_time; /* Time to measure slack */
+
+ /* Stall threshold (in jiffies), defined by user */
+ unsigned short stall_thrs;
+ /* Longest stall detected, reported to user */
+ unsigned short stall_max;
+ unsigned long last_reap; /* Last reap (in jiffies) */
+ unsigned long stall_cnt; /* Number of stalls */
};
/* Set some static maximums */
@@ -74,6 +89,8 @@ struct dql {
*/
static inline void dql_queued(struct dql *dql, unsigned int count)
{
+ unsigned long map, now, now_hi, i;
+
BUG_ON(count > DQL_MAX_OBJECT);
dql->last_obj_cnt = count;
@@ -86,6 +103,34 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
barrier();
dql->num_queued += count;
+
+ now = jiffies;
+ now_hi = now / BITS_PER_LONG;
+
+ /* The following code sets a bit in the ring buffer, where each
+ * bit tracks the time at which a packet was queued. The dql->history
+ * buffer covers DQL_HIST_LEN * BITS_PER_LONG time (jiffies) slots
+ */
+ if (unlikely(now_hi != dql->history_head)) {
+ /* About to reuse slots, clear them */
+ for (i = 0; i < DQL_HIST_LEN; i++) {
+ /* Multiplication masks high bits */
+ if (now_hi * BITS_PER_LONG ==
+ (dql->history_head + i) * BITS_PER_LONG)
+ break;
+ DQL_HIST_ENT(dql, dql->history_head + i + 1) = 0;
+ }
+ /* pairs with smp_rmb() in dql_check_stall() */
+ smp_wmb();
+ WRITE_ONCE(dql->history_head, now_hi);
+ }
+
+ /* __set_bit() does not guarantee WRITE_ONCE() semantics */
+ map = DQL_HIST_ENT(dql, now_hi);
+
+ /* Populate the history: one bit per jiffy in which packets were queued */
+ if (!(map & BIT_MASK(now)))
+ WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
}
/* Returns how many objects can be queued, < 0 indicates over limit. */
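
In the stall history above, each jiffy maps to one bit: BITS_PER_LONG consecutive jiffies share a history word, and the DQL_HIST_LEN words recycle round-robin. Worked indexing, assuming 64-bit longs:

    /* now == 1000 jiffies, BITS_PER_LONG == 64 */
    now_hi = 1000 / 64; /* == 15 */
    /* DQL_HIST_ENT(dql, 15) -> dql->history[15 % DQL_HIST_LEN]
     *                       == dql->history[3]
     * BIT_MASK(1000)        == 1UL << (1000 % 64), i.e. bit 40 of that word
     */
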
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 325e0778e937..9901e563f706 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -222,6 +222,16 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct ethtool_keee {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_active;
+ bool eee_enabled;
+};
+
struct kernel_ethtool_coalesce {
u8 use_cqe_mode_tx;
u8 use_cqe_mode_rx;
@@ -892,8 +902,8 @@ struct ethtool_ops {
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_eee)(struct net_device *, struct ethtool_eee *);
- int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_eee)(struct net_device *dev, struct ethtool_keee *eee);
+ int (*set_eee)(struct net_device *dev, struct ethtool_keee *eee);
int (*get_tunable)(struct net_device *,
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 68fb6c8142fe..c99bc3df2d28 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -72,6 +72,9 @@ struct ctl_table_header;
/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
#define BPF_PROBE_MEMSX 0x40
+/* unused opcode to mark special load instruction. Same as BPF_MSH */
+#define BPF_PROBE_MEM32 0xa0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -547,24 +550,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
@@ -955,6 +961,8 @@ bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_jit_supports_exceptions(void);
+bool bpf_jit_supports_ptr_xchg(void);
+bool bpf_jit_supports_arena(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
@@ -1139,7 +1147,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && bpf_capable())
+ if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
return true;
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
index 782cd5fc83d5..9724d4b44b9c 100644
--- a/include/linux/framer/framer-provider.h
+++ b/include/linux/framer/framer-provider.h
@@ -83,7 +83,6 @@ struct framer_ops {
/**
* struct framer_provider - represents the framer provider
* @dev: framer provider device
- * @children: can be used to override the default (dev->of_node) child node
* @owner: the module owner having of_xlate
* @list: to maintain a linked list of framer providers
* @of_xlate: function pointer to obtain framer instance from framer pointer
@@ -93,7 +92,7 @@ struct framer_provider {
struct module *owner;
struct list_head list;
struct framer * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
};
static inline void framer_set_drvdata(struct framer *framer, void *data)
@@ -118,19 +117,19 @@ struct framer *devm_framer_create(struct device *dev, struct device_node *node,
const struct framer_ops *ops);
struct framer *framer_provider_simple_of_xlate(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
void framer_provider_of_unregister(struct framer_provider *framer_provider);
struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
void framer_notify_status_change(struct framer *framer);
@@ -154,7 +153,7 @@ static inline struct framer *devm_framer_create(struct device *dev, struct devic
}
static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
return ERR_PTR(-ENOSYS);
}
@@ -162,7 +161,7 @@ static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
static inline struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
@@ -174,7 +173,7 @@ void framer_provider_of_unregister(struct framer_provider *framer_provider)
static inline struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e2a916cf29c4..937c2a9b6e54 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -311,15 +311,23 @@ extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
struct page_frag_cache;
+void page_frag_cache_drain(struct page_frag_cache *nc);
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+ gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
static inline void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask)
{
- return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}
extern void page_frag_free(void *addr);
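
page_frag_alloc_align() now insists on a power-of-two align and passes -align down as the mask: in two's complement, -align equals ~(align - 1) for any power of two, which is exactly the align_mask form __page_frag_alloc_align() consumes. A sketch of the identity:

    unsigned int align = 64;
    unsigned int mask  = -align; /* 0xffffffc0 == ~(64 - 1) */

    /* offset & mask rounds an offset down to a 64-byte boundary */
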
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 83c4d060a559..3385a2cc5b09 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2023 Intel Corporation
+ * Copyright (c) 2018 - 2024 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -191,6 +191,11 @@ static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
}
+static inline bool ieee80211_sn_less_eq(u16 sn1, u16 sn2)
+{
+ return ((sn2 - sn1) & IEEE80211_SN_MASK) <= (IEEE80211_SN_MODULO >> 1);
+}
+
static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
{
return (sn1 + sn2) & IEEE80211_SN_MASK;
@@ -808,6 +813,11 @@ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
}
+static inline u16 ieee80211_get_sn(struct ieee80211_hdr *hdr)
+{
+ return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_SEQ);
+}
+
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -1454,6 +1464,20 @@ struct ieee80211_mgmt {
u8 max_tod_error;
u8 max_toa_error;
} __packed wnm_timing_msr;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ttlm_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[];
+ } __packed ttlm_res;
+ struct {
+ u8 action_code;
+ } __packed ttlm_tear_down;
} u;
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -3036,6 +3060,9 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
@@ -3175,6 +3202,22 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
return len >= needed;
}
+/* must validate ieee80211_eht_oper_size_ok() first */
+static inline u16
+ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
+{
+ const struct ieee80211_eht_operation_info *info =
+ (const void *)eht_oper->optional;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
+ return 0;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ return get_unaligned_le16(info->optional);
+}
+
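A hedged sketch of the call order the comment above requires; data and len stand in for an element pulled out of frame parsing:

/* Hypothetical caller: validate the length before the accessor
 * dereferences the optional operation-info block.
 */
static u16 example_dis_subchan(const u8 *data, u8 len)
{
	const struct ieee80211_eht_operation *oper = (const void *)data;

	if (!ieee80211_eht_oper_size_ok(data, len))
		return 0;

	return ieee80211_eht_oper_dis_subchan_bitmap(oper);
}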
#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
struct ieee80211_bandwidth_indication {
@@ -3357,6 +3400,8 @@ enum ieee80211_statuscode {
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
WLAN_STATUS_SAE_PK = 127,
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134,
};
@@ -3682,6 +3727,7 @@ enum ieee80211_category {
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
WLAN_CATEGORY_S1G = 22,
+ WLAN_CATEGORY_PROTECTED_EHT = 37,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -3745,6 +3791,13 @@ enum ieee80211_unprotected_wnm_actioncode {
WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
};
+/* Protected EHT action codes */
+enum ieee80211_protected_eht_actioncode {
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -4845,6 +4898,10 @@ struct ieee80211_multi_link_elem {
#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
@@ -4908,18 +4965,43 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
}
/**
+ * ieee80211_mle_get_link_id - returns the link ID
+ * @data: the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the BSS link ID can't be found, -1 will be returned.
+ */
+static inline int ieee80211_mle_get_link_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common now points at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
+ return -1;
+
+ return *common;
+}
+
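For illustration, a hypothetical caller that validates the element first and then uses the -1 sentinel:

/* mle_data/mle_len are assumed to come from element parsing. */
static void example_log_link_id(const u8 *mle_data, size_t mle_len)
{
	int link_id;

	if (!ieee80211_mle_type_ok(mle_data, IEEE80211_ML_CONTROL_TYPE_BASIC,
				   mle_len))
		return;

	link_id = ieee80211_mle_get_link_id(mle_data);
	if (link_id >= 0)
		pr_debug("BSS link ID %d\n", link_id);
}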
+/**
* ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
- * @mle: the basic multi link element
+ * @data: pointer to the basic multi link element
*
 * The element is assumed to be of the correct type (BASIC) and big enough;
 * this must be checked using ieee80211_mle_type_ok().
*
* If the BSS parameter change count value can't be found (the presence bit
- * for it is clear), 0 will be returned.
+ * for it is clear), -1 will be returned.
*/
-static inline u8
-ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle)
+static inline int
+ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
u16 control = le16_to_cpu(mle->control);
const u8 *common = mle->variable;
@@ -4927,7 +5009,7 @@ ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle)
common += sizeof(struct ieee80211_mle_basic_common_info);
if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
- return 0;
+ return -1;
if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
common += 1;
@@ -4997,6 +5079,81 @@ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
}
/**
+ * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations
+ * @data: pointer to the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the MLD capabilities and operations field is not present, 0 will be
+ * returned.
+ */
+static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common now points at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
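This getter and its neighbours all walk the same variable common-info layout; a condensed restatement (not part of the patch) of the shared offset computation:

/* Each optional field is skipped only when its presence bit is set,
 * in wire order: link ID (1 octet), BSS parameter change count (1),
 * medium sync delay (2), EML capabilities (2), MLD capa/op (2).
 */
static size_t mle_optional_offset(u16 control)
{
	size_t off = 0;

	if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
		off += 1;
	if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
		off += 1;
	if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
		off += 2;
	if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
		off += 2;
	return off;
}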
+/**
+ * ieee80211_mle_get_mld_id - returns the MLD ID
+ * @data: pointer to the multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the MLD ID is not present, 0 will be returned.
+ */
+static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common now points at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+
+ return *common;
+}
+
+/**
* ieee80211_mle_size_ok - validate multi-link element size
* @data: pointer to the element data
* @len: length of the containing element
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 2a7660843444..043d442994b0 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -27,44 +27,54 @@ struct tun_xdp_hdr {
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
+
static inline bool tun_is_xdp_frame(void *ptr)
{
- return (unsigned long)ptr & TUN_XDP_FLAG;
+ return (unsigned long)ptr & TUN_XDP_FLAG;
}
+
static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
}
+
static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
- return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
+
void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
struct file;
struct socket;
+
static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
+
static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
return NULL;
}
+
static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
+
static inline void tun_ptr_free(void *ptr)
{
}
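A sketch of why the stubs matter: callers can use the API unconditionally, without CONFIG_TUN ifdefs.

/* Hypothetical caller, identical whether or not TUN is built. */
static int example_get_tun_socket(struct file *file, struct socket **sock)
{
	*sock = tun_get_socket(file);
	if (IS_ERR(*sock))
		return PTR_ERR(*sock);	/* -EINVAL when TUN is not built */
	return 0;
}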
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 84abb30a3fbb..a9033696b0aa 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -8,6 +8,7 @@
struct inet_hashinfo;
struct inet_diag_handler {
+ struct module *owner;
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ddb27fc0ee8c..cb5280e6cc21 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -53,13 +53,15 @@ struct in_device {
};
#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
+#define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr))
#define IPV4_DEVCONF_ALL(net, attr) \
IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
+#define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr))
-static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
+static inline int ipv4_devconf_get(const struct in_device *in_dev, int index)
{
index--;
- return in_dev->cnf.data[index];
+ return READ_ONCE(in_dev->cnf.data[index]);
}
static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
@@ -67,7 +69,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
{
index--;
set_bit(index, in_dev->cnf.state);
- in_dev->cnf.data[index] = val;
+ WRITE_ONCE(in_dev->cnf.data[index], val);
}
static inline void ipv4_devconf_setall(struct in_device *in_dev)
@@ -81,18 +83,18 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
- (IPV4_DEVCONF_ALL(net, attr) || \
+ (IPV4_DEVCONF_ALL_RO(net, attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
#define IN_DEV_MAXCONF(in_dev, attr) \
- (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
+ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
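The _RO variants mark the lockless-reader side; a reminder sketch of the required pairing:

/* Pairing used by ipv4_devconf_set()/ipv4_devconf_get() above: once
 * readers go lockless, plain stores may be torn or re-ordered by the
 * compiler, so both sides of the same field must be annotated.
 */
WRITE_ONCE(in_dev->cnf.data[index], val);	/* writer */
val = READ_ONCE(in_dev->cnf.data[index]);	/* lockless reader */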
diff --git a/include/linux/io.h b/include/linux/io.h
index 7304f2a69960..235ba7d80a8f 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -23,12 +23,19 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
return 0;
}
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ return 0;
+}
#endif
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5e605e384aac..383a0ea2ab91 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -3,6 +3,7 @@
#define _IPV6_H
#include <uapi/linux/ipv6.h>
+#include <linux/cache.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
@@ -10,9 +11,16 @@
* This structure contains configuration options per IPv6 link.
*/
struct ipv6_devconf {
- __s32 forwarding;
+ /* RX & TX fastpath fields. */
+ __cacheline_group_begin(ipv6_devconf_read_txrx);
+ __s32 disable_ipv6;
__s32 hop_limit;
__s32 mtu6;
+ __s32 forwarding;
+ __s32 disable_policy;
+ __s32 proxy_ndp;
+ __cacheline_group_end(ipv6_devconf_read_txrx);
+
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
@@ -27,6 +35,7 @@ struct ipv6_devconf {
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
+ __s32 regen_min_advance;
__s32 regen_max_retry;
__s32 max_desync_factor;
__s32 max_addresses;
@@ -44,7 +53,6 @@ struct ipv6_devconf {
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
- __s32 proxy_ndp;
__s32 accept_source_route;
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -54,7 +62,6 @@ struct ipv6_devconf {
#ifdef CONFIG_IPV6_MROUTE
atomic_t mc_forwarding;
#endif
- __s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
@@ -75,7 +82,6 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
- __s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
__u32 ioam6_id;
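One payoff of the group markers above, a sketch assuming the generic <linux/cache.h> assertion helpers:

/* Assert that a hot field really landed inside the RX/TX group. */
CACHELINE_ASSERT_GROUP_MEMBER(struct ipv6_devconf, ipv6_devconf_read_txrx,
			      hop_limit);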
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 76458b6d53da..642272576582 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -404,10 +404,17 @@ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
-LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
-LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
-LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
-LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
+LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
+LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
#endif /* CONFIG_BPF_SYSCALL */
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 9b54c4f0677f..693eba9869e4 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -26,6 +26,7 @@
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
#define MARVELL_PHY_ID_88Q2110 0x002b0980
+#define MARVELL_PHY_ID_88Q2220 0x002b0b20
/* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 79ceee3c8673..68f8d2e970d4 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -373,6 +373,10 @@ static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 l
{
linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_100BT1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_1000BT1);
}
/**
@@ -409,6 +413,10 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
result |= MDIO_AN_T1_ADV_M_B10L;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_100BT1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_1000BT1;
return result;
}
@@ -440,6 +448,42 @@ static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
}
/**
+ * mii_eee_cap2_mod_linkmode_sup_t()
+ * @adv: target linkmode settings
+ * @val: register value
+ *
+ * A function that translates the value of the following register to linkmodes:
+ * IEEE 802.3-2022 45.2.3.11 "EEE control and capability 2" register (3.21)
+ */
+static inline void mii_eee_cap2_mod_linkmode_sup_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_adv_t()
+ * @adv: target linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the values of the following registers to linkmodes:
+ * IEEE 802.3-2022 45.2.7.16 "EEE advertisement 2" register (7.62)
+ * IEEE 802.3-2022 45.2.7.17 "EEE link partner ability 2" register (7.63)
+ * Note: Currently this function is identical to mii_eee_cap2_mod_linkmode_sup_t.
+ * For certain modes that are not yet supported, however, the bits differ;
+ * hence the two separate functions.
+ */
+static inline void mii_eee_cap2_mod_linkmode_adv_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
* linkmode_to_mii_eee_cap1_t()
* @adv: the linkmode advertisement settings
*
@@ -467,6 +511,25 @@ static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
}
/**
+ * linkmode_to_mii_eee_cap2_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates linkmode to value for IEEE 802.3-2022 45.2.7.16
+ * "EEE advertisement 2" register (7.62)
+ */
+static inline u32 linkmode_to_mii_eee_cap2_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, adv))
+ result |= MDIO_EEE_2_5GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_5GT;
+
+ return result;
+}
+
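A hedged sketch of the round trip through the new cap2 helpers; phydev and lp_adv are assumed to come from caller context:

/* Push the local EEE advertisement 2 register (7.62) from linkmodes,
 * then translate the partner's register (7.63) back into linkmodes.
 */
static int example_eee_cap2(struct phy_device *phydev, unsigned long *lp_adv)
{
	u32 adv2 = linkmode_to_mii_eee_cap2_t(phydev->advertising_eee);
	int val;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2, adv2);

	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
	if (val < 0)
		return val;

	mii_eee_cap2_mod_linkmode_adv_t(lp_adv, val);
	return 0;
}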
+/**
* mii_10base_t1_adv_mod_linkmode_t()
* @adv: linkmode advertisement settings
* @val: register value
diff --git a/include/linux/mfd/idtRC38xxx_reg.h b/include/linux/mfd/idtRC38xxx_reg.h
new file mode 100644
index 000000000000..ec11872f51ad
--- /dev/null
+++ b/include/linux/mfd/idtRC38xxx_reg.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on PolarBear_CSRs.RevA.xlsx (2023-04-21)
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef MFD_IDTRC38XXX_REG
+#define MFD_IDTRC38XXX_REG
+
+/* GLOBAL */
+#define SOFT_RESET_CTRL (0x15) /* Specific to FC3W */
+#define MISC_CTRL (0x14) /* Specific to FC3A */
+#define APLL_REINIT BIT(1)
+#define APLL_REINIT_VFC3A BIT(2)
+
+#define DEVICE_ID (0x2)
+#define DEVICE_ID_MASK (0x1000) /* Bit 12 is 1 if FC3W and 0 if FC3A */
+#define DEVICE_ID_SHIFT (12)
+
+/* FOD */
+#define FOD_0 (0x300)
+#define FOD_0_VFC3A (0x400)
+#define FOD_1 (0x340)
+#define FOD_1_VFC3A (0x440)
+#define FOD_2 (0x380)
+#define FOD_2_VFC3A (0x480)
+
+/* TDCAPLL */
+#define TDC_CTRL (0x44a) /* Specific to FC3W */
+#define TDC_ENABLE_CTRL (0x169) /* Specific to FC3A */
+#define TDC_DAC_CAL_CTRL (0x16a) /* Specific to FC3A */
+#define TDC_EN BIT(0)
+#define TDC_DAC_RECAL_REQ BIT(1)
+#define TDC_DAC_RECAL_REQ_VFC3A BIT(0)
+
+#define TDC_FB_DIV_INT_CNFG (0x442)
+#define TDC_FB_DIV_INT_CNFG_VFC3A (0x162)
+#define TDC_FB_DIV_INT_MASK GENMASK(7, 0)
+#define TDC_REF_DIV_CNFG (0x443)
+#define TDC_REF_DIV_CNFG_VFC3A (0x163)
+#define TDC_REF_DIV_CONFIG_MASK GENMASK(2, 0)
+
+/* TIME SYNC CHANNEL */
+#define TIME_CLOCK_SRC (0xa01) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT (0xa00) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT_MASK GENMASK(5, 0)
+
+#define SUB_SYNC_GEN_CNFG (0xa04)
+
+#define TOD_COUNTER_READ_REQ (0xa5f)
+#define TOD_COUNTER_READ_REQ_VFC3A (0x6df)
+#define TOD_SYNC_LOAD_VAL_CTRL (0xa10)
+#define TOD_SYNC_LOAD_VAL_CTRL_VFC3A (0x690)
+#define SYNC_COUNTER_MASK GENMASK_ULL(51, 0)
+#define SUB_SYNC_COUNTER_MASK GENMASK(30, 0)
+#define TOD_SYNC_LOAD_REQ_CTRL (0xa21)
+#define TOD_SYNC_LOAD_REQ_CTRL_VFC3A (0x6a1)
+#define SYNC_LOAD_ENABLE BIT(1)
+#define SUB_SYNC_LOAD_ENABLE BIT(0)
+#define SYNC_LOAD_REQ BIT(0)
+
+#define LPF_MODE_CNFG (0xa80)
+#define LPF_MODE_CNFG_VFC3A (0x700)
+enum lpf_mode {
+ LPF_DISABLED = 0,
+ LPF_WP = 1,
+ LPF_HOLDOVER = 2,
+ LPF_WF = 3,
+ LPF_INVALID = 4
+};
+#define LPF_CTRL (0xa98)
+#define LPF_CTRL_VFC3A (0x718)
+#define LPF_EN BIT(0)
+
+#define LPF_BW_CNFG (0xa81)
+#define LPF_BW_SHIFT GENMASK(7, 3)
+#define LPF_BW_MULT GENMASK(2, 0)
+#define LPF_BW_SHIFT_DEFAULT (0xb)
+#define LPF_BW_MULT_DEFAULT (0x0)
+#define LPF_BW_SHIFT_1PPS (0x5)
+
+#define LPF_WR_PHASE_CTRL (0xaa8)
+#define LPF_WR_PHASE_CTRL_VFC3A (0x728)
+#define LPF_WR_FREQ_CTRL (0xab0)
+#define LPF_WR_FREQ_CTRL_VFC3A (0x730)
+
+#define TIME_CLOCK_TDC_FANOUT_CNFG (0xB00)
+#define TIME_SYNC_TO_TDC_EN BIT(0)
+#define SIG1_MUX_SEL_MASK GENMASK(7, 4)
+#define SIG2_MUX_SEL_MASK GENMASK(11, 8)
+enum tdc_mux_sel {
+ REF0 = 0,
+ REF1 = 1,
+ REF2 = 2,
+ REF3 = 3,
+ REF_CLK5 = 4,
+ REF_CLK6 = 5,
+ DPLL_FB_TO_TDC = 6,
+ DPLL_FB_DIVIDED_TO_TDC = 7,
+ TIME_CLK_DIVIDED = 8,
+ TIME_SYNC = 9,
+};
+
+#define TIME_CLOCK_MEAS_CNFG (0xB04)
+#define TDC_MEAS_MODE BIT(0)
+enum tdc_meas_mode {
+ CONTINUOUS = 0,
+ ONE_SHOT = 1,
+ MEAS_MODE_INVALID = 2,
+};
+
+#define TIME_CLOCK_MEAS_DIV_CNFG (0xB08)
+#define TIME_REF_DIV_MASK GENMASK(29, 24)
+
+#define TIME_CLOCK_MEAS_CTRL (0xB10)
+#define TDC_MEAS_EN BIT(0)
+#define TDC_MEAS_START BIT(1)
+
+#define TDC_FIFO_READ_REQ (0xB2F)
+#define TDC_FIFO_READ (0xB30)
+#define COARSE_MEAS_MASK GENMASK_ULL(39, 13)
+#define FINE_MEAS_MASK GENMASK(12, 0)
+
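For reference, one FIFO word can be split with the masks above and <linux/bitfield.h>; fifo_word is a hypothetical value read out via TDC_FIFO_READ:

/* Split one 40-bit measurement word into its two fields. */
static void example_split_fifo_word(u64 fifo_word, u32 *coarse, u32 *fine)
{
	*coarse = FIELD_GET(COARSE_MEAS_MASK, fifo_word);	/* bits 39:13 */
	*fine = FIELD_GET(FINE_MEAS_MASK, (u32)fifo_word);	/* bits 12:0 */
}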
+#define TDC_FIFO_CTRL (0xB12)
+#define FIFO_CLEAR BIT(0)
+#define TDC_FIFO_STS (0xB38)
+#define FIFO_FULL BIT(1)
+#define FIFO_EMPTY BIT(0)
+#define TDC_FIFO_EVENT (0xB39)
+#define FIFO_OVERRUN BIT(1)
+
+/* DPLL */
+#define MAX_REFERENCE_INDEX (3)
+#define MAX_NUM_REF_PRIORITY (4)
+
+#define MAX_DPLL_INDEX (2)
+
+#define DPLL_STS (0x580)
+#define DPLL_STS_VFC3A (0x571)
+#define DPLL_STATE_STS_MASK (0x70)
+#define DPLL_STATE_STS_SHIFT (4)
+#define DPLL_REF_SEL_STS_MASK (0x6)
+#define DPLL_REF_SEL_STS_SHIFT (1)
+
+#define DPLL_REF_PRIORITY_CNFG (0x502)
+#define DPLL_REFX_PRIORITY_DISABLE_MASK (0xf)
+#define DPLL_REF0_PRIORITY_ENABLE_AND_SET_MASK (0x31)
+#define DPLL_REF1_PRIORITY_ENABLE_AND_SET_MASK (0xc2)
+#define DPLL_REF2_PRIORITY_ENABLE_AND_SET_MASK (0x304)
+#define DPLL_REF3_PRIORITY_ENABLE_AND_SET_MASK (0xc08)
+#define DPLL_REF0_PRIORITY_SHIFT (4)
+#define DPLL_REF1_PRIORITY_SHIFT (6)
+#define DPLL_REF2_PRIORITY_SHIFT (8)
+#define DPLL_REF3_PRIORITY_SHIFT (10)
+
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKED = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_WRITE_FREQUENCY = 3,
+ DPLL_STATE_ACQUIRE = 4,
+ DPLL_STATE_HITLESS_SWITCH = 5,
+ DPLL_STATE_MAX = DPLL_STATE_HITLESS_SWITCH
+};
+
+/* REFMON */
+#define LOSMON_STS_0 (0x81e)
+#define LOSMON_STS_0_VFC3A (0x18e)
+#define LOSMON_STS_1 (0x82e)
+#define LOSMON_STS_1_VFC3A (0x19e)
+#define LOSMON_STS_2 (0x83e)
+#define LOSMON_STS_2_VFC3A (0x1ae)
+#define LOSMON_STS_3 (0x84e)
+#define LOSMON_STS_3_VFC3A (0x1be)
+#define LOS_STS_MASK (0x1)
+
+#define FREQMON_STS_0 (0x874)
+#define FREQMON_STS_0_VFC3A (0x1d4)
+#define FREQMON_STS_1 (0x894)
+#define FREQMON_STS_1_VFC3A (0x1f4)
+#define FREQMON_STS_2 (0x8b4)
+#define FREQMON_STS_2_VFC3A (0x214)
+#define FREQMON_STS_3 (0x8d4)
+#define FREQMON_STS_3_VFC3A (0x234)
+#define FREQ_FAIL_STS_SHIFT (31)
+
+/* Firmware interface */
+#define TIME_CLK_FREQ_ADDR (0xffa0)
+#define XTAL_FREQ_ADDR (0xffa1)
+
+/*
+ * Return the register address and field mask based on the passed-in firmware version
+ */
+#define IDTFC3_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+#define IDTFC3_FW_FIELD(FW, VER, FIELD) (((FW) < (VER)) ? (FIELD) : (FIELD##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ VFC3W = 1,
+ VFC3A = 2
+};
+
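How the dispatch macros expand for one of the REFMON registers defined above, purely as illustration:

/* REG##_##VER token pasting selects the firmware-specific layout. */
u16 reg = IDTFC3_FW_REG(fw_ver, VFC3A, LOSMON_STS_0);
/* fw_ver <  VFC3A: LOSMON_STS_0       (0x81e, FC3W layout)
 * fw_ver >= VFC3A: LOSMON_STS_0_VFC3A (0x18e, FC3A layout)
 */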
+/* XTAL_FREQ_ADDR/TIME_CLK_FREQ_ADDR */
+enum {
+ FREQ_MIN = 0,
+ FREQ_25M = 1,
+ FREQ_49_152M = 2,
+ FREQ_50M = 3,
+ FREQ_100M = 4,
+ FREQ_125M = 5,
+ FREQ_250M = 6,
+ FREQ_MAX
+};
+
+struct idtfc3_hw_param {
+ u32 xtal_freq;
+ u32 time_clk_freq;
+};
+
+struct idtfc3_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+static inline void idtfc3_default_hw_param(struct idtfc3_hw_param *hw_param)
+{
+ hw_param->xtal_freq = 49152000;
+ hw_param->time_clk_freq = 25000000;
+}
+
+static inline int idtfc3_set_hw_param(struct idtfc3_hw_param *hw_param,
+ u16 addr, u8 val)
+{
+ if (addr == XTAL_FREQ_ADDR)
+ switch (val) {
+ case FREQ_49_152M:
+ hw_param->xtal_freq = 49152000;
+ break;
+ case FREQ_50M:
+ hw_param->xtal_freq = 50000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if (addr == TIME_CLK_FREQ_ADDR)
+ switch (val) {
+ case FREQ_25M:
+ hw_param->time_clk_freq = 25000000;
+ break;
+ case FREQ_50M:
+ hw_param->time_clk_freq = 50000000;
+ break;
+ case FREQ_100M:
+ hw_param->time_clk_freq = 100000000;
+ break;
+ case FREQ_125M:
+ hw_param->time_clk_freq = 125000000;
+ break;
+ case FREQ_250M:
+ hw_param->time_clk_freq = 250000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EFAULT;
+
+ return 0;
+}
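A hypothetical configuration step tying the two helpers together:

static int example_configure(void)
{
	struct idtfc3_hw_param p;

	idtfc3_default_hw_param(&p);	/* 49.152 MHz XTAL, 25 MHz time clock */
	return idtfc3_set_hw_param(&p, XTAL_FREQ_ADDR, FREQ_50M);
}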
+
+#endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 41f03b352401..bf9324a31ae9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -823,6 +823,7 @@ struct mlx5_core_dev {
struct blocking_notifier_head macsec_nh;
#endif
u64 num_ipsec_offloads;
+ struct mlx5_sd *sd;
};
struct mlx5_db {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 486b7492050c..49f660563e49 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10253,7 +10253,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcqi[0x1];
u8 mcqs[0x1];
- u8 regs_95_to_87[0x9];
+ u8 regs_95_to_90[0x6];
+ u8 mpir[0x1];
+ u8 regs_88_to_87[0x2];
u8 mpegc[0x1];
u8 mtutc[0x1];
u8 regs_84_to_68[0x11];
@@ -10663,6 +10665,7 @@ enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
};
enum {
@@ -12707,6 +12710,14 @@ enum mlx5_msees_oper_status {
MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
};
+enum mlx5_msees_failure_reason {
+ MLX5_MSEES_FAILURE_REASON_UNDEFINED_ERROR = 0x0,
+ MLX5_MSEES_FAILURE_REASON_PORT_DOWN = 0x1,
+ MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF = 0x2,
+ MLX5_MSEES_FAILURE_REASON_NET_SYNCHRONIZER_DEVICE_ERROR = 0x3,
+ MLX5_MSEES_FAILURE_REASON_LACK_OF_RESOURCES = 0x4,
+};
+
struct mlx5_ifc_msees_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
diff --git a/include/linux/net.h b/include/linux/net.h
index c9b4a63791a4..15df6d5f27a7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -299,10 +299,7 @@ do { \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
#define net_dbg_ratelimited(fmt, ...) \
- do { \
- if (0) \
- no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
- } while (0)
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
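Why the simplification is safe:

/* no_printk() already compiles to nothing while still type-checking
 * the format string against its arguments, so the old "if (0)"
 * wrapper added no value.
 */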
#define net_get_random_once(buf, nbytes) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 78a09af89e39..c6f6ac779b34 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -225,12 +225,6 @@ struct net_device_core_stats {
#include <linux/cache.h>
#include <linux/skbuff.h>
-#ifdef CONFIG_RPS
-#include <linux/static_key.h>
-extern struct static_key_false rps_needed;
-extern struct static_key_false rfs_needed;
-#endif
-
struct neighbour;
struct neigh_parms;
struct sk_buff;
@@ -730,86 +724,10 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
#endif
}
-#ifdef CONFIG_RPS
-/*
- * This structure holds an RPS map which can be of variable length. The
- * map is an array of CPUs.
- */
-struct rps_map {
- unsigned int len;
- struct rcu_head rcu;
- u16 cpus[];
-};
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
-
-/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
- * tail pointer for that CPU's input queue at the time of last enqueue, and
- * a hardware filter index.
- */
-struct rps_dev_flow {
- u16 cpu;
- u16 filter;
- unsigned int last_qtail;
-};
-#define RPS_NO_FILTER 0xffff
-
-/*
- * The rps_dev_flow_table structure contains a table of flow mappings.
- */
-struct rps_dev_flow_table {
- unsigned int mask;
- struct rcu_head rcu;
- struct rps_dev_flow flows[];
-};
-#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- ((_num) * sizeof(struct rps_dev_flow)))
-
-/*
- * The rps_sock_flow_table contains mappings of flows to the last CPU
- * on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high-order bits
- * of flow hash, lower part is CPU number.
- * rps_cpu_mask is used to partition the space, depending on number of
- * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
- * meaning we use 32-6=26 bits for the hash.
- */
-struct rps_sock_flow_table {
- u32 mask;
-
- u32 ents[] ____cacheline_aligned_in_smp;
-};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
-
-#define RPS_NO_CPU 0xffff
-
-extern u32 rps_cpu_mask;
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
-static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
-{
- if (table && hash) {
- unsigned int index = hash & table->mask;
- u32 val = hash & ~rps_cpu_mask;
-
- /* We only give a hint, preemption can change CPU under us */
- val |= raw_smp_processor_id();
-
- /* The following WRITE_ONCE() is paired with the READ_ONCE()
- * here, and another one in get_rps_cpu().
- */
- if (READ_ONCE(table->ents[index]) != val)
- WRITE_ONCE(table->ents[index], val);
- }
-}
-
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
-#endif /* CONFIG_RPS */
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
@@ -1060,7 +978,7 @@ struct xfrmdev_ops {
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
- void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -1813,6 +1731,15 @@ enum netdev_stat_type {
NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
};
+enum netdev_reg_state {
+ NETREG_UNINITIALIZED = 0,
+ NETREG_REGISTERED, /* completed register_netdevice */
+ NETREG_UNREGISTERING, /* called unregister_netdevice */
+ NETREG_UNREGISTERED, /* completed unregister todo */
+ NETREG_RELEASED, /* called free_netdev */
+ NETREG_DUMMY, /* dummy device for NAPI poll */
+};
+
/**
* struct net_device - The DEVICE structure.
*
@@ -2028,6 +1955,7 @@ enum netdev_stat_type {
*
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
+ * @stat_ops: Optional ops for queue-aware statistics
*
* @gso_max_size: Maximum size of generic segmentation offload
* @tso_max_size: Device (as in HW) limit on the max TSO request size
@@ -2252,7 +2180,7 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
- unsigned char operstate;
+ unsigned int operstate;
unsigned char link_mode;
unsigned char if_port;
@@ -2375,13 +2303,7 @@ struct net_device {
struct list_head link_watch_list;
- enum { NETREG_UNINITIALIZED=0,
- NETREG_REGISTERED, /* completed register_netdevice */
- NETREG_UNREGISTERING, /* called unregister_netdevice */
- NETREG_UNREGISTERED, /* completed unregister todo */
- NETREG_RELEASED, /* called free_netdev */
- NETREG_DUMMY, /* dummy device for NAPI poll */
- } reg_state:8;
+ u8 reg_state;
bool dismantle;
@@ -2414,6 +2336,8 @@ struct net_device {
const struct rtnl_link_ops *rtnl_link_ops;
+ const struct netdev_stat_ops *stat_ops;
+
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SEGS 65535u
#define GSO_LEGACY_MAX_SIZE 65536u
@@ -3072,8 +2996,6 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info);
-extern rwlock_t dev_base_lock; /* Device list lock */
-
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
@@ -3196,7 +3118,7 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-int init_dummy_netdev(struct net_device *dev);
+void init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
@@ -3966,7 +3888,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
@@ -3979,8 +3901,6 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
-struct packet_offload *gro_find_receive_by_type(__be16 type);
-struct packet_offload *gro_find_complete_by_type(__be16 type);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -4350,8 +4270,10 @@ static inline bool netif_testing(const struct net_device *dev)
*/
static inline bool netif_oper_up(const struct net_device *dev)
{
- return (dev->operstate == IF_OPER_UP ||
- dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+ unsigned int operstate = READ_ONCE(dev->operstate);
+
+ return operstate == IF_OPER_UP ||
+ operstate == IF_OPER_UNKNOWN /* backward compat */;
}
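The local variable here is not cosmetic:

/* Reading dev->operstate once into a local makes the two comparisons
 * self-consistent; two separate loads could observe different values
 * while another CPU updates the field without rtnl_lock held.
 */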
/**
@@ -4790,11 +4712,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
-extern int netdev_max_backlog;
-extern int dev_rx_weight;
-extern int dev_tx_weight;
-extern int gro_normal_batch;
-
enum {
NESTED_SYNC_IMM_BIT,
NESTED_SYNC_TODO_BIT,
@@ -5251,7 +5168,9 @@ static inline const char *netdev_name(const struct net_device *dev)
static inline const char *netdev_reg_state(const struct net_device *dev)
{
- switch (dev->reg_state) {
+ u8 reg_state = READ_ONCE(dev->reg_state);
+
+ switch (reg_state) {
case NETREG_UNINITIALIZED: return " (uninitialized)";
case NETREG_REGISTERED: return "";
case NETREG_UNREGISTERING: return " (unregistering)";
@@ -5260,7 +5179,7 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
case NETREG_DUMMY: return " (dummy)";
}
- WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+ WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
return " (unknown)";
}
@@ -5302,7 +5221,6 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
-extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
extern struct net_device *blackhole_netdev;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index ce660d51549b..2683b2b77612 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -370,7 +370,6 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict, unsigned short family);
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 1a4445bf2ab9..5df7340d4dab 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -291,6 +291,7 @@ struct netlink_callback {
u16 answer_flags;
u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ int flags;
bool strict_check;
union {
u8 ctx[48];
@@ -323,6 +324,7 @@ struct netlink_dump_control {
void *data;
struct module *module;
u32 min_dump_alloc;
+ int flags;
};
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 684efaeca07c..3f68b8239bb1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -30,6 +30,7 @@
#include <linux/refcount.h>
#include <linux/atomic.h>
+#include <net/eee.h>
#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
@@ -54,6 +55,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
@@ -65,6 +67,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
+#define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features)
extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
@@ -329,6 +332,7 @@ struct mdio_bus_stats {
* struct phy_package_shared - Shared information in PHY packages
* @base_addr: Base PHY address of PHY package used to combine PHYs
* in one package and for offset calculation of phy_package_read/write
+ * @np: Pointer to the device node when the PHY package is defined in DT
* @refcnt: Number of PHYs connected to this shared data
* @flags: Initialization of PHY package
* @priv_size: Size of the shared private data @priv
@@ -340,6 +344,8 @@ struct mdio_bus_stats {
*/
struct phy_package_shared {
u8 base_addr;
+ /* With a PHY package defined in DT, this points to the PHY package node */
+ struct device_node *np;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -589,6 +595,8 @@ struct macsec_ops;
* @supported_eee: supported PHY EEE linkmodes
* @advertising_eee: Currently advertised EEE linkmodes
* @eee_enabled: Flag indicating whether the EEE feature is enabled
+ * @enable_tx_lpi: When True, MAC should transmit LPI to PHY
+ * @eee_cfg: User configuration of EEE
* @lp_advertising: Current link partner advertised linkmodes
* @host_interfaces: PHY interface modes supported by host
* @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
@@ -638,7 +646,7 @@ struct phy_device {
/* Information about the PHY type */
/* And management functions */
- struct phy_driver *drv;
+ const struct phy_driver *drv;
struct device_link *devlink;
@@ -698,7 +706,7 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
- /* used for eee validation */
+ /* used for eee validation and configuration */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
bool eee_enabled;
@@ -708,6 +716,8 @@ struct phy_device {
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
+ bool enable_tx_lpi;
+ struct eee_config eee_cfg;
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
@@ -852,6 +862,15 @@ struct phy_plca_status {
bool pst;
};
+/* Modes for PHY LED configuration */
+enum phy_led_modes {
+ PHY_LED_ACTIVE_LOW = 0,
+ PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1,
+
+ /* keep it last */
+ __PHY_LED_MODES_NUM,
+};
+
/**
* struct phy_led: An LED driven by the PHY
*
@@ -1145,6 +1164,19 @@ struct phy_driver {
int (*led_hw_control_get)(struct phy_device *dev, u8 index,
unsigned long *rules);
+ /**
+ * @led_polarity_set: Set the LED polarity modes
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @modes: bitmap of LED polarity modes
+ *
+ * Configure LED with all the required polarity modes in @modes
+ * to make it correctly turn ON or OFF.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*led_polarity_set)(struct phy_device *dev, int index,
+ unsigned long modes);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -1851,7 +1883,7 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
/* Clause 37 */
int genphy_c37_config_aneg(struct phy_device *phydev);
-int genphy_c37_read_status(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1886,9 +1918,9 @@ int genphy_c45_plca_get_status(struct phy_device *phydev,
int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
unsigned long *lp, bool *is_enabled);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
- struct ethtool_eee *data);
+ struct ethtool_keee *data);
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
- struct ethtool_eee *data);
+ struct ethtool_keee *data);
int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
@@ -1938,8 +1970,10 @@ int phy_get_rate_matching(struct phy_device *phydev,
void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
+void phy_advertise_eee_all(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
+void phy_support_eee(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg);
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
@@ -1966,8 +2000,8 @@ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data);
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
void phy_ethtool_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
@@ -1977,9 +2011,12 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
@@ -2100,7 +2137,7 @@ static inline bool phy_package_probe_once(struct phy_device *phydev)
return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
}
-extern struct bus_type mdio_bus_type;
+extern const struct bus_type mdio_bus_type;
struct mdio_board_info {
const char *bus_id;
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index d589f89c612c..9a57deefcb07 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -480,9 +480,6 @@ void pcs_disable(struct phylink_pcs *pcs);
* negotiation completion state in @state->an_complete, and link up state
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
- *
- * When present, this overrides pcs_get_state() in &struct
- * phylink_pcs_ops.
*/
void pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state);
@@ -584,8 +581,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
int phylink_init_eee(struct phylink *, bool);
-int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
-int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee);
+int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
int phylink_speed_down(struct phylink *pl, bool sync);
int phylink_speed_up(struct phylink *pl);
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index f922a192fe58..ec99b7b73d1d 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 201 Broadcom Corporation
+ * Copyright (c) 2016 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
index 8a5f9f0b2c52..724e1f57b81f 100644
--- a/include/linux/platform_data/mdio-bcm-unimac.h
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -1,11 +1,14 @@
#ifndef __MDIO_BCM_UNIMAC_PDATA_H
#define __MDIO_BCM_UNIMAC_PDATA_H
+struct clk;
+
struct unimac_mdio_pdata {
u32 phy_mask;
int (*wait_func)(void *data);
void *wait_func_data;
const char *bus_name;
+ struct clk *clk;
};
#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index f177416635a2..8c659db4da6b 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -33,6 +33,7 @@ enum ksz_chip_id {
KSZ9897_CHIP_ID = 0x00989700,
KSZ9893_CHIP_ID = 0x00989300,
KSZ9563_CHIP_ID = 0x00956300,
+ KSZ8567_CHIP_ID = 0x00856700,
KSZ9567_CHIP_ID = 0x00956700,
LAN9370_CHIP_ID = 0x00937000,
LAN9371_CHIP_ID = 0x00937100,
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
index c510734405bb..89d0ec6f7d46 100644
--- a/include/linux/platform_data/net-cw1200.h
+++ b/include/linux/platform_data/net-cw1200.h
@@ -14,8 +14,6 @@ struct cw1200_platform_data_spi {
/* All others are optional */
bool have_5ghz;
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata,
@@ -30,8 +28,6 @@ struct cw1200_platform_data_sdio {
/* All others are optional */
bool have_5ghz;
bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int irq; /* IRQ line or 0 to use SDIO IRQ */
int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 1ef4e0f9bd2a..6e4b8206c7d0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -200,6 +200,7 @@ struct ptp_clock;
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
+ PTP_CLOCK_EXTOFF,
PTP_CLOCK_PPS,
PTP_CLOCK_PPSUSR,
};
@@ -210,6 +211,7 @@ enum ptp_clock_events {
* @type: One of the ptp_clock_events enumeration values.
* @index: Identifies the source of the event.
* @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @offset: Offset measured when the event occurred (%PTP_CLOCK_EXTOFF only).
* @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
*/
@@ -218,6 +220,7 @@ struct ptp_clock_event {
int index;
union {
u64 timestamp;
+ s64 offset;
struct pps_event_time pps_times;
};
};
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 410529fca18b..cdfc897f1e3c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -47,6 +47,7 @@ extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
+extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;
@@ -171,4 +172,6 @@ rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
}
+void netdev_set_operstate(struct net_device *dev, int newstate);
+
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index d0eb20f90b26..15804af54f37 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <linux/bpf.h>
#include <uapi/linux/lsm.h>
struct linux_binprm;
@@ -2064,15 +2065,22 @@ static inline void securityfs_remove(struct dentry *dentry)
union bpf_attr;
struct bpf_map;
struct bpf_prog;
-struct bpf_prog_aux;
+struct bpf_token;
#ifdef CONFIG_SECURITY
extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
-extern int security_bpf_map_alloc(struct bpf_map *map);
+extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token);
extern void security_bpf_map_free(struct bpf_map *map);
-extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
-extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
+extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token);
+extern void security_bpf_prog_free(struct bpf_prog *prog);
+extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path);
+extern void security_bpf_token_free(struct bpf_token *token);
+extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
unsigned int size)
@@ -2090,7 +2098,8 @@ static inline int security_bpf_prog(struct bpf_prog *prog)
return 0;
}
-static inline int security_bpf_map_alloc(struct bpf_map *map)
+static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
{
return 0;
}
@@ -2098,13 +2107,33 @@ static inline int security_bpf_map_alloc(struct bpf_map *map)
static inline void security_bpf_map_free(struct bpf_map *map)
{ }
-static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
{
return 0;
}
-static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
+static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
+
+static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+{
+ return 0;
+}
+
+static inline void security_bpf_token_free(struct bpf_token *token)
+{ }
+
+static inline int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ return 0;
+}
+
+static inline int security_bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2dde34c29203..3023bc2be6a1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#endif
#include <net/net_debug.h>
#include <net/dropreason-core.h>
+#include <net/netmem.h>
/**
* DOC: skb checksums
@@ -359,7 +360,11 @@ extern int sysctl_max_skb_frags;
*/
#define GSO_BY_FRAGS 0xFFFF
-typedef struct bio_vec skb_frag_t;
+typedef struct skb_frag {
+ netmem_ref netmem;
+ unsigned int len;
+ unsigned int offset;
+} skb_frag_t;
/**
* skb_frag_size() - Returns the size of a skb fragment
@@ -367,7 +372,7 @@ typedef struct bio_vec skb_frag_t;
*/
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
- return frag->bv_len;
+ return frag->len;
}
/**
@@ -377,7 +382,7 @@ static inline unsigned int skb_frag_size(const skb_frag_t *frag)
*/
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
- frag->bv_len = size;
+ frag->len = size;
}
/**
@@ -387,7 +392,7 @@ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
*/
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
- frag->bv_len += delta;
+ frag->len += delta;
}
/**
@@ -397,7 +402,7 @@ static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
- frag->bv_len -= delta;
+ frag->len -= delta;
}
/**
@@ -417,7 +422,7 @@ static inline bool skb_frag_must_loop(struct page *p)
* skb_frag_foreach_page - loop over pages in a fragment
*
* @f: skb frag to operate on
- * @f_off: offset from start of f->bv_page
+ * @f_off: offset from start of f->netmem
* @f_len: length from f_off to loop over
* @p: (temp var) current page
* @p_off: (temp var) offset from start of current page,
@@ -817,9 +822,9 @@ typedef unsigned char *sk_buff_data_t;
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
* @mono_delivery_time: When set, skb->tstamp has the
- * delivery_time in mono clock base (i.e. EDT). Otherwise, the
- * skb->tstamp has the (rcv) timestamp at ingress and
- * delivery_time at egress.
+ * delivery_time in mono clock base (i.e., EDT) or a clock base chosen
+ * by SO_TXTIME. If zero, skb->tstamp has the (rcv) timestamp at
+ * ingress.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
* @alloc_cpu: CPU which did the skb allocation.
@@ -1232,6 +1237,24 @@ static inline bool skb_unref(struct sk_buff *skb)
return true;
}
+static inline bool skb_data_unref(const struct sk_buff *skb,
+ struct skb_shared_info *shinfo)
+{
+ int bias;
+
+ if (!skb->cloned)
+ return true;
+
+ bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
+
+ if (atomic_read(&shinfo->dataref) == bias)
+ smp_rmb();
+ else if (atomic_sub_return(bias, &shinfo->dataref))
+ return false;
+
+ return true;
+}
+
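A reading aid for the bias arithmetic, assuming the dataref layout long documented in this header:

/* shinfo->dataref packs two counters: the low SKB_DATAREF_SHIFT bits
 * count full (header + payload) references, the high bits count
 * payload-only references.  A nohdr clone therefore expects
 * (1 << SKB_DATAREF_SHIFT) + 1 as its "last user" value, a plain skb
 * expects 1; matching the bias lets the caller skip the atomic RMW
 * entirely and merely order against prior readers with smp_rmb().
 */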
void __fix_address
kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
@@ -1266,7 +1289,6 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
@@ -2429,22 +2451,37 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
+static inline void skb_frag_fill_netmem_desc(skb_frag_t *frag,
+ netmem_ref netmem, int off,
+ int size)
+{
+ frag->netmem = netmem;
+ frag->offset = off;
+ skb_frag_size_set(frag, size);
+}
+
static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
struct page *page,
int off, int size)
{
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
+ skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
+}
+
+static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,
+ int i, netmem_ref netmem,
+ int off, int size)
+{
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, netmem, off, size);
}
static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
int i, struct page *page,
int off, int size)
{
- skb_frag_t *frag = &shinfo->frags[i];
-
- skb_frag_fill_page_desc(frag, page, off, size);
+ __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,
+ size);
}
/**
@@ -2460,10 +2497,10 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
}
/**
- * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * __skb_fill_netmem_desc - initialise a fragment in an skb
* @skb: buffer containing fragment to be initialised
- * @i: paged fragment index to initialise
- * @page: the page to use for this fragment
+ * @i: fragment index to initialise
+ * @netmem: the netmem to use for this fragment
* @off: the offset to the data within @netmem
* @size: the length of the data
*
@@ -2472,10 +2509,12 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
*
* Does not take any additional reference on the fragment.
*/
-static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
{
- __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
+ struct page *page = netmem_to_page(netmem);
+
+ __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
@@ -2483,7 +2522,20 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
*/
page = compound_head(page);
if (page_is_pfmemalloc(page))
- skb->pfmemalloc = true;
+ skb->pfmemalloc = true;
+}
+
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
+}
+
+static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, netmem, off, size);
+ skb_shinfo(skb)->nr_frags = i + 1;
}
/**
@@ -2503,8 +2555,7 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
- __skb_fill_page_desc(skb, i, page, off, size);
- skb_shinfo(skb)->nr_frags = i + 1;
+ skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
/**
@@ -2528,8 +2579,16 @@ static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
shinfo->nr_frags = i + 1;
}
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize);
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize);
+
+static inline void skb_add_rx_frag(struct sk_buff *skb, int i,
+ struct page *page, int off, int size,
+ unsigned int truesize)
+{
+ skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size,
+ truesize);
+}
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);
@@ -2642,6 +2701,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->data -= len;
skb->len += len;
return skb->data;
@@ -2650,6 +2711,8 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->len -= len;
if (unlikely(skb->len < skb->data_len)) {
#if defined(CONFIG_DEBUG_NET)
@@ -2674,6 +2737,8 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
if (likely(len <= skb_headlen(skb)))
return SKB_NOT_DROPPED_YET;
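
The new DEBUG_NET_WARN_ON_ONCE(len > INT_MAX) checks in __skb_push(), __skb_pull() and pskb_may_pull_reason() above catch a classic bug: a caller that computes a negative int length and passes it to an unsigned parameter, which silently wraps to a value in the gigabytes. A small stand-alone demonstration, with the WARN modelled as an fprintf:

#include <limits.h>
#include <stdio.h>

static void pull(unsigned int len)
{
        if (len > INT_MAX)
                fprintf(stderr, "WARN: len %u looks like a negative int\n",
                        len);
}

int main(void)
{
        int delta = -4; /* buggy caller computed a negative length */

        pull(delta);    /* implicit conversion yields 4294967292 */
        return 0;
}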
@@ -2846,6 +2911,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
+{
+ return skb->inner_network_header > 0;
+}
+
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
@@ -2983,6 +3053,7 @@ static inline int skb_transport_offset(const struct sk_buff *skb)
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->transport_header - skb->network_header;
}
@@ -3378,7 +3449,7 @@ static inline void skb_propagate_pfmemalloc(const struct page *page,
*/
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
- return frag->bv_offset;
+ return frag->offset;
}
/**
@@ -3388,7 +3459,7 @@ static inline unsigned int skb_frag_off(const skb_frag_t *frag)
*/
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
- frag->bv_offset += delta;
+ frag->offset += delta;
}
/**
@@ -3398,7 +3469,7 @@ static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
- frag->bv_offset = offset;
+ frag->offset = offset;
}
/**
@@ -3409,7 +3480,7 @@ static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
static inline void skb_frag_off_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_offset = fragfrom->bv_offset;
+ fragto->offset = fragfrom->offset;
}
/**
@@ -3420,7 +3491,7 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto,
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
- return frag->bv_page;
+ return netmem_to_page(frag->netmem);
}
/**
@@ -3446,6 +3517,10 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom);
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog);
bool napi_pp_put_page(struct page *page, bool napi_safe);
static inline void
@@ -3524,7 +3599,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
static inline void skb_frag_page_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_page = fragfrom->bv_page;
+ fragto->netmem = fragfrom->netmem;
}
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 0b9ecd8cf979..110978dc9af1 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -13,6 +13,7 @@ struct nlmsghdr;
struct sock;
struct sock_diag_handler {
+ struct module *owner;
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
int (*get_info)(struct sk_buff *skb, struct sock *sk);
@@ -22,8 +23,13 @@ struct sock_diag_handler {
int sock_diag_register(const struct sock_diag_handler *h);
void sock_diag_unregister(const struct sock_diag_handler *h);
-void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+struct sock_diag_inet_compat {
+ struct module *owner;
+ int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr);
+void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr);
u64 __sock_gen_cookie(struct sock *sk);
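
Both sock_diag hooks above gain a struct module *owner so the core can pin the handler's module around an indirect call instead of relying on external serialization. A user-space sketch of that pinning pattern, with simplified stand-ins for try_module_get()/module_put(); the real try_module_get() can fail while a module is being unloaded, which this model glosses over:

#include <stdbool.h>
#include <stdio.h>

struct module { int refcnt; };

static bool try_module_get(struct module *m)
{
        if (m)
                m->refcnt++;
        return true;
}

static void module_put(struct module *m)
{
        if (m)
                m->refcnt--;
}

struct handler {
        struct module *owner;
        int (*dump)(void);
};

/* Core-side call site: hold a reference on the owner for the duration
 * of the callback so the backing module cannot disappear mid-call. */
static int call_handler(const struct handler *h)
{
        int err;

        if (!try_module_get(h->owner))
                return -1;
        err = h->dump();
        module_put(h->owner);
        return err;
}

static int my_dump(void) { puts("dump"); return 0; }

int main(void)
{
        struct module me = { 0 };
        struct handler h = { .owner = &me, .dump = my_dump };

        return call_handler(&h);
}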
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dee5ad6e48c5..dfa1828cd756 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -127,6 +127,7 @@ struct stmmac_est {
u32 gcl_unaligned[EST_GCL];
u32 gcl[EST_GCL];
u32 gcl_size;
+ u32 max_sdu[MTL_MAX_TX_QUEUES];
};
struct stmmac_rxq_cfg {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a1c47a6d69b0..55399ee2a57e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -264,10 +264,10 @@ struct tcp_sock {
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
u32 lsndtime;
u32 mdev_us; /* medium deviation */
+ u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
u64 tcp_mstamp; /* most recent packet received/sent */
- u32 rtt_seq; /* sequence number to update rttvar */
struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
struct sk_buff *highest_sack; /* skb just after the highest
* skb with SACKed bit set
@@ -304,7 +304,7 @@ struct tcp_sock {
__cacheline_group_end(tcp_sock_write_txrx);
/* RX read-write hotpath cache lines */
- __cacheline_group_begin(tcp_sock_write_rx);
+ __cacheline_group_begin(tcp_sock_write_rx) __aligned(8);
u64 bytes_received;
/* RFC4898 tcpEStatsAppHCThruOctetsReceived
* sum(delta(rcv_nxt)), or how many bytes
@@ -350,7 +350,6 @@ struct tcp_sock {
u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
* total number of DSACK blocks received
*/
- u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
@@ -384,12 +383,12 @@ struct tcp_sock {
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
/* RTT measurement */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
- u8 keepalive_probes; /* num of allowed keep alive probes */
u32 reord_seen; /* number of data packet reordering events */
/*
@@ -402,6 +401,7 @@ struct tcp_sock {
u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
+ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;
@@ -477,8 +477,8 @@ struct tcp_sock {
bool is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
- bool (*smc_hs_congested)(const struct sock *sk);
bool syn_smc; /* SYN includes SMC */
+ bool (*smc_hs_congested)(const struct sock *sk);
#endif
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
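
The tcp_sock churn above is pure layout work: fields are moved so each sits with the path that writes it, and the RX group is forced to 8-byte alignment so its leading u64 starts a naturally aligned run. A toy illustration of how such grouping can be checked with offsetof; the struct and field names are invented:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

/* Fields written together on one path share a region; the RX group
 * mirrors __cacheline_group_begin(tcp_sock_write_rx) __aligned(8). */
struct toy_sock {
        /* TX path, written together */
        unsigned int mdev_us;
        unsigned int rtt_seq;
        unsigned long long tcp_wstamp_ns;
        /* RX path; alignment keeps the u64 naturally aligned */
        alignas(8) unsigned long long bytes_received;
};

int main(void)
{
        printf("rtt_seq        @ %zu\n", offsetof(struct toy_sock, rtt_seq));
        printf("bytes_received @ %zu (8-byte aligned: %d)\n",
               offsetof(struct toy_sock, bytes_received),
               offsetof(struct toy_sock, bytes_received) % 8 == 0);
        return 0;
}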
diff --git a/include/linux/udp.h b/include/linux/udp.h
index d04188714dca..3748e82b627b 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -92,6 +92,9 @@ struct udp_sock {
/* This field follows the rcvbuf value and is touched by udp_recvmsg */
int forward_threshold;
+
+ /* Cache friendly copy of sk->sk_peek_off >= 0 */
+ bool peeking_with_offset;
};
#define udp_test_bit(nr, sk) \
@@ -109,6 +112,13 @@ struct udp_sock {
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
+static inline int udp_set_peek_off(struct sock *sk, int val)
+{
+ sk_set_peek_off(sk, val);
+ WRITE_ONCE(udp_sk(sk)->peeking_with_offset, val >= 0);
+ return 0;
+}
+
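
udp_set_peek_off() above mirrors the derived predicate sk_peek_off >= 0 into a bool that lives with the other hot udp_sock fields, so the receive path tests one cache-friendly flag. A simplified stand-alone sketch of that caching idiom, with plain assignments standing in for WRITE_ONCE()/READ_ONCE():

#include <stdbool.h>
#include <stdio.h>

struct sock { int peek_off; };
struct udp_sock { struct sock sk; bool peeking_with_offset; };

static void udp_set_peek_off(struct udp_sock *up, int val)
{
        up->sk.peek_off = val;
        up->peeking_with_offset = val >= 0;     /* cached predicate */
}

static void recvmsg_fast_path(const struct udp_sock *up)
{
        if (up->peeking_with_offset)    /* one flag test, no recompute */
                puts("peek with offset");
        else
                puts("normal receive");
}

int main(void)
{
        struct udp_sock up;

        udp_set_peek_off(&up, 16);
        recvmsg_fast_path(&up);
        return 0;
}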
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
udp_assign_bit(NO_CHECK6_TX, sk, val);
diff --git a/include/linux/units.h b/include/linux/units.h
index 45110daaf8d3..00e15de33eca 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -24,10 +24,13 @@
#define NANOHZ_PER_HZ 1000000000UL
#define MICROHZ_PER_HZ 1000000UL
#define MILLIHZ_PER_HZ 1000UL
+
#define HZ_PER_KHZ 1000UL
-#define KHZ_PER_MHZ 1000UL
#define HZ_PER_MHZ 1000000UL
+#define KHZ_PER_MHZ 1000UL
+#define KHZ_PER_GHZ 1000000UL
+
#define MILLIWATT_PER_WATT 1000UL
#define MICROWATT_PER_MILLIWATT 1000UL
#define MICROWATT_PER_WATT 1000000UL
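
A quick usage example for the new KHZ_PER_GHZ constant alongside its neighbours; the constants are copied locally so the snippet is self-contained:

#include <stdio.h>

#define HZ_PER_KHZ   1000UL
#define KHZ_PER_GHZ  1000000UL

int main(void)
{
        unsigned long ghz = 2;

        printf("%lu GHz = %lu kHz = %lu Hz\n", ghz,
               ghz * KHZ_PER_GHZ, ghz * KHZ_PER_GHZ * HZ_PER_KHZ);
        return 0;
}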
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c720be70c8dd..0f72c85a377b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -35,6 +35,7 @@ struct iov_iter; /* in uio.h */
#else
#define VM_DEFER_KMEMLEAK 0
#endif
+#define VM_SPARSE 0x00001000 /* sparse vm_area. not all pages are present. */
/* bits [20..32] reserved for arch specific ioremap internals */
@@ -232,6 +233,10 @@ static inline bool is_vm_area_hugepages(const void *addr)
}
#ifdef CONFIG_MMU
+int vm_area_map_pages(struct vm_struct *area, unsigned long start,
+ unsigned long end, struct page **pages);
+void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
+ unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 01fa15506286..170fdee6339c 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -16,6 +16,7 @@
* @WWAN_PORT_QCDM: Qcom Modem diagnostic interface
* @WWAN_PORT_FIREHOSE: XML based command protocol
* @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems
+ * @WWAN_PORT_FASTBOOT: Fastboot protocol control
*
* @WWAN_PORT_MAX: Highest supported port type
* @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -28,6 +29,7 @@ enum wwan_port_type {
WWAN_PORT_QCDM,
WWAN_PORT_FIREHOSE,
WWAN_PORT_XMMRPC,
+ WWAN_PORT_FASTBOOT,
/* Add new port types above this line */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index e1e5e72b901e..77ee0c657e2c 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -201,6 +201,8 @@ int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
+#define NET_ACT_ALIAS_PREFIX "net-act-"
+#define MODULE_ALIAS_NET_ACT(kind) MODULE_ALIAS(NET_ACT_ALIAS_PREFIX kind)
int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 61ebe723ee4d..9d06eb945509 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -8,8 +8,9 @@
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
-#define TEMP_VALID_LIFETIME (7*86400)
-#define TEMP_PREFERRED_LIFETIME (86400)
+#define TEMP_VALID_LIFETIME (7*86400) /* 1 week */
+#define TEMP_PREFERRED_LIFETIME (86400) /* 24 hours */
+#define REGEN_MIN_ADVANCE (2) /* 2 seconds */
#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
@@ -416,7 +417,7 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
if (unlikely(!idev))
return true;
- return !!idev->cnf.ignore_routes_with_linkdown;
+ return !!READ_ONCE(idev->cnf.ignore_routes_with_linkdown);
}
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index afd40dce40f3..627ea8e2d915 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -8,21 +8,29 @@
#include <linux/refcount.h>
#include <net/sock.h>
+#if IS_ENABLED(CONFIG_UNIX)
+struct unix_sock *unix_get_socket(struct file *filp);
+#else
+static inline struct unix_sock *unix_get_socket(struct file *filp)
+{
+ return NULL;
+}
+#endif
+
+extern spinlock_t unix_gc_lock;
+extern unsigned int unix_tot_inflight;
+
void unix_inflight(struct user_struct *user, struct file *fp);
void unix_notinflight(struct user_struct *user, struct file *fp);
-void unix_destruct_scm(struct sk_buff *skb);
-void io_uring_destruct_scm(struct sk_buff *skb);
void unix_gc(void);
-void wait_for_unix_gc(void);
-struct sock *unix_get_socket(struct file *filp);
+void wait_for_unix_gc(struct scm_fp_list *fpl);
+
struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_MOD (256 - 1)
#define UNIX_HASH_SIZE (256 * 2)
#define UNIX_HASH_BITS 8
-extern unsigned int unix_tot_inflight;
-
struct unix_address {
refcount_t refcnt;
int len;
@@ -55,7 +63,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
- atomic_long_t inflight;
+ unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 7ffa8c192c3f..9fe95a22abeb 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -164,6 +164,8 @@ struct bt_voice {
#define BT_ISO_QOS_BIG_UNSET 0xff
#define BT_ISO_QOS_BIS_UNSET 0xff
+#define BT_ISO_SYNC_TIMEOUT 0x07d0 /* 20 secs */
+
struct bt_iso_io_qos {
__u32 interval;
__u16 latency;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index bdee5d649cc6..8701ca5f31ee 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -330,6 +330,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_BROKEN_LE_CODED,
+
+ /*
+ * When this quirk is set, the HCI_OP_READ_ENC_KEY_SIZE command is
+ * skipped during an HCI_EV_ENCRYPT_CHANGE event. This is required
+ * for Actions Semiconductor ATS2851 based controllers, which erroneously
+ * claim to support it.
+ */
+ HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
};
/* HCI device flags */
@@ -372,6 +380,7 @@ enum {
HCI_SETUP,
HCI_CONFIG,
HCI_DEBUGFS_CREATED,
+ HCI_POWERING_DOWN,
HCI_AUTO_OFF,
HCI_RFKILLED,
HCI_MGMT,
@@ -393,7 +402,6 @@ enum {
HCI_LIMITED_PRIVACY,
HCI_RPA_EXPIRED,
HCI_RPA_RESOLVING,
- HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
HCI_ADVERTISING_CONNECTABLE,
@@ -437,7 +445,7 @@ enum {
#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
-#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
+#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
@@ -653,6 +661,7 @@ enum {
#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_COMMAND_DISALLOWED 0x0c
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
#define HCI_ERROR_INVALID_PARAMETERS 0x12
@@ -661,6 +670,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
@@ -2035,6 +2045,7 @@ struct hci_cp_le_set_per_adv_params {
} __packed;
#define HCI_MAX_PER_AD_LENGTH 252
+#define HCI_MAX_PER_AD_TOT_LEN 1650
#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f
struct hci_cp_le_set_per_adv_data {
@@ -2795,6 +2806,10 @@ struct hci_ev_le_per_adv_report {
__u8 data[];
} __packed;
+#define LE_PA_DATA_COMPLETE 0x00
+#define LE_PA_DATA_MORE_TO_COME 0x01
+#define LE_PA_DATA_TRUNCATED 0x02
+
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f8dd9173714..56fb42df44a3 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -552,6 +552,7 @@ struct hci_dev {
__u32 req_status;
__u32 req_result;
struct sk_buff *req_skb;
+ struct sk_buff *req_rsp;
void *smp_data;
void *smp_bredr_data;
@@ -734,8 +735,9 @@ struct hci_conn {
__u16 le_supv_timeout;
__u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 le_adv_data_len;
- __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
- __u8 le_per_adv_data_len;
+ __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
+ __u16 le_per_adv_data_len;
+ __u16 le_per_adv_data_offset;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -1083,6 +1085,24 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
}
+static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c == conn) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
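
hci_conn_valid() above never dereferences the candidate pointer; it only compares it against entries still linked in the connection hash, so a stale pointer is detected rather than followed. A user-space sketch of that membership-scan pattern, with a plain singly linked list standing in for the RCU-protected hash list:

#include <stdbool.h>
#include <stdio.h>

struct conn { int id; struct conn *next; };

static bool conn_valid(struct conn *head, struct conn *conn)
{
        for (struct conn *c = head; c; c = c->next)
                if (c == conn)
                        return true;    /* still linked: safe to use */
        return false;
}

int main(void)
{
        struct conn b = { 2, NULL }, a = { 1, &b };
        struct conn stale = { 3, NULL };        /* already removed */

        printf("a valid: %d, stale valid: %d\n",
               conn_valid(&a, &a), conn_valid(&a, &stale));
        return 0;
}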
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1480,7 +1500,6 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
bdaddr_t *dst, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
-void hci_conn_check_pending(struct hci_dev *hdev);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
@@ -1494,11 +1513,13 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role);
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason);
+ enum conn_reasons conn_reason, u16 timeout);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting, struct bt_codec *codec);
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
@@ -1509,8 +1530,8 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos,
__u8 data_len, __u8 *data);
-int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
- __u8 sid, struct bt_iso_qos *qos);
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[]);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 6efbc2152146..6a9d063e9f47 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -42,12 +42,24 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry);
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func, void *data,
+ hci_cmd_sync_work_destroy_t destroy);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
@@ -127,8 +139,6 @@ struct hci_conn;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
-
int hci_le_create_cis_sync(struct hci_dev *hdev);
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
@@ -138,3 +148,9 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index cf393e72d6ed..a4278aa618ab 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -59,8 +59,6 @@
#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
-#define L2CAP_A2MP_DEFAULT_MTU 670
-
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
@@ -109,12 +107,6 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
-#define L2CAP_CREATE_CHAN_REQ 0x0c
-#define L2CAP_CREATE_CHAN_RSP 0x0d
-#define L2CAP_MOVE_CHAN_REQ 0x0e
-#define L2CAP_MOVE_CHAN_RSP 0x0f
-#define L2CAP_MOVE_CHAN_CFM 0x10
-#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
#define L2CAP_LE_CONN_REQ 0x14
@@ -144,7 +136,6 @@ struct l2cap_conninfo {
/* L2CAP fixed channels */
#define L2CAP_FC_SIG_BREDR 0x02
#define L2CAP_FC_CONNLESS 0x04
-#define L2CAP_FC_A2MP 0x08
#define L2CAP_FC_ATT 0x10
#define L2CAP_FC_SIG_LE 0x20
#define L2CAP_FC_SMP_LE 0x40
@@ -267,7 +258,6 @@ struct l2cap_conn_rsp {
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
-#define L2CAP_CID_A2MP 0x0003
#define L2CAP_CID_ATT 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
@@ -282,7 +272,6 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
-#define L2CAP_CR_BAD_AMP 0x0005
#define L2CAP_CR_INVALID_SCID 0x0006
#define L2CAP_CR_SCID_IN_USE 0x0007
@@ -404,29 +393,6 @@ struct l2cap_info_rsp {
__u8 data[];
} __packed;
-struct l2cap_create_chan_req {
- __le16 psm;
- __le16 scid;
- __u8 amp_id;
-} __packed;
-
-struct l2cap_create_chan_rsp {
- __le16 dcid;
- __le16 scid;
- __le16 result;
- __le16 status;
-} __packed;
-
-struct l2cap_move_chan_req {
- __le16 icid;
- __u8 dest_amp_id;
-} __packed;
-
-struct l2cap_move_chan_rsp {
- __le16 icid;
- __le16 result;
-} __packed;
-
#define L2CAP_MR_SUCCESS 0x0000
#define L2CAP_MR_PEND 0x0001
#define L2CAP_MR_BAD_ID 0x0002
@@ -539,8 +505,6 @@ struct l2cap_seq_list {
struct l2cap_chan {
struct l2cap_conn *conn;
- struct hci_conn *hs_hcon;
- struct hci_chan *hs_hchan;
struct kref kref;
atomic_t nesting;
@@ -591,12 +555,6 @@ struct l2cap_chan {
unsigned long conn_state;
unsigned long flags;
- __u8 remote_amp_id;
- __u8 local_amp_id;
- __u8 move_id;
- __u8 move_state;
- __u8 move_role;
-
__u16 next_tx_seq;
__u16 expected_ack_seq;
__u16 expected_tx_seq;
@@ -981,7 +939,7 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type);
+ bdaddr_t *dst, u8 dst_type, u16 timeout);
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index c5e57c6bd873..9ce5ac2bfbad 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -54,6 +54,8 @@ typedef enum {
AD_MUX_DETACHED, /* mux machine */
AD_MUX_WAITING, /* mux machine */
AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING, /* mux machine */
+ AD_MUX_DISTRIBUTING, /* mux machine */
AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 69292ecc0325..473a0147769e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -76,6 +76,7 @@ enum {
BOND_OPT_MISSED_MAX,
BOND_OPT_NS_TARGETS,
BOND_OPT_PRIO,
+ BOND_OPT_COUPLED_CONTROL,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 5b8b1b644a2d..b61fb1aa3a56 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -148,6 +148,7 @@ struct bond_params {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
#endif
+ int coupled_control;
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
@@ -167,6 +168,7 @@ struct slave {
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */
+ rx_disabled:1, /* indicates whether slave's Rx is disabled */
should_notify:1, /* indicates whether the state changed */
should_notify_link:1; /* indicates whether the link changed */
u8 duplex;
@@ -568,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave,
bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 1;
+}
+
+static inline void bond_set_slave_tx_disabled_flags(struct slave *slave,
+ bool notify)
+{
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
}
static inline void bond_set_slave_active_flags(struct slave *slave,
@@ -575,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave,
{
bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 0;
+}
+
+static inline void bond_set_slave_rx_enabled_flags(struct slave *slave,
+ bool notify)
+{
+ slave->rx_disabled = 0;
}
static inline bool bond_is_slave_inactive(struct slave *slave)
@@ -582,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+static inline bool bond_is_slave_rx_disabled(struct slave *slave)
+{
+ return slave->rx_disabled;
+}
+
static inline void bond_propose_link_state(struct slave *slave, int state)
{
slave->link_new_state = state;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2b54fdd8ca15..2e2be4fd2bb6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -118,10 +118,13 @@ struct wiphy;
* restrictions.
* @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
* @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
- * @IEEE80211_CHAN_NO_UHB_VLP_CLIENT: Client connection with VLP AP
+ * @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP
* not permitted using this channel
- * @IEEE80211_CHAN_NO_UHB_AFC_CLIENT: Client connection with AFC AP
+ * @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP
* not permitted using this channel
+ * @IEEE80211_CHAN_CAN_MONITOR: This channel can be used in monitor
+ * mode despite other (regulatory) restrictions, even if it is
+ * otherwise disabled.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -146,8 +149,9 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_320MHZ = 1<<19,
IEEE80211_CHAN_NO_EHT = 1<<20,
IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
- IEEE80211_CHAN_NO_UHB_VLP_CLIENT= 1<<22,
- IEEE80211_CHAN_NO_UHB_AFC_CLIENT= 1<<23,
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
+ IEEE80211_CHAN_CAN_MONITOR = 1<<24,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -808,6 +812,9 @@ struct key_params {
* chan will define the primary channel and all other
* parameters are ignored.
* @freq1_offset: offset from @center_freq1, in KHz
+ * @punctured: mask of the punctured 20 MHz subchannels, with
+ * bits turned on being disabled (punctured); numbered
+ * from lower to higher frequency (like in the spec)
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -816,6 +823,7 @@ struct cfg80211_chan_def {
u32 center_freq2;
struct ieee80211_edmg edmg;
u16 freq1_offset;
+ u16 punctured;
};
/*
@@ -956,7 +964,8 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
chandef1->width == chandef2->width &&
chandef1->center_freq1 == chandef2->center_freq1 &&
chandef1->freq1_offset == chandef2->freq1_offset &&
- chandef1->center_freq2 == chandef2->center_freq2);
+ chandef1->center_freq2 == chandef2->center_freq2 &&
+ chandef1->punctured == chandef2->punctured);
}
/**
@@ -1048,6 +1057,20 @@ cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
/**
+ * cfg80211_chandef_primary - calculate primary 40/80/160 MHz freq
+ * @chandef: chandef to calculate for
+ * @primary_chan_width: primary channel width to calculate center for
+ * @punctured: punctured sub-channel bitmap, will be recalculated
+ * according to the new bandwidth, can be %NULL
+ *
+ * Returns: the primary 40/80/160 MHz channel center frequency, or -1
+ * for errors, updating the punctured bitmap
+ */
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured);
+
+/**
* nl80211_send_chandef - sends the channel definition.
* @msg: the msg to send channel definition
* @chandef: the channel definition to check
@@ -1457,9 +1480,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
- * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
- * a 20 MHz channel, lowest bit corresponding to the lowest channel.
- * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1494,7 +1514,6 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
- u16 punct_bitmap;
};
@@ -1528,9 +1547,8 @@ struct cfg80211_ap_update {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
- * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
- * a 20 MHz channel, lowest bit corresponding to the lowest channel.
- * Bit set to 1 indicates that the channel is punctured.
+ * @link_id: defines the link on which channel switch is expected during
+ * MLO. 0 in case of non-MLO.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1543,7 +1561,7 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
- u16 punct_bitmap;
+ u8 link_id;
};
/**
@@ -1766,11 +1784,15 @@ struct station_parameters {
* @subtype: Management frame subtype to use for indicating removal
* (10 = Disassociation, 12 = Deauthentication)
* @reason_code: Reason code for the Disassociation/Deauthentication frame
+ * @link_id: Link ID indicating a link that stations to be flushed must be
+ * using; valid only for MLO, but can also be -1 for MLO to really
+ * remove all stations.
*/
struct station_del_parameters {
const u8 *mac;
u8 subtype;
u16 reason_code;
+ int link_id;
};
/**
@@ -2695,19 +2717,11 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
* @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
- * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
- * for filtering out scan results received. Drivers advertise this support
- * of band specific rssi based filtering through the feature capability
- * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
- * specific rssi thresholds take precedence over rssi_thold, if specified.
- * If not specified for any band, it will be assigned with rssi_thold of
- * corresponding matchset.
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
s32 rssi_thold;
- s32 per_band_rssi_thold[NUM_NL80211_BANDS];
};
/**
@@ -3063,6 +3077,7 @@ struct cfg80211_assoc_link {
* @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
* Drivers shall disable MLO features for the current association if this
* flag is not set.
+ * @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
@@ -3072,6 +3087,7 @@ enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HE = BIT(4),
ASSOC_REQ_DISABLE_EHT = BIT(5),
CONNECT_REQ_MLO_SUPPORT = BIT(6),
+ ASSOC_REQ_SPP_AMSDU = BIT(7),
};
/**
@@ -3596,12 +3612,15 @@ struct cfg80211_wowlan_nd_info {
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
* @net_detect: if not %NULL, woke up because of net detect
+ * @unprot_deauth_disassoc: woke up due to unprotected deauth or
+ * disassoc frame (in MFP).
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release, packet_80211,
- tcp_match, tcp_connlost, tcp_nomoretokens;
+ tcp_match, tcp_connlost, tcp_nomoretokens,
+ unprot_deauth_disassoc;
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
@@ -4923,7 +4942,7 @@ struct cfg80211_ops {
* enum wiphy_flags - wiphy capability flags
*
* @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
- * into two, first for legacy bands and second for UHB.
+ * into two, first for legacy bands and second for 6 GHz.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -6204,7 +6223,7 @@ struct wireless_dev {
int beacon_interval;
struct cfg80211_chan_def preset_chandef;
struct cfg80211_chan_def chandef;
- u8 id[IEEE80211_MAX_SSID_LEN];
+ u8 id[IEEE80211_MAX_MESH_ID_LEN];
u8 id_len, id_up_len;
} mesh;
struct {
@@ -6852,13 +6871,45 @@ cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
}
/**
+ * enum cfg80211_rnr_iter_ret - reduced neighbor report iteration state
+ * @RNR_ITER_CONTINUE: continue iterating with the next entry
+ * @RNR_ITER_BREAK: break iteration and return success
+ * @RNR_ITER_ERROR: break iteration and return error
+ */
+enum cfg80211_rnr_iter_ret {
+ RNR_ITER_CONTINUE,
+ RNR_ITER_BREAK,
+ RNR_ITER_ERROR,
+};
+
+/**
+ * cfg80211_iter_rnr - iterate reduced neighbor report entries
+ * @elems: the frame elements to iterate RNR elements and then
+ * their entries in
+ * @elems_len: length of the elements
+ * @iter: iteration function, see also &enum cfg80211_rnr_iter_ret
+ * for the return value
+ * @iter_data: additional data passed to the iteration function
+ * Return: %true on success (after successfully iterating all entries
+ * or if the iteration function returned %RNR_ITER_BREAK),
+ * %false on error (iteration function returned %RNR_ITER_ERROR
+ * or elements were malformed).
+ */
+bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data);
+
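
cfg80211_iter_rnr() above defines a tri-state callback contract: continue, break-with-success and break-with-error, which a plain bool return cannot express. A stand-alone sketch of the same contract over invented data:

#include <stdbool.h>
#include <stdio.h>

enum iter_ret { ITER_CONTINUE, ITER_BREAK, ITER_ERROR };

static bool iter_entries(const int *vals, int n,
                         enum iter_ret (*iter)(void *data, int val),
                         void *data)
{
        for (int i = 0; i < n; i++) {
                switch (iter(data, vals[i])) {
                case ITER_CONTINUE:
                        continue;
                case ITER_BREAK:
                        return true;    /* found what we wanted */
                case ITER_ERROR:
                        return false;   /* malformed entry */
                }
        }
        return true;    /* iterated everything: also success */
}

static enum iter_ret find_seven(void *data, int val)
{
        if (val < 0)
                return ITER_ERROR;
        if (val == 7) {
                *(int *)data = val;
                return ITER_BREAK;
        }
        return ITER_CONTINUE;
}

int main(void)
{
        int vals[] = { 1, 7, 9 }, found = 0;

        printf("ok=%d found=%d\n",
               iter_entries(vals, 3, find_seven, &found), found);
        return 0;
}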
+/**
* cfg80211_defragment_element - Defrag the given element data into a buffer
*
* @elem: the element to defragment
* @ies: elements where @elem is contained
* @ieslen: length of @ies
- * @data: buffer to store element data
- * @data_len: length of @data
+ * @data: buffer to store element data, or %NULL to just determine size
+ * @data_len: length of @data, or 0
* @frag_id: the element ID of fragments
*
* Return: length of @data, or -EINVAL on error
@@ -7156,11 +7207,13 @@ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
* from a beacon or probe response
* @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
* @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ * @CFG80211_BSS_FTYPE_S1G_BEACON: data comes from an S1G beacon
*/
enum cfg80211_bss_frame_type {
CFG80211_BSS_FTYPE_UNKNOWN,
CFG80211_BSS_FTYPE_BEACON,
CFG80211_BSS_FTYPE_PRESP,
+ CFG80211_BSS_FTYPE_S1G_BEACON,
};
/**
@@ -8733,14 +8786,13 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
- * @punct_bitmap: the new puncturing bitmap
*
* Caller must hold wiphy mutex, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap);
+ unsigned int link_id);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
@@ -8749,7 +8801,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
- * @punct_bitmap: the future puncturing bitmap
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -8758,7 +8809,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap);
+ bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -8772,6 +8823,19 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
enum nl80211_band *band);
/**
+ * ieee80211_operating_class_to_chandef - convert operating class to chandef
+ *
+ * @operating_class: the operating class to convert
+ * @chan: the ieee80211_channel to convert
+ * @chandef: a pointer to the resulting chandef
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef);
+
+/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
*
* @chandef: the chandef to convert
@@ -9377,18 +9441,6 @@ static inline int cfg80211_color_change_notify(struct net_device *dev)
}
/**
- * cfg80211_valid_disable_subchannel_bitmap - validate puncturing bitmap
- * @bitmap: bitmap to be validated
- * @chandef: channel definition
- *
- * Validate the puncturing bitmap.
- *
- * Return: %true if the bitmap is valid. %false otherwise.
- */
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef);
-
-/**
* cfg80211_links_removed - Notify about removed STA MLD setup links.
* @dev: network device.
* @link_mask: BIT mask of removed STA MLD setup link IDs.
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index cd95711b12b8..76d2cd2e2b30 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -401,6 +401,7 @@ struct ieee802154_llsec_key {
struct ieee802154_llsec_key_entry {
struct list_head list;
+ struct rcu_head rcu;
struct ieee802154_llsec_key_id id;
struct ieee802154_llsec_key *key;
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 6d3a20163260..9707ab54fdd5 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -30,6 +30,7 @@
FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
+ FN(TCP_ABORT_ON_DATA) \
FN(TCP_ZEROWINDOW) \
FN(TCP_OLD_DATA) \
FN(TCP_OVERWINDOW) \
@@ -37,6 +38,7 @@
FN(TCP_RFC7323_PAWS) \
FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_INVALID_ACK_SEQUENCE) \
FN(TCP_RESET) \
FN(TCP_INVALID_SYN) \
FN(TCP_CLOSE) \
@@ -54,6 +56,7 @@
FN(NEIGH_QUEUEFULL) \
FN(NEIGH_DEAD) \
FN(TC_EGRESS) \
+ FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
FN(CPU_BACKLOG) \
FN(XDP) \
@@ -105,7 +108,13 @@ enum skb_drop_reason {
SKB_CONSUMED,
/** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
SKB_DROP_REASON_NOT_SPECIFIED,
- /** @SKB_DROP_REASON_NO_SOCKET: socket not found */
+ /**
+ * @SKB_DROP_REASON_NO_SOCKET: no valid socket that can be used.
+ * The reason can be one of three cases:
+ * 1) no established/listening socket found during lookup process
+ * 2) no valid request socket during 3WHS process
+ * 3) no valid child socket during 3WHS process
+ */
SKB_DROP_REASON_NO_SOCKET,
/** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
SKB_DROP_REASON_PKT_TOO_SMALL,
@@ -198,6 +207,11 @@ enum skb_drop_reason {
/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
SKB_DROP_REASON_TCP_FLAGS,
/**
+ * @SKB_DROP_REASON_TCP_ABORT_ON_DATA: abort on data, corresponding to
+ * LINUX_MIB_TCPABORTONDATA
+ */
+ SKB_DROP_REASON_TCP_ABORT_ON_DATA,
+ /**
* @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
* see LINUX_MIB_TCPZEROWINDOWDROP
*/
@@ -221,13 +235,19 @@ enum skb_drop_reason {
SKB_DROP_REASON_TCP_OFOMERGE,
/**
* @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
- * LINUX_MIB_PAWSESTABREJECTED
+ * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
*/
SKB_DROP_REASON_TCP_RFC7323_PAWS,
/** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
SKB_DROP_REASON_TCP_OLD_SEQUENCE,
/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
+ * field because ack sequence is not in the window between snd_una
+ * and snd_nxt
+ */
+ SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE,
/** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
SKB_DROP_REASON_TCP_RESET,
/**
@@ -271,6 +291,8 @@ enum skb_drop_reason {
SKB_DROP_REASON_NEIGH_DEAD,
/** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
SKB_DROP_REASON_TC_EGRESS,
+ /** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
+ SKB_DROP_REASON_SECURITY_HOOK,
/**
* @SKB_DROP_REASON_QDISC_DROP: dropped by qdisc when packet outputting (
* failed to enqueue to current qdisc)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 82135fbdb1e6..7c0da9effe4e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -991,9 +991,9 @@ struct dsa_switch_ops {
* Port's MAC EEE settings
*/
int (*set_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
int (*get_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
diff --git a/include/net/dst.h b/include/net/dst.h
index f5dfc8fb7b37..0aa331bd2fdb 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -390,7 +390,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
struct net_device *dev, int initial_obsolete,
unsigned short flags);
-struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);
static inline void dst_confirm(struct dst_entry *dst)
diff --git a/include/net/eee.h b/include/net/eee.h
new file mode 100644
index 000000000000..84837aba3cd9
--- /dev/null
+++ b/include/net/eee.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _EEE_H
+#define _EEE_H
+
+#include <linux/types.h>
+
+struct eee_config {
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_enabled;
+};
+
+static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
+{
+ /* eee_enabled is the master on/off */
+ if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
+ return false;
+
+ return true;
+}
+
+static inline void eeecfg_to_eee(struct ethtool_keee *eee,
+ const struct eee_config *eeecfg)
+{
+ eee->tx_lpi_timer = eeecfg->tx_lpi_timer;
+ eee->tx_lpi_enabled = eeecfg->tx_lpi_enabled;
+ eee->eee_enabled = eeecfg->eee_enabled;
+}
+
+static inline void eee_to_eeecfg(struct eee_config *eeecfg,
+ const struct ethtool_keee *eee)
+{
+ eeecfg->tx_lpi_timer = eee->tx_lpi_timer;
+ eeecfg->tx_lpi_enabled = eee->tx_lpi_enabled;
+ eeecfg->eee_enabled = eee->eee_enabled;
+}
+
+#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index e61469129402..9ece6e5a3ea8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -41,6 +41,8 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @bind: called when family multicast group is added to a netlink socket
+ * @unbind: called when family multicast group is removed from a netlink socket
* @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
@@ -84,6 +86,8 @@ struct genl_family {
void (*post_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*bind)(int mcgrp);
+ void (*unbind)(int mcgrp);
const struct genl_ops * ops;
const struct genl_small_ops *small_ops;
const struct genl_split_ops *split_ops;
@@ -149,7 +153,7 @@ static inline void *genl_info_userhdr(const struct genl_info *info)
/* Report that a root attribute is missing */
#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
- struct genl_info *__info = (info); \
+ const struct genl_info *__info = (info); \
\
NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
})
diff --git a/include/net/gro.h b/include/net/gro.h
index b435f0ddbf64..50f1e403dbbb 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_IPV6_GRO_H
-#define _NET_IPV6_GRO_H
+#ifndef _NET_GRO_H
+#define _NET_GRO_H
#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
@@ -9,6 +9,7 @@
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>
+#include <net/hotdata.h>
struct napi_gro_cb {
union {
@@ -139,21 +140,16 @@ static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
NAPI_GRO_CB(skb)->data_offset += len;
}
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
+static inline void *skb_gro_header_fast(const struct sk_buff *skb,
unsigned int offset)
{
return NAPI_GRO_CB(skb)->frag0 + offset;
}
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+static inline bool skb_gro_may_pull(const struct sk_buff *skb,
+ unsigned int hlen)
{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
}
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
@@ -162,28 +158,30 @@ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
if (!pskb_may_pull(skb, hlen))
return NULL;
- skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
-static inline void *skb_gro_header(struct sk_buff *skb,
- unsigned int hlen, unsigned int offset)
+static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
{
void *ptr;
ptr = skb_gro_header_fast(skb, offset);
- if (skb_gro_header_hard(skb, hlen))
+ if (!skb_gro_may_pull(skb, hlen))
ptr = skb_gro_header_slow(skb, hlen, offset);
return ptr;
}
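
The skb_gro_header_hard() to skb_gro_may_pull() rename above flips the predicate to its positive sense and drops the frag0 invalidation from the slow path. The shape of the resulting fast/slow split, sketched in stand-alone C with invented buffer names and sizes:

#include <stdio.h>
#include <string.h>

struct pkt {
        unsigned char frag0[32];        /* cheap, already-mapped window */
        unsigned int frag0_len;
        unsigned char data[256];        /* full buffer, costly to touch */
};

static int may_pull(const struct pkt *p, unsigned int hlen)
{
        return hlen <= p->frag0_len;    /* like skb_gro_may_pull() */
}

static const void *hdr(const struct pkt *p, unsigned int hlen,
                       unsigned int off)
{
        if (may_pull(p, hlen))
                return p->frag0 + off;  /* fast path */
        puts("slow path: pulling header into linear area");
        return p->data + off;
}

int main(void)
{
        struct pkt p = { .frag0_len = 32 };

        memcpy(p.frag0, "EXAMPLE", 8);
        printf("%s\n", (const char *)hdr(&p, 8, 0));    /* fast */
        hdr(&p, 64, 0);                                 /* slow */
        return 0;
}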
-static inline void *skb_gro_network_header(struct sk_buff *skb)
+static inline void *skb_gro_network_header(const struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
+ if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
+ return skb_gro_header_fast(skb, skb_network_offset(skb));
+
+ return skb_network_header(skb);
}
-static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
{
const struct iphdr *iph = skb_gro_network_header(skb);
@@ -421,7 +419,8 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
return uh;
}
-static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
{
const struct ipv6hdr *iph = skb_gro_network_header(skb);
@@ -448,7 +447,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
{
list_add_tail(&skb->list, &napi->rx_list);
napi->rx_count += segs;
- if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+ if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
gro_normal_list(napi);
}
@@ -495,6 +494,7 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
#endif
}
-extern struct list_head offload_base;
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
-#endif /* _NET_IPV6_GRO_H */
+#endif /* _NET_GRO_H */
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
new file mode 100644
index 000000000000..003667a1efd6
--- /dev/null
+++ b/include/net/hotdata.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_HOTDATA_H
+#define _NET_HOTDATA_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <net/protocol.h>
+
+/* Read mostly data used in network fast paths. */
+struct net_hotdata {
+#if IS_ENABLED(CONFIG_INET)
+ struct packet_offload ip_packet_offload;
+ struct net_offload tcpv4_offload;
+ struct net_protocol tcp_protocol;
+ struct net_offload udpv4_offload;
+ struct net_protocol udp_protocol;
+ struct packet_offload ipv6_packet_offload;
+ struct net_offload tcpv6_offload;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol tcpv6_protocol;
+ struct inet6_protocol udpv6_protocol;
+#endif
+ struct net_offload udpv6_offload;
+#endif
+ struct list_head offload_base;
+ struct list_head ptype_all;
+ struct kmem_cache *skbuff_cache;
+ struct kmem_cache *skbuff_fclone_cache;
+ struct kmem_cache *skb_small_head_cache;
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+ u32 rps_cpu_mask;
+#endif
+ int gro_normal_batch;
+ int netdev_budget;
+ int netdev_budget_usecs;
+ int tstamp_prequeue;
+ int max_backlog;
+ int dev_tx_weight;
+ int dev_rx_weight;
+};
+
+#define inet_ehash_secret net_hotdata.tcp_protocol.secret
+#define udp_ehash_secret net_hotdata.udp_protocol.secret
+#define inet6_ehash_secret net_hotdata.tcpv6_protocol.secret
+#define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
+#define udp6_ehash_secret net_hotdata.udpv6_protocol.secret
+#define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
+
+extern struct net_hotdata net_hotdata;
+
+#endif /* _NET_HOTDATA_H */
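
The point of net_hotdata is that these read-mostly globals were previously scattered across the kernel image, each potentially sharing a cache line with unrelated write-heavy data; packing them into one aligned struct turns the fast-path working set into a few clean cache lines. A hedged stand-alone illustration of the same packing, with made-up values:

#include <stddef.h>
#include <stdio.h>

/* Knobs that every RX fast path reads but almost never writes,
 * grouped and started on a cache-line boundary. */
struct hot {
        int gro_normal_batch;
        int netdev_budget;
        int netdev_budget_usecs;
        int max_backlog;
} __attribute__((aligned(64)));

static struct hot hotdata = { 8, 300, 2000, 1000 };

int main(void)
{
        printf("struct spans %zu bytes from a 64B-aligned base\n",
               sizeof(hotdata));
        printf("budget=%d batch=%d\n", hotdata.netdev_budget,
               hotdata.gro_normal_batch);
        return 0;
}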
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f07642264c1e..238ad3349456 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -144,7 +144,7 @@ struct ipv6_ac_socklist {
struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
- struct ifacaddr6 *aca_next;
+ struct ifacaddr6 __rcu *aca_next;
struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
@@ -196,7 +196,7 @@ struct inet6_dev {
spinlock_t mc_report_lock; /* mld query report lock */
struct mutex mc_lock; /* mld global lock */
- struct ifacaddr6 *ac_list;
+ struct ifacaddr6 __rcu *ac_list;
rwlock_t lock;
refcount_t refcnt;
__u32 if_flags;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index d94c242eb3ed..f9ddd47dc4f8 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -274,6 +274,7 @@ enum {
INET_FLAGS_REPFLOW = 27,
INET_FLAGS_RTALERT_ISOLATE = 28,
INET_FLAGS_SNDFLOW = 29,
+ INET_FLAGS_RTALERT = 30,
};
/* cmsg flags for inet */
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
index 781d2d8b2f29..2cbbee6e806a 100644
--- a/include/net/ioam6.h
+++ b/include/net/ioam6.h
@@ -12,6 +12,7 @@
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <linux/rhashtable-types.h>
struct ioam6_namespace {
@@ -65,4 +66,7 @@ void ioam6_exit(void);
int ioam6_iptunnel_init(void);
void ioam6_iptunnel_exit(void);
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len);
+
#endif /* _NET_IOAM6_H */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9ba6413fd2e3..323c94f1845b 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -30,12 +30,6 @@
#define RT6_DEBUG 2
-#if RT6_DEBUG >= 3
-#define RT6_TRACE(x...) pr_debug(x)
-#else
-#define RT6_TRACE(x...) do { ; } while (0)
-#endif
-
struct rt6_info;
struct fib6_info;
@@ -179,6 +173,9 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
+
+ struct hlist_node gc_link;
+
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -247,12 +244,18 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
+/* Callers should hold f6i->fib6_table->tb6_lock if the route has ever
+ * been added to a table.
+ */
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~RTF_EXPIRES;
f6i->expires = 0;
}
+/* Callers should hold f6i->fib6_table->tb6_lock if the route has ever
+ * been added to a table.
+ */
static inline void fib6_set_expires(struct fib6_info *f6i,
unsigned long expires)
{
@@ -333,8 +336,10 @@ static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
static inline void fib6_info_release(struct fib6_info *f6i)
{
- if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
+ if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) {
+ DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link));
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
+ }
}
enum fib6_walk_state {
@@ -388,6 +393,7 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
+ struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -504,6 +510,38 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
+/* Add the route to the gc list if it is not already there
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_add_gc_list(struct fib6_info *f6i)
+{
+ /* If fib6_node is null, the f6i is not in (or has been removed
+ * from) the table.
+ *
+ * There can be a gap between finding the f6i in the table and
+ * calling this function during which tb6_lock is not held. This
+ * check makes sure the f6i is not added to the gc list when it is
+ * no longer in the table.
+ */
+ if (!rcu_dereference_protected(f6i->fib6_node,
+ lockdep_is_held(&f6i->fib6_table->tb6_lock)))
+ return;
+
+ if (hlist_unhashed(&f6i->gc_link))
+ hlist_add_head(&f6i->gc_link, &f6i->fib6_table->tb6_gc_hlist);
+}
+
+/* Remove the route from the gc list if it is on the list.
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_remove_gc_list(struct fib6_info *f6i)
+{
+ if (!hlist_unhashed(&f6i->gc_link))
+ hlist_del_init(&f6i->gc_link);
+}
+
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
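/* Hedged usage sketch (hypothetical helper, not part of this patch): per
 * the comments above, fib6_set_expires() and fib6_add_gc_list() are meant
 * to be called together under tb6_lock, so the route both carries
 * RTF_EXPIRES and sits on the per-table GC candidate list.
 */
static void example_mark_route_expiring(struct fib6_info *f6i,
					unsigned long expires)
{
	spin_lock_bh(&f6i->fib6_table->tb6_lock);
	fib6_set_expires(f6i, expires);
	fib6_add_gc_list(f6i);
	spin_unlock_bh(&f6i->fib6_table->tb6_lock);
}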
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 28b065790261..a30c6aa9e5cf 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -170,7 +170,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev, unsigned int pref,
- u32 defrtr_usr_metric);
+ u32 defrtr_usr_metric,
+ int lifetime);
void rt6_purge_dflt_routers(struct net *net);
@@ -331,7 +332,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
out:
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index d4667b7797e3..9b2f69ba5e49 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -264,6 +264,7 @@ struct fib_dump_filter {
bool filter_set;
bool dump_routes;
bool dump_exceptions;
+ bool rtnl_held;
unsigned char protocol;
unsigned char rt_type;
unsigned int flags;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 2d746f4c9a0a..5cd64bb2104d 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -284,7 +284,8 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
- struct rtnl_link_ops *ops);
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index cf25ea21d770..88a8e554f7a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -534,13 +534,15 @@ static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
return 0;
}
-static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
{
+ s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);
+
/* If forwarding is enabled, RA are not accepted unless the special
* hybrid mode (accept_ra=2) is enabled.
*/
- return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
- idev->cnf.accept_ra;
+ return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 :
+ accept_ra;
}
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d400fe2e8668..353488ab94a2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#ifndef MAC80211_H
@@ -214,6 +214,10 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
* @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
+ * bandwidth) OFDMA settings need to be changed
+ * @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
+ * was changed.
*/
enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
@@ -221,6 +225,19 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
+ IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
+};
+
+/**
+ * struct ieee80211_chan_req - A channel "request"
+ * @oper: channel definition to use for operation
+ * @ap: the channel definition of the AP, if any
+ * (otherwise the chan member is %NULL)
+ */
+struct ieee80211_chan_req {
+ struct cfg80211_chan_def oper;
+ struct cfg80211_chan_def ap;
};
/**
@@ -231,6 +248,8 @@ enum ieee80211_chanctx_change {
*
* @def: the channel definition
* @min_def: the minimum channel definition currently required.
+ * @ap: the channel definition the AP actually is operating as,
+ * for use with (wider bandwidth) OFDMA
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -243,6 +262,7 @@ enum ieee80211_chanctx_change {
struct ieee80211_chanctx_conf {
struct cfg80211_chan_def def;
struct cfg80211_chan_def min_def;
+ struct cfg80211_chan_def ap;
u8 rx_chains_static, rx_chains_dynamic;
@@ -340,8 +360,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
- * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
+ * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -376,8 +396,8 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
- BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32),
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
+ BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -537,6 +557,10 @@ struct ieee80211_fils_discovery {
* to that BSS) that can change during the lifetime of the BSS.
*
* @vif: reference to owning VIF
+ * @bss: the cfg80211 bss descriptor. Valid only for a station, and only
+ * when associated. Note: This contains information which is not
+ * necessarily authenticated. For example, information coming from probe
+ * responses.
* @addr: (link) address used locally
* @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
@@ -581,7 +605,7 @@ struct ieee80211_fils_discovery {
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
* @bssid: The BSSID for this BSS
* @enable_beacon: whether beaconing should be enabled or not
- * @chandef: Channel definition for this BSS -- the hardware might be
+ * @chanreq: Channel request for this BSS -- the hardware might be
* configured with a higher bandwidth than this BSS uses, for example.
* @mu_group: VHT MU-MIMO group membership data
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
@@ -642,9 +666,7 @@ struct ieee80211_fils_discovery {
* @tx_pwr_env_num: number of @tx_pwr_env.
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
- * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
* @csa_active: marks whether a channel switch is going on.
- * @csa_punct_bitmap: new puncturing bitmap for channel switch
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
* when it is not assigned. This pointer is RCU-protected due to the TX
@@ -682,6 +704,7 @@ struct ieee80211_fils_discovery {
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
+ struct cfg80211_bss *bss;
const u8 *bssid;
unsigned int link_id;
@@ -714,7 +737,7 @@ struct ieee80211_bss_conf {
u32 cqm_rssi_hyst;
s32 cqm_rssi_low;
s32 cqm_rssi_high;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
struct ieee80211_mu_group_data mu_group;
bool qos;
bool hidden_ssid;
@@ -747,10 +770,8 @@ struct ieee80211_bss_conf {
u8 tx_pwr_env_num;
u8 pwr_reduction;
bool eht_support;
- u16 eht_puncturing;
bool csa_active;
- u16 csa_punct_bitmap;
bool mu_mimo_owner;
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
@@ -1742,8 +1763,9 @@ struct ieee80211_conf {
* @chandef: the new channel to switch to
* @count: the number of TBTT's until the channel switch event
* @delay: maximum delay between the time the AP transmitted the last beacon in
- * current channel and the expected time of the first beacon in the new
- * channel, expressed in TU.
+ * current channel and the expected time of the first beacon in the new
+ * channel, expressed in TU.
+ * @link_id: the link ID of the link doing the channel switch, 0 for non-MLO
*/
struct ieee80211_channel_switch {
u64 timestamp;
@@ -1751,6 +1773,7 @@ struct ieee80211_channel_switch {
bool block_tx;
struct cfg80211_chan_def chandef;
u8 count;
+ u8 link_id;
u32 delay;
};
@@ -1772,6 +1795,10 @@ struct ieee80211_channel_switch {
* this is not pure P2P vif.
* @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is
* enabled for the interface.
+ * @IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW: Ignore wider bandwidth OFDMA
+ * operation on this interface and request a channel context without
+ * the AP definition. Use this e.g. because the device is able to
+ * handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
@@ -1779,6 +1806,7 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
IEEE80211_VIF_EML_ACTIVE = BIT(4),
+ IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
};
@@ -1808,9 +1836,11 @@ enum ieee80211_offload_flags {
* @ps: power-save mode (STA only). This flag is NOT affected by
* offchannel/dynamic_ps operations.
* @aid: association ID number, valid only when @assoc is true
- * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k.
+ * @eml_cap: EML capabilities as described in P802.11be_D4.1 Figure 9-1001j.
* @eml_med_sync_delay: Medium Synchronization delay as described in
- * P802.11be_D2.2 Figure 9-1002j.
+ * P802.11be_D4.1 Figure 9-1001i.
+ * @mld_capa_op: MLD Capabilities and Operations per P802.11be_D4.1
+ * Figure 9-1001k
* @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
* hardware may filter ARP queries targeted for addresses other than those
* listed here. The driver must allow ARP queries targeted for all addresses listed here
@@ -1835,6 +1865,7 @@ struct ieee80211_vif_cfg {
u16 aid;
u16 eml_cap;
u16 eml_med_sync_delay;
+ u16 mld_capa_op;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
int arp_addr_cnt;
@@ -1845,6 +1876,35 @@ struct ieee80211_vif_cfg {
u8 ap_addr[ETH_ALEN] __aligned(2);
};
+#define IEEE80211_TTLM_NUM_TIDS 8
+
+/**
+ * struct ieee80211_neg_ttlm - negotiated TID to link map info
+ *
+ * @downlink: bitmap of active links per TID for downlink, or 0 if mapping for
+ * this TID is not included.
+ * @uplink: bitmap of active links per TID for uplink, or 0 if mapping for this
+ * TID is not included.
+ * @valid: info is valid or not.
+ */
+struct ieee80211_neg_ttlm {
+ u16 downlink[IEEE80211_TTLM_NUM_TIDS];
+ u16 uplink[IEEE80211_TTLM_NUM_TIDS];
+ bool valid;
+};
+
+/**
+ * enum ieee80211_neg_ttlm_res - return value for negotiated TTLM handling
+ * @NEG_TTLM_RES_ACCEPT: accept the request
+ * @NEG_TTLM_RES_REJECT: reject the request
+ * @NEG_TTLM_RES_SUGGEST_PREFERRED: reject and suggest a new mapping
+ */
+enum ieee80211_neg_ttlm_res {
+ NEG_TTLM_RES_ACCEPT,
+ NEG_TTLM_RES_REJECT,
+ NEG_TTLM_RES_SUGGEST_PREFERRED
+};
+
/**
* struct ieee80211_vif - per-interface data
*
@@ -1863,6 +1923,11 @@ struct ieee80211_vif_cfg {
* API calls meant for that purpose.
* @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
* Must be a subset of valid_links.
+ * @suspended_links: subset of dormant_links representing links that are
+ * suspended, or 0 for non-MLO.
+ * @neg_ttlm: negotiated TID to link mapping info; see
+ * &struct ieee80211_neg_ttlm.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
@@ -1900,7 +1965,8 @@ struct ieee80211_vif {
struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
- u16 valid_links, active_links, dormant_links;
+ u16 valid_links, active_links, dormant_links, suspended_links;
+ struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
@@ -1947,6 +2013,21 @@ static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif)
return vif->valid_links != 0;
}
+/**
+ * ieee80211_vif_link_active - check if a given link is active
+ * @vif: the vif
+ * @link_id: the link ID to check
+ * Return: %true if the vif is an MLD and the link is active, or if
+ * the vif is not an MLD and the link ID is 0; %false otherwise.
+ */
+static inline bool ieee80211_vif_link_active(const struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif))
+ return link_id == 0;
+ return vif->active_links & BIT(link_id);
+}
+
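/* Hypothetical driver-side sketch (not from this patch): with the new
 * helper a driver can guard per-link work for both MLO and non-MLO
 * interfaces with one test, instead of special-casing valid_links == 0.
 */
static void example_update_link(struct ieee80211_vif *vif,
				unsigned int link_id)
{
	if (!ieee80211_vif_link_active(vif, link_id))
		return;	/* link not active (dormant/suspended or non-MLO link_id != 0) */

	/* ... program hardware state for this link ... */
}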
#define for_each_vif_active_link(vif, link, link_id) \
for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
if ((!(vif)->active_links || \
@@ -2041,6 +2122,8 @@ static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
* @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver
* for a AES_CMAC key to indicate that it requires sequence number
* generation only
+ * @IEEE80211_KEY_FLAG_SPP_AMSDU: SPP A-MSDUs can be used with this key
+ * (set by mac80211 from the sta->spp_amsdu flag)
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -2054,6 +2137,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9),
IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10),
+ IEEE80211_KEY_FLAG_SPP_AMSDU = BIT(11),
};
/**
@@ -2352,6 +2436,7 @@ struct ieee80211_link_sta {
* would be assigned to link[link_id] where link_id is the id assigned
* by the AP.
* @valid_links: bitmap of valid links, or 0 for non-MLO
+ * @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
u8 addr[ETH_ALEN];
@@ -2365,6 +2450,7 @@ struct ieee80211_sta {
bool tdls_initiator;
bool mfp;
bool mlo;
+ bool spp_amsdu;
u8 max_amsdu_subframes;
struct ieee80211_sta_aggregates *cur;
@@ -2692,6 +2778,11 @@ struct ieee80211_txq {
* @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
* and connecting with a lower bandwidth instead
*
+ * @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so
+ * no need to stop queues. This really should be set by a driver that
+ * implements MLO, so operation can continue on other links when one
+ * link is switching.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2750,6 +2841,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
IEEE80211_HW_DISALLOW_PUNCTURING,
+ IEEE80211_HW_HANDLES_QUIET_CSA,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -4182,7 +4274,7 @@ struct ieee80211_prep_tx_info {
* after a channel switch procedure is completed, allowing the
* driver to go back to a normal configuration.
* @abort_channel_switch: This is an optional callback that is called
- * when channel switch procedure was completed, allowing the
+ * when channel switch procedure was aborted, allowing the
* driver to go back to a normal configuration.
* @channel_switch_rx_beacon: This is an optional callback that is called
* when channel switch procedure is in progress and additional beacon with
@@ -4293,6 +4385,10 @@ struct ieee80211_prep_tx_info {
* flow offloading for flows originating from the vif.
* Note that the driver must not assume that the vif driver_data is valid
* at this point, since the callback can be called during netdev teardown.
+ * @can_neg_ttlm: for a managed interface, asks the driver whether the
+ * requested TID-to-link mapping can be accepted. If it is not accepted,
+ * the driver may suggest a preferred mapping by modifying the @ttlm
+ * parameter.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4574,7 +4670,8 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
void (*abort_channel_switch)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
@@ -4673,6 +4770,9 @@ struct ieee80211_ops {
struct net_device *dev,
enum tc_setup_type type,
void *type_data);
+ enum ieee80211_neg_ttlm_res
+ (*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *ttlm);
};
/**
@@ -5455,6 +5555,7 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
/**
* ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* The beacon counter should be updated after each beacon transmission.
* This function is called implicitly when
@@ -5464,7 +5565,8 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
*
* Return: new countdown value
*/
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
@@ -5482,20 +5584,23 @@ void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* After a channel switch announcement has been scheduled and the counter in this
* announcement hits 1, this function must be called by the driver to
* notify mac80211 that the channel can be changed.
*/
-void ieee80211_csa_finish(struct ieee80211_vif *vif);
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id);
/**
* ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* This function returns whether the countdown reached 1.
*/
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_color_change_finish - notify mac80211 about color change
@@ -5831,6 +5936,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
* @vif: the virtual interface to add the key on
* @keyconf: new key data
+ * @link_id: the link id of the key or -1 for non-MLO
*
* When GTK rekeying was done while the system was suspended, (a) new
* key(s) will be available. These will be needed by mac80211 for proper
@@ -5858,7 +5964,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
*/
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf);
+ struct ieee80211_key_conf *keyconf,
+ int link_id);
/**
* ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
@@ -7416,11 +7523,10 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
* aware of.
- * @gfp: allocation flags
*/
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp);
+ u64 color_bitmap);
/**
* ieee80211_is_tx_data - check if frame is a data frame
@@ -7480,4 +7586,17 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
u16 active_links);
+/* for older drivers - let's not document these ... */
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+
#endif /* MAC80211_H */
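/* Editor's sketch (hypothetical, not from this patch): a driver that
 * cannot split TIDs across links might implement the new can_neg_ttlm op
 * by accepting only mappings with identical uplink/downlink maps.
 */
static enum ieee80211_neg_ttlm_res
example_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     struct ieee80211_neg_ttlm *ttlm)
{
	int tid;

	for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
		/* require the same link set in both directions on every TID */
		if (ttlm->downlink[tid] != ttlm->uplink[tid])
			return NEG_TTLM_RES_REJECT;
	}
	return NEG_TTLM_RES_ACCEPT;
}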
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 2bff5f47ce82..7b17c52e8ce2 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -87,7 +87,7 @@ struct mctp_sock {
};
/* Key for matching incoming packets to sockets or reassembly contexts.
- * Packets are matched on (src,dest,tag).
+ * Packets are matched on (peer EID, local EID, tag).
*
* Lifetime / locking requirements:
*
@@ -133,6 +133,7 @@ struct mctp_sock {
* - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
+ unsigned int net;
mctp_eid_t peer_addr;
mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */
__u8 tag; /* incoming tag match; invert TO for local */
@@ -255,7 +256,8 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
void mctp_key_unref(struct mctp_sk_key *key);
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp);
/* routing <--> device interface */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 13b3a4e29fdb..20c34bd7a077 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -67,8 +67,6 @@ struct net {
*/
spinlock_t rules_mod_lock;
- atomic_t dev_unreg_count;
-
unsigned int dev_base_seq; /* protected by rtnl_mutex */
u32 ifindex;
@@ -450,6 +448,9 @@ struct pernet_operations {
void (*pre_exit)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
+ /* Following method is called with RTNL held. */
+ void (*exit_batch_rtnl)(struct list_head *net_exit_list,
+ struct list_head *dev_kill_list);
unsigned int *id;
size_t size;
};
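/* Hedged sketch of the new hook (names hypothetical): a driver's
 * pernet_operations can do its unregister work in ->exit_batch_rtnl,
 * queueing devices on dev_kill_list, so the core takes rtnl_lock once per
 * netns batch instead of once per driver.
 */
static void example_exit_batch_rtnl(struct list_head *net_exit_list,
				    struct list_head *dev_kill_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		/* look up this netns's device and schedule it for removal,
		 * e.g. unregister_netdevice_queue(dev, dev_kill_list);
		 */
	}
}

static struct pernet_operations example_net_ops = {
	.exit_batch_rtnl = example_exit_batch_rtnl,
};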
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 8b8ed4e13d74..1ec408585373 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -4,6 +4,62 @@
#include <linux/netdevice.h>
+/* See the netdev.yaml spec for definition of each statistic */
+struct netdev_queue_stats_rx {
+ u64 bytes;
+ u64 packets;
+ u64 alloc_fail;
+};
+
+struct netdev_queue_stats_tx {
+ u64 bytes;
+ u64 packets;
+};
+
+/**
+ * struct netdev_stat_ops - netdev ops for fine grained stats
+ * @get_queue_stats_rx: get stats for a given Rx queue
+ * @get_queue_stats_tx: get stats for a given Tx queue
+ * @get_base_stats: get base stats (not belonging to any live instance)
+ *
+ * Query stats for a given object. The values of the statistics are undefined
+ * on entry (specifically they are *not* zero-initialized). Drivers should
+ * assign values only to the statistics they collect. Statistics which are not
+ * collected must be left undefined.
+ *
+ * Queue objects are not necessarily persistent, and only currently active
+ * queues are queried by the per-queue callbacks. This means that per-queue
+ * statistics will not generally add up to the total number of events for
+ * the device. The @get_base_stats callback allows filling in the delta
+ * between events for currently live queues and overall device history.
+ * When the statistics for the entire device are queried, first @get_base_stats
+ * is issued to collect the delta, and then a series of per-queue callbacks.
+ * Only statistics which are set in @get_base_stats will be reported
+ * at the device level, meaning that unlike in queue callbacks, setting
+ * a statistic to zero in @get_base_stats is a legitimate thing to do.
+ * This is because @get_base_stats has a second function of designating which
+ * statistics are in fact correct for the entire device (e.g. when history
+ * for some of the events is not maintained, and reliable "total" cannot
+ * be provided).
+ *
+ * Device drivers can assume that when collecting total device stats,
+ * the @get_base_stats and subsequent per-queue calls are performed
+ * "atomically" (without releasing the rtnl_lock).
+ *
+ * Device drivers are encouraged to reset the per-queue statistics when
+ * the number of queues changes. This is because the primary use case for
+ * per-queue statistics is currently to detect traffic imbalance.
+ */
+struct netdev_stat_ops {
+ void (*get_queue_stats_rx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *stats);
+ void (*get_queue_stats_tx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *stats);
+ void (*get_base_stats)(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx);
+};
+
/**
* DOC: Lockless queue stopping / waking helpers.
*
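/* Hypothetical driver sketch for the new stats ops: per the documentation
 * above, per-queue callbacks fill only what they collect, while
 * get_base_stats reports the delta for queues that no longer exist (and
 * may legitimately set fields to zero). Counters below are assumptions.
 */
static atomic64_t example_rx_pkts[8], example_rx_bytes[8];

static void example_get_queue_stats_rx(struct net_device *dev, int idx,
				       struct netdev_queue_stats_rx *stats)
{
	/* assign only what this driver collects; alloc_fail stays undefined */
	stats->packets = atomic64_read(&example_rx_pkts[idx]);
	stats->bytes = atomic64_read(&example_rx_bytes[idx]);
}

static void example_get_base_stats(struct net_device *dev,
				   struct netdev_queue_stats_rx *rx,
				   struct netdev_queue_stats_tx *tx)
{
	/* zero is legitimate here: it declares these totals exact with no
	 * extra history beyond the live queues
	 */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops example_stat_ops = {
	.get_queue_stats_rx = example_get_queue_stats_rx,
	.get_base_stats = example_get_base_stats,
};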
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index c81021ab07aa..4aeffddb7586 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -35,7 +35,6 @@ struct nf_queue_handler {
void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 510244cc0f8f..e27c28b612e4 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1277,6 +1277,12 @@ static inline bool nft_table_has_owner(const struct nft_table *table)
return table->flags & NFT_TABLE_F_OWNER;
}
+static inline bool nft_table_is_orphan(const struct nft_table *table)
+{
+ return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
+ NFT_TABLE_F_PERSIST;
+}
+
static inline bool nft_base_chain_netdev(int family, u32 hooknum)
{
return family == NFPROTO_NETDEV ||
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 43ae50337685..f3ab0b8a4b18 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -145,15 +145,14 @@ struct netlbl_lsm_cache {
* processing.
*
*/
-#define NETLBL_CATMAP_MAPTYPE u64
#define NETLBL_CATMAP_MAPCNT 4
-#define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+#define NETLBL_CATMAP_MAPSIZE (sizeof(u64) * 8)
#define NETLBL_CATMAP_SIZE (NETLBL_CATMAP_MAPSIZE * \
NETLBL_CATMAP_MAPCNT)
-#define NETLBL_CATMAP_BIT (NETLBL_CATMAP_MAPTYPE)0x01
+#define NETLBL_CATMAP_BIT ((u64)0x01)
struct netlbl_lsm_catmap {
u32 startbit;
- NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+ u64 bitmap[NETLBL_CATMAP_MAPCNT];
struct netlbl_lsm_catmap *next;
};
diff --git a/include/net/netmem.h b/include/net/netmem.h
new file mode 100644
index 000000000000..d8b810245c1d
--- /dev/null
+++ b/include/net/netmem.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Network memory
+ *
+ * Author: Mina Almasry <almasrymina@google.com>
+ */
+
+#ifndef _NET_NETMEM_H
+#define _NET_NETMEM_H
+
+/**
+ * typedef netmem_ref - a nonexistent type marking a reference to generic
+ * network memory.
+ *
+ * A netmem_ref currently is always a reference to a struct page. This
+ * abstraction is introduced so support for new memory types can be added.
+ *
+ * Use the supplied helpers to obtain the underlying memory pointer and fields.
+ */
+typedef unsigned long __bitwise netmem_ref;
+
+/* This conversion fails (returns NULL) if the netmem_ref is not struct page
+ * backed.
+ *
+ * Currently struct page is the only possible netmem, and this helper never
+ * fails.
+ */
+static inline struct page *netmem_to_page(netmem_ref netmem)
+{
+ return (__force struct page *)netmem;
+}
+
+/* Converting from page to netmem is always safe, because a page can always be
+ * a netmem.
+ */
+static inline netmem_ref page_to_netmem(struct page *page)
+{
+ return (__force netmem_ref)page;
+}
+
+#endif /* _NET_NETMEM_H */
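/* Minimal editorial sketch of the conversion helpers above: today every
 * netmem_ref is page-backed, so a page <-> netmem round trip is lossless.
 */
static inline void example_netmem_roundtrip(struct page *page)
{
	netmem_ref netmem = page_to_netmem(page);
	struct page *back = netmem_to_page(netmem);

	WARN_ON_ONCE(back != page);	/* always holds while netmem == page */
}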
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index d92046a4a078..7ca315ad500e 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -47,6 +47,8 @@ struct nh_config {
bool nh_grp_res_has_idle_timer;
bool nh_grp_res_has_unbalanced_timer;
+ bool nh_hw_stats;
+
struct nlattr *nh_encap;
u16 nh_encap_type;
@@ -95,8 +97,14 @@ struct nh_res_table {
struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets);
};
+struct nh_grp_entry_stats {
+ u64_stats_t packets;
+ struct u64_stats_sync syncp;
+};
+
struct nh_grp_entry {
struct nexthop *nh;
+ struct nh_grp_entry_stats __percpu *stats;
u8 weight;
union {
@@ -114,6 +122,7 @@ struct nh_grp_entry {
struct list_head nh_list;
struct nexthop *nh_parent; /* nexthop of group with this entry */
+ u64 packets_hw;
};
struct nh_group {
@@ -124,6 +133,7 @@ struct nh_group {
bool resilient;
bool fdb_nh;
bool has_v4;
+ bool hw_stats;
struct nh_res_table __rcu *res_table;
struct nh_grp_entry nh_entries[] __counted_by(num_nh);
@@ -157,6 +167,7 @@ enum nexthop_event_type {
NEXTHOP_EVENT_REPLACE,
NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
NEXTHOP_EVENT_BUCKET_REPLACE,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
};
enum nh_notifier_info_type {
@@ -164,6 +175,7 @@ enum nh_notifier_info_type {
NH_NOTIFIER_INFO_TYPE_GRP,
NH_NOTIFIER_INFO_TYPE_RES_TABLE,
NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
+ NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS,
};
struct nh_notifier_single_info {
@@ -173,6 +185,7 @@ struct nh_notifier_single_info {
__be32 ipv4;
struct in6_addr ipv6;
};
+ u32 id;
u8 is_reject:1,
is_fdb:1,
has_encap:1;
@@ -180,13 +193,13 @@ struct nh_notifier_single_info {
struct nh_notifier_grp_entry_info {
u8 weight;
- u32 id;
struct nh_notifier_single_info nh;
};
struct nh_notifier_grp_info {
u16 num_nh;
bool is_fdb;
+ bool hw_stats;
struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh);
};
@@ -200,9 +213,21 @@ struct nh_notifier_res_bucket_info {
struct nh_notifier_res_table_info {
u16 num_nh_buckets;
+ bool hw_stats;
struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets);
};
+struct nh_notifier_grp_hw_stats_entry_info {
+ u32 id;
+ u64 packets;
+};
+
+struct nh_notifier_grp_hw_stats_info {
+ u16 num_nh;
+ bool hw_stats_used;
+ struct nh_notifier_grp_hw_stats_entry_info stats[] __counted_by(num_nh);
+};
+
struct nh_notifier_info {
struct net *net;
struct netlink_ext_ack *extack;
@@ -213,17 +238,22 @@ struct nh_notifier_info {
struct nh_notifier_grp_info *nh_grp;
struct nh_notifier_res_table_info *nh_res_table;
struct nh_notifier_res_bucket_info *nh_res_bucket;
+ struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats;
};
};
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
bool offload, bool trap);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
unsigned long *activity);
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets);
/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
@@ -316,7 +346,7 @@ static inline
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
u8 rt_family)
{
- struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
int i;
for (i = 0; i < nhg->num_nh; i++) {
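/* Hedged sketch (not from this patch): a driver that offloads ECMP groups
 * can answer the new NEXTHOP_EVENT_HW_STATS_REPORT_DELTA notification by
 * reading its hardware counters and feeding per-entry deltas back through
 * nh_grp_hw_stats_report_delta(). The counter read below is hypothetical.
 */
static int example_report_hw_stats(struct nh_notifier_info *info)
{
	struct nh_notifier_grp_hw_stats_info *hw = info->nh_grp_hw_stats;
	unsigned int i;

	for (i = 0; i < hw->num_nh; i++) {
		u64 delta = 0;	/* e.g. read-and-clear a hardware counter */

		nh_grp_hw_stats_report_delta(hw, i, delta);
	}
	return 0;
}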
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5dee575fbe86..3d07abacf08b 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -196,7 +196,7 @@ struct nfc_dev {
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
-extern struct class nfc_class;
+extern const struct class nfc_class;
struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
u32 supported_protocols,
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 76481c465375..5e43a08d3231 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -18,8 +18,9 @@
* Please note DMA-sync-for-CPU is still
* device driver responsibility
*/
-#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
- PP_FLAG_DMA_SYNC_DEV)
+#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
+ PP_FLAG_SYSTEM_POOL)
/*
* Fast allocation side cache array/stack
@@ -128,6 +129,7 @@ struct page_pool_stats {
struct page_pool {
struct page_pool_params_fast p;
+ int cpuid;
bool has_init_callback;
long frag_users;
@@ -203,21 +205,18 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
+struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
+ int cpuid);
struct xdp_mem_info;
#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
#else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
static inline void page_pool_destroy(struct page_pool *pool)
{
}
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index f308e8268651..a4ee43f493bb 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -24,6 +24,8 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+#define NET_CLS_ALIAS_PREFIX "net-cls-"
+#define MODULE_ALIAS_NET_CLS(kind) MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)
struct tcf_block_ext_info {
enum flow_block_binder_type binder_type;
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 1e200d9a066d..d7b7b6cd4aa1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -100,6 +100,8 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
int register_qdisc(struct Qdisc_ops *qops);
void unregister_qdisc(struct Qdisc_ops *qops);
+#define NET_SCH_ALIAS_PREFIX "net-sch-"
+#define MODULE_ALIAS_NET_SCH(id) MODULE_ALIAS(NET_SCH_ALIAS_PREFIX id)
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 6aef8cb11cc8..b2499f88f8f8 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -46,6 +46,7 @@ struct net_protocol {
* socket lookup?
*/
icmp_strict_tag_validation:1;
+ u32 secret;
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -59,6 +60,7 @@ struct inet6_protocol {
__be32 info);
unsigned int flags; /* INET6_PROTO_xxx */
+ u32 secret;
};
#define INET6_PROTO_NOPOLICY 0x1
@@ -68,6 +70,7 @@ struct inet6_protocol {
struct net_offload {
struct offload_callbacks callbacks;
unsigned int flags; /* Flags used by IPv6 for now */
+ u32 secret;
};
/* This should be set for any extension header which is compatible with GSO. */
#define INET6_PROTO_GSO_EXTHDR 0x1
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 144c39db9898..8839133d6f6b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -83,6 +83,45 @@ static inline struct sock *req_to_sk(struct request_sock *req)
return (struct sock *)req;
}
+/**
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
+ * @prefetched: is set to true if the socket was assigned from bpf
+ */
+static inline struct sock *skb_steal_sock(struct sk_buff *skb,
+ bool *refcounted, bool *prefetched)
+{
+ struct sock *sk = skb->sk;
+
+ if (!sk) {
+ *prefetched = false;
+ *refcounted = false;
+ return NULL;
+ }
+
+ *prefetched = skb_sk_is_prefetched(skb);
+ if (*prefetched) {
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ struct request_sock *req = inet_reqsk(sk);
+
+ *refcounted = false;
+ sk = req->rsk_listener;
+ req->rsk_listener = NULL;
+ return sk;
+ }
+#endif
+ *refcounted = sk_is_refcounted(sk);
+ } else {
+ *refcounted = true;
+ }
+
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ return sk;
+}
+
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
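/* Hypothetical caller sketch: protocol demux steals a socket that BPF (or
 * an earlier layer) attached to the skb; the out-params tell the caller
 * whether it must sock_put() afterwards and whether BPF prefetched it.
 */
static struct sock *example_demux(struct sk_buff *skb)
{
	bool refcounted, prefetched;
	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);

	if (sk && refcounted) {
		/* caller owns a reference and must drop it when done:
		 * sock_put(sk);
		 */
	}
	return sk;
}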
diff --git a/include/net/route.h b/include/net/route.h
index 980ab474eabd..d4a0147942f1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -37,9 +37,6 @@
#define RTO_ONLINK 0x01
-#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE))
-#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
-
static inline __u8 ip_sock_rt_scope(const struct sock *sk)
{
if (sock_flag(sk, SOCK_LOCALROUTE))
@@ -163,8 +160,8 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
__u8 proto, __u8 tos, int oif)
{
flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
- RT_SCOPE_UNIVERSE, proto,
- sk ? inet_sk_flowi_flags(sk) : 0,
+ sk ? ip_sock_rt_scope(sk) : RT_SCOPE_UNIVERSE,
+ proto, sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
diff --git a/include/net/rps.h b/include/net/rps.h
new file mode 100644
index 000000000000..7660243e905b
--- /dev/null
+++ b/include/net/rps.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_RPS_H
+#define _NET_RPS_H
+
+#include <linux/types.h>
+#include <linux/static_key.h>
+#include <net/sock.h>
+#include <net/hotdata.h>
+
+#ifdef CONFIG_RPS
+
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
+
+/*
+ * This structure holds an RPS map which can be of variable length. The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+ unsigned int len;
+ struct rcu_head rcu;
+ u16 cpus[];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
+ */
+struct rps_dev_flow {
+ u16 cpu;
+ u16 filter;
+ unsigned int last_qtail;
+};
+#define RPS_NO_FILTER 0xffff
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct rcu_head rcu;
+ struct rps_dev_flow flows[];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+ ((_num) * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32bit value. Upper part is the high-order bits
+ * of flow hash, lower part is CPU number.
+ * rps_cpu_mask is used to partition the space, depending on the number
+ * of possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32-6=26 bits for the hash.
+ */
+struct rps_sock_flow_table {
+ u32 mask;
+
+ u32 ents[] ____cacheline_aligned_in_smp;
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~net_hotdata.rps_cpu_mask;
+
+ /* We only give a hint, preemption can change CPU under us */
+ val |= raw_smp_processor_id();
+
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in get_rps_cpu().
+ */
+ if (READ_ONCE(table->ents[index]) != val)
+ WRITE_ONCE(table->ents[index], val);
+}
+
+#endif /* CONFIG_RPS */
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ if (!hash)
+ return;
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
+ if (sock_flow_table)
+ rps_record_sock_flow(sock_flow_table, hash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (static_branch_unlikely(&rfs_needed)) {
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
+ }
+#endif
+}
+
+#endif /* _NET_RPS_H */
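/* Worked example for the mask layout documented above (editorial sketch):
 * with 64 possible CPUs, rps_cpu_mask = roundup_pow_of_two(64) - 1 = 0x3f,
 * so an entry's low 6 bits hold the CPU and the remaining 26 bits hold
 * flow-hash bits, mirroring rps_record_sock_flow().
 */
#ifdef CONFIG_RPS
static inline u32 example_make_sock_flow_entry(u32 hash, u32 cpu)
{
	/* upper bits: flow hash; lower bits: CPU number */
	return (hash & ~net_hotdata.rps_cpu_mask) |
	       (cpu & net_hotdata.rps_cpu_mask);
}
#endif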
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 6506221c5fe3..3bfb80bad173 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -12,6 +12,7 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
+ RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
};
enum rtnl_kinds {
diff --git a/include/net/scm.h b/include/net/scm.h
index cf68acec4d70..92276a2c5543 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -25,6 +25,7 @@ struct scm_creds {
struct scm_fp_list {
short count;
+ short count_unix;
short max;
struct user_struct *user;
struct file *fp[SCM_MAX_FD];
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5a24d6d8522a..f24a1bbcb3ef 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -242,10 +242,7 @@ struct sctp_sock {
int do_auto_asconf;
};
-static inline struct sctp_sock *sctp_sk(const struct sock *sk)
-{
- return (struct sctp_sock *)sk;
-}
+#define sctp_sk(ptr) container_of_const(ptr, struct sctp_sock, inet.sk)
static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
{
diff --git a/include/net/sock.h b/include/net/sock.h
index 54ca8dcbfb43..b5e00702acc1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -378,14 +378,10 @@ struct sock {
#define sk_flags __sk_common.skc_flags
#define sk_rxhash __sk_common.skc_rxhash
- /* early demux fields */
- struct dst_entry __rcu *sk_rx_dst;
- int sk_rx_dst_ifindex;
- u32 sk_rx_dst_cookie;
+ __cacheline_group_begin(sock_write_rx);
- socket_lock_t sk_lock;
atomic_t sk_drops;
- int sk_rcvlowat;
+ __s32 sk_peek_off;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
/*
@@ -402,18 +398,24 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
-
#define sk_rmem_alloc sk_backlog.rmem_alloc
- int sk_forward_alloc;
- u32 sk_reserved_mem;
+ __cacheline_group_end(sock_write_rx);
+
+ __cacheline_group_begin(sock_read_rx);
+ /* early demux fields */
+ struct dst_entry __rcu *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
- /* ===== mostly read cache line ===== */
unsigned int sk_napi_id;
+ u16 sk_busy_poll_budget;
+ u8 sk_prefer_busy_poll;
#endif
+ u8 sk_userlocks;
int sk_rcvbuf;
- int sk_disconnects;
struct sk_filter __rcu *sk_filter;
union {
@@ -422,15 +424,33 @@ struct sock {
struct socket_wq *sk_wq_raw;
/* public: */
};
+
+ void (*sk_data_ready)(struct sock *sk);
+ long sk_rcvtimeo;
+ int sk_rcvlowat;
+ __cacheline_group_end(sock_read_rx);
+
+ __cacheline_group_begin(sock_read_rxtx);
+ int sk_err;
+ struct socket *sk_socket;
+ struct mem_cgroup *sk_memcg;
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
+ __cacheline_group_end(sock_read_rxtx);
- struct dst_entry __rcu *sk_dst_cache;
+ __cacheline_group_begin(sock_write_rxtx);
+ socket_lock_t sk_lock;
+ u32 sk_reserved_mem;
+ int sk_forward_alloc;
+ u32 sk_tsflags;
+ __cacheline_group_end(sock_write_rxtx);
+
+ __cacheline_group_begin(sock_write_tx);
+ int sk_write_pending;
atomic_t sk_omem_alloc;
int sk_sndbuf;
- /* ===== cache line for TX ===== */
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
@@ -439,22 +459,36 @@ struct sock {
struct rb_root tcp_rtx_queue;
};
struct sk_buff_head sk_write_queue;
- __s32 sk_peek_off;
- int sk_write_pending;
- __u32 sk_dst_pending_confirm;
+ u32 sk_dst_pending_confirm;
u32 sk_pacing_status; /* see enum sk_pacing */
- long sk_sndtimeo;
+ struct page_frag sk_frag;
struct timer_list sk_timer;
- __u32 sk_priority;
- __u32 sk_mark;
+
unsigned long sk_pacing_rate; /* bytes per second */
+ atomic_t sk_zckey;
+ atomic_t sk_tskey;
+ __cacheline_group_end(sock_write_tx);
+
+ __cacheline_group_begin(sock_read_tx);
unsigned long sk_max_pacing_rate;
- struct page_frag sk_frag;
+ long sk_sndtimeo;
+ u32 sk_priority;
+ u32 sk_mark;
+ struct dst_entry __rcu *sk_dst_cache;
netdev_features_t sk_route_caps;
- int sk_gso_type;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
+ u16 sk_gso_type;
+ u16 sk_gso_max_segs;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
- __u32 sk_txhash;
+ u32 sk_txhash;
+ u8 sk_pacing_shift;
+ bool sk_use_task_frag;
+ __cacheline_group_end(sock_read_tx);
/*
* Because of non atomicity rules, all
@@ -463,64 +497,44 @@ struct sock {
u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
- sk_no_check_rx : 1,
- sk_userlocks : 4;
- u8 sk_pacing_shift;
+ sk_no_check_rx : 1;
+ u8 sk_shutdown;
u16 sk_type;
u16 sk_protocol;
- u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
- int sk_err,
- sk_err_soft;
+ int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
- u8 sk_txrehash;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- u8 sk_prefer_busy_poll;
- u16 sk_busy_poll_budget;
-#endif
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
- long sk_rcvtimeo;
ktime_t sk_stamp;
#if BITS_PER_LONG==32
seqlock_t sk_stamp_seq;
#endif
- atomic_t sk_tskey;
- atomic_t sk_zckey;
- u32 sk_tsflags;
- u8 sk_shutdown;
+ int sk_disconnects;
+ u8 sk_txrehash;
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;
- bool sk_use_task_frag;
- struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
struct sock_cgroup_data sk_cgrp_data;
- struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *sk);
- void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-#endif
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
@@ -1103,41 +1117,6 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
-static inline void sock_rps_record_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
- struct rps_sock_flow_table *sock_flow_table;
-
- rcu_read_lock();
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_record_sock_flow(sock_flow_table, hash);
- rcu_read_unlock();
-#endif
-}
-
-static inline void sock_rps_record_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
- if (static_branch_unlikely(&rfs_needed)) {
- /* Reading sk->sk_rxhash might incur an expensive cache line
- * miss.
- *
- * TCP_ESTABLISHED does cover almost all states where RFS
- * might be useful, and is cheaper [1] than testing :
- * IPv4: inet_sk(sk)->inet_daddr
- * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
- * OR an additional socket flag
- * [1] : sk_state and sk_prot are in the same cache line.
- */
- if (sk->sk_state == TCP_ESTABLISHED) {
- /* This READ_ONCE() is paired with the WRITE_ONCE()
- * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
- */
- sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
- }
- }
-#endif
-}
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
@@ -1429,6 +1408,7 @@ sk_memory_allocated(const struct sock *sk)
/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
@@ -1437,7 +1417,7 @@ sk_memory_allocated_add(struct sock *sk, int amt)
preempt_disable();
local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+ if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
@@ -1451,7 +1431,7 @@ sk_memory_allocated_sub(struct sock *sk, int amt)
preempt_disable();
local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+ if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
@@ -2830,31 +2810,6 @@ sk_is_refcounted(struct sock *sk)
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
-/**
- * skb_steal_sock - steal a socket from an sk_buff
- * @skb: sk_buff to steal the socket from
- * @refcounted: is set to true if the socket is reference-counted
- * @prefetched: is set to true if the socket was assigned from bpf
- */
-static inline struct sock *
-skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched)
-{
- if (skb->sk) {
- struct sock *sk = skb->sk;
-
- *refcounted = true;
- *prefetched = skb_sk_is_prefetched(skb);
- if (*prefetched)
- *refcounted = sk_is_refcounted(sk);
- skb->destructor = NULL;
- skb->sk = NULL;
- return sk;
- }
- *prefetched = false;
- *refcounted = false;
- return NULL;
-}
-
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f6eba9652d01..6ae35199d3b3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -348,7 +348,7 @@ void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -396,8 +396,8 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
@@ -498,6 +498,22 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct tcp_options_received *tcp_opt,
int mss, u32 tsoff);
+#if IS_ENABLED(CONFIG_BPF)
+struct bpf_tcp_req_attrs {
+ u32 rcv_tsval;
+ u32 rcv_tsecr;
+ u16 mss;
+ u8 rcv_wscale;
+ u8 snd_wscale;
+ u8 ecn_ok;
+ u8 wscale_ok;
+ u8 sack_ok;
+ u8 tstamp_ok;
+ u8 usec_ts_ok;
+ u8 reserved[3];
+};
+#endif
+
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -577,6 +593,15 @@ static inline u32 tcp_cookie_time(void)
return val;
}
+/* Convert a 64-bit nsec timestamp to a TS value (ms or usec resolution) */
+static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
+{
+ if (usec_ts)
+ return div_u64(val, NSEC_PER_USEC);
+
+ return div_u64(val, NSEC_PER_MSEC);
+}
+
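/* Quick numeric check of tcp_ns_to_ts() (editorial, not from this patch):
 * val = 3,000,000 ns gives 3 at ms resolution and 3000 at usec resolution.
 * A hypothetical caller deriving the current TS value:
 */
static inline u64 example_ts_from_clock(bool usec_ts)
{
	return tcp_ns_to_ts(usec_ts, tcp_clock_ns());
}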
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
@@ -590,6 +615,26 @@ static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *
dst_feature(dst, RTAX_FEATURE_ECN);
}
+#if IS_ENABLED(CONFIG_BPF)
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return skb->sk;
+}
+
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
+#else
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline struct request_sock *cookie_bpf_check(struct sock *sk,
+ struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
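/* Hedged sketch of how the new BPF-cookie hooks compose on the ACK path
 * (hypothetical helper): if a BPF program already attached a listener to
 * the skb, the kernel can skip classic syncookie validation and build the
 * request_sock directly.
 */
static struct request_sock *example_cookie_check(struct sock *sk,
						 struct sk_buff *skb)
{
	if (cookie_bpf_ok(skb))
		return cookie_bpf_check(sk, skb);

	return NULL;	/* fall back to the classic cookie_v4/v6_check() path */
}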
/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1d107241b901..57c743b7e4fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -51,8 +51,10 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
+#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
@@ -1577,22 +1579,20 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
struct net_device *dev = xdo->dev;
- if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
- return;
-
if (dev && dev->xfrmdev_ops &&
- dev->xfrmdev_ops->xdo_dev_state_update_curlft)
- dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+ dev->xfrmdev_ops->xdo_dev_state_update_stats)
+ dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}
#else
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index 6678cf8b235b..dc03cf8e0369 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -36,6 +36,39 @@ TRACE_EVENT(napi_poll,
__entry->work, __entry->budget)
);
+TRACE_EVENT(dql_stall_detected,
+
+ TP_PROTO(unsigned short thrs, unsigned int len,
+ unsigned long last_reap, unsigned long hist_head,
+ unsigned long now, unsigned long *hist),
+
+ TP_ARGS(thrs, len, last_reap, hist_head, now, hist),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, thrs)
+ __field( unsigned int, len)
+ __field( unsigned long, last_reap)
+ __field( unsigned long, hist_head)
+ __field( unsigned long, now)
+ __array( unsigned long, hist, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->thrs = thrs;
+ __entry->len = len;
+ __entry->last_reap = last_reap;
+ __entry->hist_head = hist_head * BITS_PER_LONG;
+ __entry->now = now;
+ memcpy(__entry->hist, hist, sizeof(entry->hist));
+ ),
+
+ TP_printk("thrs %u len %u last_reap %lu hist_head %lu now %lu hist %016lx %016lx %016lx %016lx",
+ __entry->thrs, __entry->len,
+ __entry->last_reap, __entry->hist_head, __entry->now,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3])
+);
+
#undef NO_DEV
#endif /* _TRACE_NAPI_H */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 87b8de9b6c1c..a1b126a6b0d7 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -83,7 +83,7 @@
EM(rxrpc_badmsg_bad_abort, "bad-abort") \
EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \
EM(rxrpc_badmsg_short_ack, "short-ack") \
- EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \
+ EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \
EM(rxrpc_badmsg_short_hdr, "short-hdr") \
EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \
EM(rxrpc_badmsg_zero_call, "zero-call") \
@@ -119,6 +119,7 @@
EM(rxrpc_call_poke_complete, "Compl") \
EM(rxrpc_call_poke_error, "Error") \
EM(rxrpc_call_poke_idle, "Idle") \
+ EM(rxrpc_call_poke_set_timeout, "Set-timo") \
EM(rxrpc_call_poke_start, "Start") \
EM(rxrpc_call_poke_timer, "Timer") \
E_(rxrpc_call_poke_timer_now, "Timer-now")
@@ -340,35 +341,26 @@
E_(rxrpc_rtt_rx_requested_ack, "RACK")
#define rxrpc_timer_traces \
- EM(rxrpc_timer_begin, "Begin ") \
- EM(rxrpc_timer_exp_ack, "ExpAck") \
- EM(rxrpc_timer_exp_hard, "ExpHrd") \
- EM(rxrpc_timer_exp_idle, "ExpIdl") \
- EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
- EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
- EM(rxrpc_timer_exp_normal, "ExpNml") \
- EM(rxrpc_timer_exp_ping, "ExpPng") \
- EM(rxrpc_timer_exp_resend, "ExpRsn") \
- EM(rxrpc_timer_init_for_reply, "IniRpl") \
- EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
- EM(rxrpc_timer_restart, "Restrt") \
- EM(rxrpc_timer_set_for_ack, "SetAck") \
- EM(rxrpc_timer_set_for_hard, "SetHrd") \
- EM(rxrpc_timer_set_for_idle, "SetIdl") \
- EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
- EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
- EM(rxrpc_timer_set_for_normal, "SetNml") \
- EM(rxrpc_timer_set_for_ping, "SetPng") \
- EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetSnd")
+ EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
+ EM(rxrpc_timer_trace_expect_rx, "ExpectRx ") \
+ EM(rxrpc_timer_trace_hard, "HardLimit") \
+ EM(rxrpc_timer_trace_idle, "IdleLimit") \
+ EM(rxrpc_timer_trace_keepalive, "KeepAlive") \
+ EM(rxrpc_timer_trace_lost_ack, "LostAck ") \
+ EM(rxrpc_timer_trace_ping, "DelayPing") \
+ EM(rxrpc_timer_trace_resend, "Resend ") \
+ EM(rxrpc_timer_trace_resend_reset, "ResendRst") \
+ E_(rxrpc_timer_trace_resend_tx, "ResendTx ")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
+ EM(rxrpc_propose_ack_delayed_ack, "DlydAck") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \
EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
@@ -1084,9 +1076,9 @@ TRACE_EVENT(rxrpc_tx_packet,
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+ rxrpc_serial_t serial, unsigned int flags, bool lose),
- TP_ARGS(call, seq, serial, flags, retrans, lose),
+ TP_ARGS(call, seq, serial, flags, lose),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -1094,8 +1086,7 @@ TRACE_EVENT(rxrpc_tx_data,
__field(rxrpc_serial_t, serial)
__field(u32, cid)
__field(u32, call_id)
- __field(u8, flags)
- __field(bool, retrans)
+ __field(u16, flags)
__field(bool, lose)
),
@@ -1106,7 +1097,6 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
- __entry->retrans = retrans;
__entry->lose = lose;
),
@@ -1116,8 +1106,8 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->call_id,
__entry->serial,
__entry->seq,
- __entry->flags,
- __entry->retrans ? " *RETRANS*" : "",
+ __entry->flags & RXRPC_TXBUF_WIRE_FLAGS,
+ __entry->flags & RXRPC_TXBUF_RESENT ? " *RETRANS*" : "",
__entry->lose ? " *LOSE*" : "")
);
@@ -1314,90 +1304,112 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->rto)
);
-TRACE_EVENT(rxrpc_timer,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- unsigned long now),
+TRACE_EVENT(rxrpc_timer_set,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
- TP_ARGS(call, why, now),
+ TP_ARGS(call, delay, why),
TP_STRUCT__entry(
__field(unsigned int, call)
__field(enum rxrpc_timer_trace, why)
- __field(long, now)
- __field(long, ack_at)
- __field(long, ack_lost_at)
- __field(long, resend_at)
- __field(long, ping_at)
- __field(long, expect_rx_by)
- __field(long, expect_req_by)
- __field(long, expect_term_by)
- __field(long, timer)
+ __field(ktime_t, delay)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->now = now;
- __entry->ack_at = call->delay_ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
+ __entry->delay = delay;
),
- TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s to=%lld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_exp,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, delay, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x %s to=%lld",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces),
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_can,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%08x %s",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces))
+ );
+
+TRACE_EVENT(rxrpc_timer_restart,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay, unsigned long delayj),
+
+ TP_ARGS(call, delay, delayj),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(unsigned long, delayj)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->delayj = delayj;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x to=%lld j=%ld",
+ __entry->call,
+ ktime_to_us(__entry->delay),
+ __entry->delayj)
);
TRACE_EVENT(rxrpc_timer_expired,
- TP_PROTO(struct rxrpc_call *call, unsigned long now),
+ TP_PROTO(struct rxrpc_call *call),
- TP_ARGS(call, now),
+ TP_ARGS(call),
TP_STRUCT__entry(
__field(unsigned int, call)
- __field(long, now)
- __field(long, ack_at)
- __field(long, ack_lost_at)
- __field(long, resend_at)
- __field(long, ping_at)
- __field(long, expect_rx_by)
- __field(long, expect_req_by)
- __field(long, expect_term_by)
- __field(long, timer)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->now = now;
- __entry->ack_at = call->delay_ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
),
- TP_printk("c=%08x EXPIRED a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
- __entry->call,
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ TP_printk("c=%08x EXPIRED",
+ __entry->call)
);
TRACE_EVENT(rxrpc_rx_lose,
@@ -1506,26 +1518,30 @@ TRACE_EVENT(rxrpc_drop_ack,
);
TRACE_EVENT(rxrpc_retransmit,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, s64 expiry),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, ktime_t expiry),
- TP_ARGS(call, seq, expiry),
+ TP_ARGS(call, seq, serial, expiry),
TP_STRUCT__entry(
__field(unsigned int, call)
__field(rxrpc_seq_t, seq)
- __field(s64, expiry)
+ __field(rxrpc_serial_t, serial)
+ __field(ktime_t, expiry)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->seq = seq;
+ __entry->serial = serial;
__entry->expiry = expiry;
),
- TP_printk("c=%08x q=%x xp=%lld",
+ TP_printk("c=%08x q=%x r=%x xp=%lld",
__entry->call,
__entry->seq,
- __entry->expiry)
+ __entry->serial,
+ ktime_to_us(__entry->expiry))
);
TRACE_EVENT(rxrpc_congest,
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 7b1ddffa3dfc..699dafd204ea 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -88,7 +88,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ __entry->skbaddr, __entry->skaddr,
show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
@@ -258,6 +259,8 @@ TRACE_EVENT(tcp_probe,
__field(__u32, srtt)
__field(__u32, rcv_wnd)
__field(__u64, sock_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
),
TP_fast_assign(
@@ -285,14 +288,18 @@ TRACE_EVENT(tcp_probe,
__entry->ssthresh = tcp_current_ssthresh(sk);
__entry->srtt = tp->srtt_us >> 3;
__entry->sock_cookie = sock_gen_cookie(sk);
+
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
),
- TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
+ TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx skbaddr=%p skaddr=%p",
show_family_name(__entry->family),
__entry->saddr, __entry->daddr, __entry->mark,
__entry->data_len, __entry->snd_nxt, __entry->snd_una,
__entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
- __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
+ __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie,
+ __entry->skbaddr, __entry->skaddr)
);
#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \
@@ -361,7 +368,8 @@ DECLARE_EVENT_CLASS(tcp_event_skb,
TP_STORE_ADDR_PORTS_SKB(__entry, skb);
),
- TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
+ TP_printk("skbaddr=%p src=%pISpc dest=%pISpc",
+ __entry->skbaddr, __entry->saddr, __entry->daddr)
);
DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 754e68ca8744..3c42b9f1bada 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -42,6 +42,7 @@
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
+#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@@ -50,6 +51,10 @@
#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
+enum bpf_cond_pseudo_jmp {
+ BPF_MAY_GOTO = 0,
+};
+
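As a sketch of the encoding used by this series (offset illustrative), a may_goto is a BPF_JMP-class instruction carrying the new BPF_JCOND bits with BPF_MAY_GOTO in src_reg:

        struct bpf_insn may_goto = {
                .code    = BPF_JMP | BPF_JCOND,  /* 0x05 | 0xe0 */
                .src_reg = BPF_MAY_GOTO,
                .off     = 3,  /* forward target taken when the budget is exhausted */
        };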
/* Register numbers */
enum {
BPF_REG_0 = 0,
@@ -77,12 +82,29 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
+ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
+ * the trailing flexible array member) instead.
+ */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
+/* Header for bpf_lpm_trie_key structs */
+struct bpf_lpm_trie_key_hdr {
+ __u32 prefixlen;
+};
+
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
+struct bpf_lpm_trie_key_u8 {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ __u8 data[]; /* Arbitrary size */
+};
+
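A sketch of the intended replacement pattern, assuming a caller-defined key type (ipv4_lpm_key and addr_be are illustrative names):

        struct ipv4_lpm_key {
                struct bpf_lpm_trie_key_hdr hdr;  /* prefixlen */
                __u32 addr;                       /* typed trailing data */
        };

        struct ipv4_lpm_key key = {
                .hdr  = { .prefixlen = 24 },
                .addr = addr_be,                  /* big-endian IPv4 address */
        };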
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
@@ -617,7 +639,11 @@ union bpf_iter_link_info {
* to NULL to begin the batched operation. After each subsequent
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
* *out_batch* as the *in_batch* for the next operation to
- * continue iteration from the current point.
+ * continue iteration from the current point. Both *in_batch* and
+ * *out_batch* must point to memory large enough to hold a key,
+ * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
+ * must be at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point
* to memory large enough to hold *count* items based on the key
@@ -847,6 +873,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
+ * BPF_TOKEN_CREATE
+ * Description
+ * Create BPF token with embedded information about what
+ * BPF-related functionality it allows:
+ * - a set of allowed bpf() syscall commands;
+ * - a set of allowed BPF map types to be created with
+ * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ * - a set of allowed BPF program types and BPF program attach
+ * types to be loaded with BPF_PROG_LOAD command, if
+ * BPF_PROG_LOAD itself is allowed.
+ *
+ * BPF token is created (derived) from an instance of BPF FS,
+ * assuming it has necessary delegation mount options specified.
+ * This BPF token can be passed as an extra parameter to various
+ * bpf() syscall commands to grant BPF subsystem functionality to
+ * unprivileged processes.
+ *
+ * When created, BPF token is "associated" with the owning
+ * user namespace of BPF FS instance (super block) that it was
+ * derived from, and subsequent BPF operations performed with
+ * BPF token would be performing capabilities checks (i.e.,
+ * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
+ * that user namespace. Without BPF token, such capabilities
+ * have to be granted in init user namespace, making bpf()
+ * syscall incompatible with user namespace, for the most part.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -901,6 +957,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
+ BPF_TOKEN_CREATE,
+ __MAX_BPF_CMD,
};
enum bpf_map_type {
@@ -951,6 +1009,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
+ BPF_MAP_TYPE_ARENA,
+ __MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -995,6 +1055,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
+ __MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@@ -1278,6 +1339,10 @@ enum {
*/
#define BPF_PSEUDO_KFUNC_CALL 2
+enum bpf_addr_space_cast {
+ BPF_ADDR_SPACE_CAST = 1,
+};
+
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
BPF_ANY = 0, /* create new element or update existing */
@@ -1330,6 +1395,18 @@ enum {
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
BPF_F_PATH_FD = (1U << 14),
+
+/* Flag for value_type_btf_obj_fd, the fd is available */
+ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+/* BPF token FD is passed in a corresponding command's token_fd field */
+ BPF_F_TOKEN_FD = (1U << 16),
+
+/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
+ BPF_F_SEGV_ON_FAULT = (1U << 17),
+
+/* Do not translate kernel bpf_arena pointers to user pointers */
+ BPF_F_NO_USER_CONV = (1U << 18),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1401,8 +1478,20 @@ union bpf_attr {
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
+ *
+ * BPF_MAP_TYPE_ARENA - contains the address where user space
+ * is going to mmap() the arena. It has to be page aligned.
*/
__u64 map_extra;
+
+ __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
+ * type data for
+ * btf_vmlinux_value_type_id.
+ */
+ /* BPF token FD to use with BPF_MAP_CREATE operation.
+ * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -1472,6 +1561,10 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
+ /* BPF token FD to use with BPF_PROG_LOAD operation.
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1584,6 +1677,11 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
+ __u32 btf_flags;
+ /* BPF token FD to use with BPF_BTF_LOAD operation.
+ * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 btf_token_fd;
};
struct {
@@ -1714,6 +1812,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
+ struct { /* struct used by BPF_TOKEN_CREATE command */
+ __u32 flags;
+ __u32 bpffs_fd;
+ } token_create;
+
} __attribute__((aligned(8)));
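A hypothetical user-space sketch of the new command via the raw bpf(2) syscall, assuming bpffs_fd references a BPF FS mount created with the delegation mount options:

        union bpf_attr attr = {};
        int token_fd;

        attr.token_create.bpffs_fd = bpffs_fd;
        token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
        /* token_fd is then passed via map_token_fd / prog_token_fd /
         * btf_token_fd, with BPF_F_TOKEN_FD set in the matching flags field. */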
/* The description below is an attempt at providing documentation to eBPF
@@ -4839,9 +4942,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
- * currently only supported for tc BPF program types at the ingress
- * hook and for veth device types. The peer device must reside in a
- * different network namespace.
+ * currently only supported for tc BPF program types at the
+ * ingress hook and for veth and netkit target device types. The
+ * peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@@ -6487,7 +6590,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
- __u32 :32; /* alignment pad */
+ __u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@@ -6563,6 +6666,7 @@ struct bpf_link_info {
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
+ __aligned_u64 cookies;
} kprobe_multi;
struct {
__aligned_u64 path;
@@ -6582,6 +6686,7 @@ struct bpf_link_info {
__aligned_u64 file_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from file_name */
+ __u64 cookie;
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
struct {
__aligned_u64 func_name; /* in/out */
@@ -6589,14 +6694,19 @@ struct bpf_link_info {
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
+ __u64 cookie;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
__u32 name_len;
+ __u32 :32;
+ __u64 cookie;
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
struct {
__u64 config;
__u32 type;
+ __u32 :32;
+ __u64 cookie;
} event; /* BPF_PERF_EVENT_EVENT */
};
} perf_event;
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 939db2388208..e78cbd85ce7c 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -193,9 +193,14 @@ struct canfd_frame {
#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
+/* the 8-bit VCID is optionally placed in the canxl_frame.prio element */
+#define CANXL_VCID_OFFSET 16 /* bit offset of VCID in prio element */
+#define CANXL_VCID_VAL_MASK 0xFFUL /* VCID is an 8-bit value */
+#define CANXL_VCID_MASK (CANXL_VCID_VAL_MASK << CANXL_VCID_OFFSET)
+
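To make the bit layout concrete, a sketch composing a prio element from an 11-bit priority and an 8-bit VCID (values illustrative):

        canid_t prio = 0x123;                  /* 11-bit arbitration priority */

        prio |= ((canid_t)0x40 << CANXL_VCID_OFFSET) & CANXL_VCID_MASK;
        /* prio == 0x00400123: VCID 0x40 in bits 23..16, priority in bits 10..0 */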
/**
* struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
- * @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags
+ * @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags / VCID
* @flags: additional flags for CAN XL
* @sdt: SDU (service data unit) type
* @len: frame payload length in byte (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
@@ -205,7 +210,7 @@ struct canfd_frame {
* @prio shares the same position as @can_id from struct can[fd]_frame.
*/
struct canxl_frame {
- canid_t prio; /* 11 bit priority for arbitration (canid_t) */
+ canid_t prio; /* 11 bit priority for arbitration / 8 bit VCID */
__u8 flags; /* additional flags for CAN XL */
__u8 sdt; /* SDU (service data unit) type */
__u16 len; /* frame payload length in byte */
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
index 439c982f7e81..6cde62371b6f 100644
--- a/include/uapi/linux/can/isotp.h
+++ b/include/uapi/linux/can/isotp.h
@@ -137,6 +137,7 @@ struct can_isotp_ll_options {
#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
+#define CAN_ISOTP_DYN_FC_PARMS 0x2000 /* dynamic FC parameters BS/STmin */
/* protocol machine default values */
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 31622c9b7988..e024d896e278 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -65,6 +65,22 @@ enum {
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
+ CAN_RAW_XL_VCID_OPTS, /* CAN XL VCID configuration options */
};
+/* configuration for CAN XL virtual CAN identifier (VCID) handling */
+struct can_raw_vcid_options {
+
+ __u8 flags; /* flags for vcid (filter) behaviour */
+ __u8 tx_vcid; /* VCID value set into canxl_frame.prio */
+ __u8 rx_vcid; /* VCID value for VCID filter */
+ __u8 rx_vcid_mask; /* VCID mask for VCID filter */
+
+};
+
+/* can_raw_vcid_options.flags for CAN XL virtual CAN identifier handling */
+#define CAN_RAW_XL_VCID_TX_SET 0x01
+#define CAN_RAW_XL_VCID_TX_PASS 0x02
+#define CAN_RAW_XL_VCID_RX_FILTER 0x04
+
#endif /* !_UAPI_CAN_RAW_H */
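A user-space configuration sketch (socket setup and error handling elided; SOL_CAN_RAW is the usual level for CAN_RAW socket options):

        struct can_raw_vcid_options vcid = {
                .flags        = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER,
                .tx_vcid      = 0x40,  /* placed into canxl_frame.prio on tx */
                .rx_vcid      = 0x40,  /* receive filter value ... */
                .rx_vcid_mask = 0xff,  /* ... and mask */
        };

        setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS, &vcid, sizeof(vcid));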
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 130cae0d3e20..2da0c7eb6710 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -614,7 +614,10 @@ enum devlink_attr {
DEVLINK_ATTR_REGION_DIRECT, /* flag */
- /* add new attributes above here, update the policy in devlink.c */
+ /* Add new attributes above here, update the spec in
+ * Documentation/netlink/specs/devlink.yaml and re-generate
+ * net/devlink/netlink_gen.c.
+ */
__DEVLINK_ATTR_MAX,
DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index b4e947f9bfbc..0c13d7f1a1bc 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -50,6 +50,35 @@ enum dpll_lock_status {
DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1)
};
+/**
+ * enum dpll_lock_status_error - if the previous status change was caused by a
+ * failure, this provides information about the dpll device lock status error.
+ * Valid values for the DPLL_A_LOCK_STATUS_ERROR attribute
+ * @DPLL_LOCK_STATUS_ERROR_NONE: dpll device lock status was changed without
+ * any error
+ * @DPLL_LOCK_STATUS_ERROR_UNDEFINED: dpll device lock status was changed due
+ * to an undefined error. The driver fills in this value when it is unable to
+ * obtain a more exact error type.
+ * @DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN: dpll device lock status was changed
+ * because the associated media went down. This may happen, for example, if
+ * the dpll device was previously locked on an input pin of type
+ * PIN_TYPE_SYNCE_ETH_PORT.
+ * @DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH: the FFO
+ * (Fractional Frequency Offset) between the RX and TX symbol rate on the
+ * media became too high. This may happen, for example, if the dpll device was
+ * previously locked on an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ */
+enum dpll_lock_status_error {
+ DPLL_LOCK_STATUS_ERROR_NONE = 1,
+ DPLL_LOCK_STATUS_ERROR_UNDEFINED,
+ DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN,
+ DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH,
+
+ /* private: */
+ __DPLL_LOCK_STATUS_ERROR_MAX,
+ DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1)
+};
+
#define DPLL_TEMP_DIVIDER 1000
/**
@@ -150,6 +179,7 @@ enum dpll_a {
DPLL_A_LOCK_STATUS,
DPLL_A_TEMP,
DPLL_A_TYPE,
+ DPLL_A_LOCK_STATUS_ERROR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 06ef6b78b7de..11fc18988bc2 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2023,6 +2023,53 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define IPV4_FLOW 0x10 /* hash only */
#define IPV6_FLOW 0x11 /* hash only */
#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
+
+/* Used for GTP-U IPv4 and IPv6.
+ * The format of GTP packets only includes
+ * elements such as TEID and GTP version.
+ * It is primarily intended for UE data communication.
+ */
+#define GTPU_V4_FLOW 0x13 /* hash only */
+#define GTPU_V6_FLOW 0x14 /* hash only */
+
+/* Used for GTP-C IPv4 and v6.
+ * The format of these GTP packets does not include TEID.
+ * Primarily expected to be used for communication
+ * to create sessions for UE data communication,
+ * commonly referred to as CSR (Create Session Request).
+ */
+#define GTPC_V4_FLOW 0x15 /* hash only */
+#define GTPC_V6_FLOW 0x16 /* hash only */
+
+/* Used for GTP-C IPv4 and v6.
+ * Unlike GTPC_V4_FLOW, the format of these GTP packets includes a TEID,
+ * which is present once a session has been created.
+ * This is mainly used for requests to realize UE handover.
+ */
+#define GTPC_TEID_V4_FLOW 0x17 /* hash only */
+#define GTPC_TEID_V6_FLOW 0x18 /* hash only */
+
+/* Used for GTP-U and extended headers for the PSC (PDU Session Container).
+ * The format of these GTP packets includes TEID and QFI.
+ * In 5G communication using UPF (User Plane Function),
+ * data communication with this extended header is performed.
+ */
+#define GTPU_EH_V4_FLOW 0x19 /* hash only */
+#define GTPU_EH_V6_FLOW 0x1a /* hash only */
+
+/* Used for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers.
+ * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by
+ * UL/DL included in the PSC.
+ * The data carried differs between Downlink and Uplink,
+ * which can be used to distinguish packets.
+ * The flow types described so far are useful for handling
+ * traffic from the mobile network in a UPF, PGW, etc.
+ */
+#define GTPU_UL_V4_FLOW 0x1b /* hash only */
+#define GTPU_UL_V6_FLOW 0x1c /* hash only */
+#define GTPU_DL_V4_FLOW 0x1d /* hash only */
+#define GTPU_DL_V6_FLOW 0x1e /* hash only */
+
/* Flag to enable additional fields in struct ethtool_rx_flow_spec */
#define FLOW_EXT 0x80000000
#define FLOW_MAC_EXT 0x40000000
@@ -2037,6 +2084,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
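As a sketch, enabling TEID-based hashing for GTP-U over IPv4 could look like this at the ioctl level (ifreq plumbing elided; which field combinations a NIC accepts is device specific):

        struct ethtool_rxnfc nfc = {
                .cmd       = ETHTOOL_SRXFH,
                .flow_type = GTPU_V4_FLOW,
                .data      = RXH_IP_SRC | RXH_IP_DST | RXH_GTP_TEID,
        };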
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index cfbcc4cc49ac..4f4b948ef381 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -85,4 +85,17 @@ struct epoll_event {
__u64 data;
} EPOLL_PACKED;
+struct epoll_params {
+ __u32 busy_poll_usecs;
+ __u16 busy_poll_budget;
+ __u8 prefer_busy_poll;
+
+ /* pad the struct to a multiple of 64bits */
+ __u8 __pad;
+};
+
+#define EPOLL_IOC_TYPE 0x8A
+#define EPIOCSPARAMS _IOW(EPOLL_IOC_TYPE, 0x01, struct epoll_params)
+#define EPIOCGPARAMS _IOR(EPOLL_IOC_TYPE, 0x02, struct epoll_params)
+
#endif /* _UAPI_LINUX_EVENTPOLL_H */
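A hypothetical user-space sketch of the new per-context knobs (epfd and the parameter values are illustrative):

        struct epoll_params params = {
                .busy_poll_usecs  = 64, /* poll length per busy-poll cycle */
                .busy_poll_budget = 8,  /* packet budget per cycle */
                .prefer_busy_poll = 1,
        };

        if (ioctl(epfd, EPIOCSPARAMS, &params) == -1)
                perror("EPIOCSPARAMS");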
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index ab9bcff96e4d..ffa637b38c93 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1505,6 +1505,7 @@ enum {
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
+ IFLA_BOND_COUPLED_CONTROL,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/ioam6_genl.h b/include/uapi/linux/ioam6_genl.h
index ca4b22833754..1733fbc51fb5 100644
--- a/include/uapi/linux/ioam6_genl.h
+++ b/include/uapi/linux/ioam6_genl.h
@@ -49,4 +49,24 @@ enum {
#define IOAM6_CMD_MAX (__IOAM6_CMD_MAX - 1)
+#define IOAM6_GENL_EV_GRP_NAME "ioam6_events"
+
+enum ioam6_event_type {
+ IOAM6_EVENT_UNSPEC,
+ IOAM6_EVENT_TRACE,
+};
+
+enum ioam6_event_attr {
+ IOAM6_EVENT_ATTR_UNSPEC,
+
+ IOAM6_EVENT_ATTR_TRACE_NAMESPACE, /* u16 */
+ IOAM6_EVENT_ATTR_TRACE_NODELEN, /* u8 */
+ IOAM6_EVENT_ATTR_TRACE_TYPE, /* u32 */
+ IOAM6_EVENT_ATTR_TRACE_DATA, /* Binary */
+
+ __IOAM6_EVENT_ATTR_MAX
+};
+
+#define IOAM6_EVENT_ATTR_MAX (__IOAM6_EVENT_ATTR_MAX - 1)
+
#endif /* _UAPI_LINUX_IOAM6_GENL_H */
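A sketch of subscribing to these events with libnl-genl (socket setup and error handling elided; "IOAM6" is the generic netlink family name):

        int grp = genl_ctrl_resolve_grp(sk, "IOAM6", IOAM6_GENL_EV_GRP_NAME);

        nl_socket_add_membership(sk, grp);
        /* IOAM6_EVENT_TRACE notifications then carry the
         * IOAM6_EVENT_ATTR_TRACE_* attributes defined above. */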
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
index 154ab56651f1..e1db65df9359 100644
--- a/include/uapi/linux/mctp.h
+++ b/include/uapi/linux/mctp.h
@@ -50,7 +50,14 @@ struct sockaddr_mctp_ext {
#define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0)
#define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1)
+#define SIOCMCTPALLOCTAG2 (SIOCPROTOPRIVATE + 2)
+#define SIOCMCTPDROPTAG2 (SIOCPROTOPRIVATE + 3)
+/* Deprecated: use the mctp_ioc_tag_ctl2 / TAG2 ioctls instead, which define
+ * the MCTP network ID as part of the allocated tag. Using this assumes the
+ * default net ID for allocated tags, which may not give correct behaviour on
+ * systems with multiple networks configured.
+ */
struct mctp_ioc_tag_ctl {
mctp_eid_t peer_addr;
@@ -65,4 +72,29 @@ struct mctp_ioc_tag_ctl {
__u16 flags;
};
+struct mctp_ioc_tag_ctl2 {
+ /* Peer details: network ID, peer EID, local EID. All set by the
+ * caller.
+ *
+ * Local EID must be MCTP_ADDR_NULL or MCTP_ADDR_ANY in current
+ * kernels.
+ */
+ unsigned int net;
+ mctp_eid_t peer_addr;
+ mctp_eid_t local_addr;
+
+ /* Set by caller, but no flags defined currently. Must be 0 */
+ __u16 flags;
+
+ /* For SIOCMCTPALLOCTAG2: must be passed as zero, kernel will
+ * populate with the allocated tag value. Returned tag value will
+ * always have TO and PREALLOC set.
+ *
+ * For SIOCMCTPDROPTAG2: userspace provides tag value to drop, from
+ * a prior SIOCMCTPALLOCTAG2 call (and so must have TO and PREALLOC set).
+ */
+ __u8 tag;
+
+};
+
#endif /* __UAPI_MCTP_H */
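A sketch of allocating a tag on a specific network with the new ioctl (sd, net and EID values illustrative):

        struct mctp_ioc_tag_ctl2 ctl = {
                .net        = 2,        /* MCTP network ID */
                .peer_addr  = 9,        /* peer EID */
                .local_addr = MCTP_ADDR_ANY,
        };

        if (ioctl(sd, SIOCMCTPALLOCTAG2, &ctl) == 0) {
                /* ctl.tag now holds the allocated tag, with TO and PREALLOC
                 * set; it can be placed in sockaddr_mctp.smctp_tag. */
        }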
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index d03863da180e..c0c8ec995b06 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -138,6 +138,8 @@
#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PMA_SPEED_2_5G 0x2000 /* 2.5G capable */
+#define MDIO_PMA_SPEED_5G 0x4000 /* 5G capable */
#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
#define MDIO_PCS_SPEED_2_5G 0x0040 /* 2.5G capable */
#define MDIO_PCS_SPEED_5G 0x0080 /* 5G capable */
@@ -348,6 +350,8 @@
/* BASE-T1 auto-negotiation advertisement register [31:16] */
#define MDIO_AN_T1_ADV_M_B10L 0x4000 /* device is compatible with 10BASE-T1L */
+#define MDIO_AN_T1_ADV_M_1000BT1 0x0080 /* advertise 1000BASE-T1 */
+#define MDIO_AN_T1_ADV_M_100BT1 0x0020 /* advertise 100BASE-T1 */
#define MDIO_AN_T1_ADV_M_MST 0x0010 /* advertise master preference */
/* BASE-T1 auto-negotiation advertisement register [47:32] */
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 93cb411adf72..bb65ee840cda 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -70,6 +70,10 @@ enum netdev_queue_type {
NETDEV_QUEUE_TYPE_TX,
};
+enum netdev_qstats_scope {
+ NETDEV_QSTATS_SCOPE_QUEUE = 1,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -133,6 +137,21 @@ enum {
};
enum {
+ NETDEV_A_QSTATS_IFINDEX = 1,
+ NETDEV_A_QSTATS_QUEUE_TYPE,
+ NETDEV_A_QSTATS_QUEUE_ID,
+ NETDEV_A_QSTATS_SCOPE,
+ NETDEV_A_QSTATS_RX_PACKETS = 8,
+ NETDEV_A_QSTATS_RX_BYTES,
+ NETDEV_A_QSTATS_TX_PACKETS,
+ NETDEV_A_QSTATS_TX_BYTES,
+ NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+
+ __NETDEV_A_QSTATS_MAX,
+ NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -144,6 +163,7 @@ enum {
NETDEV_CMD_PAGE_POOL_STATS_GET,
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
+ NETDEV_CMD_QSTATS_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 117c6a9b845b..aa4094ca2444 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -179,13 +179,17 @@ enum nft_hook_attributes {
* enum nft_table_flags - nf_tables table flags
*
* @NFT_TABLE_F_DORMANT: this table is not active
+ * @NFT_TABLE_F_OWNER: this table is owned by a process
+ * @NFT_TABLE_F_PERSIST: this table shall outlive its owner
*/
enum nft_table_flags {
NFT_TABLE_F_DORMANT = 0x1,
NFT_TABLE_F_OWNER = 0x2,
+ NFT_TABLE_F_PERSIST = 0x4,
};
#define NFT_TABLE_F_MASK (NFT_TABLE_F_DORMANT | \
- NFT_TABLE_F_OWNER)
+ NFT_TABLE_F_OWNER | \
+ NFT_TABLE_F_PERSIST)
/**
* enum nft_table_attributes - nf_tables table netlink attributes
diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h
index d8ffa8c9ca78..dd8787f9cf39 100644
--- a/include/uapi/linux/nexthop.h
+++ b/include/uapi/linux/nexthop.h
@@ -30,6 +30,9 @@ enum {
#define NEXTHOP_GRP_TYPE_MAX (__NEXTHOP_GRP_TYPE_MAX - 1)
+#define NHA_OP_FLAG_DUMP_STATS BIT(0)
+#define NHA_OP_FLAG_DUMP_HW_STATS BIT(1)
+
enum {
NHA_UNSPEC,
NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */
@@ -60,6 +63,18 @@ enum {
/* nested; nexthop bucket attributes */
NHA_RES_BUCKET,
+ /* u32; operation-specific flags */
+ NHA_OP_FLAGS,
+
+ /* nested; nexthop group stats */
+ NHA_GROUP_STATS,
+
+ /* u32; nexthop hardware stats enable */
+ NHA_HW_STATS_ENABLE,
+
+ /* u32; read-only; whether any driver collects HW stats */
+ NHA_HW_STATS_USED,
+
__NHA_MAX,
};
@@ -101,4 +116,34 @@ enum {
#define NHA_RES_BUCKET_MAX (__NHA_RES_BUCKET_MAX - 1)
+enum {
+ NHA_GROUP_STATS_UNSPEC,
+
+ /* nested; nexthop group entry stats */
+ NHA_GROUP_STATS_ENTRY,
+
+ __NHA_GROUP_STATS_MAX,
+};
+
+#define NHA_GROUP_STATS_MAX (__NHA_GROUP_STATS_MAX - 1)
+
+enum {
+ NHA_GROUP_STATS_ENTRY_UNSPEC,
+
+ /* u32; nexthop id of the nexthop group entry */
+ NHA_GROUP_STATS_ENTRY_ID,
+
+ /* uint; number of packets forwarded via the nexthop group entry */
+ NHA_GROUP_STATS_ENTRY_PACKETS,
+
+ /* uint; number of packets forwarded via the nexthop group entry in
+ * hardware
+ */
+ NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+
+ __NHA_GROUP_STATS_ENTRY_MAX,
+};
+
+#define NHA_GROUP_STATS_ENTRY_MAX (__NHA_GROUP_STATS_ENTRY_MAX - 1)
+
#endif
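As a comment-level sketch of how the pieces fit together (netlink assembly elided): an RTM_GETNEXTHOP dump carrying NHA_OP_FLAGS with NHA_OP_FLAG_DUMP_STATS set is answered, per group member, with a nested layout along these lines:

        /*
         * NHA_GROUP_STATS
         *   NHA_GROUP_STATS_ENTRY
         *     NHA_GROUP_STATS_ENTRY_ID          (u32, member nexthop id)
         *     NHA_GROUP_STATS_ENTRY_PACKETS     (uint)
         *     NHA_GROUP_STATS_ENTRY_PACKETS_HW  (uint, with DUMP_HW_STATS)
         */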
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 1ccdcae24372..f23ecbdd84a2 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -438,7 +438,8 @@
* %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
* of disconnection indication should be sent to the station
* (Deauthentication or Disassociation frame and reason code for that
- * frame).
+ * frame). %NL80211_ATTR_MLO_LINK_ID can be used optionally to remove
+ * stations connected and using at least that link as one of its links.
*
* @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
@@ -2851,6 +2852,10 @@ enum nl80211_commands {
* mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
* in Draft P802.11be_D4.0.
*
+ * @NL80211_ATTR_ASSOC_SPP_AMSDU: flag attribute used with
+ * %NL80211_CMD_ASSOCIATE indicating the SPP A-MSDUs
+ * are used on this connection
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3394,6 +3399,8 @@ enum nl80211_attrs {
NL80211_ATTR_MLO_TTLM_DLINK,
NL80211_ATTR_MLO_TTLM_ULINK,
+ NL80211_ATTR_ASSOC_SPP_AMSDU,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3534,6 +3541,7 @@ enum nl80211_iftype {
* @NL80211_STA_FLAG_ASSOCIATED: station is associated; used with drivers
* that support %NL80211_FEATURE_FULL_AP_CLIENT_STATE to transition a
* previously added station into associated state
+ * @NL80211_STA_FLAG_SPP_AMSDU: station supports SPP A-MSDUs
* @NL80211_STA_FLAG_MAX: highest station flag number currently defined
* @__NL80211_STA_FLAG_AFTER_LAST: internal use
*/
@@ -3546,6 +3554,7 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_AUTHENTICATED,
NL80211_STA_FLAG_TDLS_PEER,
NL80211_STA_FLAG_ASSOCIATED,
+ NL80211_STA_FLAG_SPP_AMSDU,
/* keep last */
__NL80211_STA_FLAG_AFTER_LAST,
@@ -4260,10 +4269,13 @@ enum nl80211_wmm_rule {
* allowed for peer-to-peer or adhoc communication under the control
* of a DFS master which operates on the same channel (FCC-594280 D01
* Section B.3). Should be used together with %NL80211_RRF_DFS only.
- * @NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT: Client connection to VLP AP
+ * @NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP
* not allowed using this channel
- * @NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT: Client connection to AFC AP
+ * @NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP
* not allowed using this channel
+ * @NL80211_FREQUENCY_ATTR_CAN_MONITOR: This channel can be used in monitor
+ * mode despite other (regulatory) restrictions, even if the channel is
+ * otherwise completely disabled.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4304,8 +4316,9 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_EHT,
NL80211_FREQUENCY_ATTR_PSD,
NL80211_FREQUENCY_ATTR_DFS_CONCURRENT,
- NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT,
- NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT,
+ NL80211_FREQUENCY_ATTR_CAN_MONITOR,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4318,6 +4331,10 @@ enum nl80211_frequency_attr {
#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
NL80211_FREQUENCY_ATTR_IR_CONCURRENT
+#define NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT \
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT
+#define NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT \
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT
/**
* enum nl80211_bitrate_attr - bitrate attributes
@@ -4455,14 +4472,7 @@ enum nl80211_reg_rule_attr {
* value as specified by &struct nl80211_bss_select_rssi_adjust.
* @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching
* (this cannot be used together with SSID).
- * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Nested attribute that carries the
- * band specific minimum rssi thresholds for the bands defined in
- * enum nl80211_band. The minimum rssi threshold value(s32) specific to a
- * band shall be encapsulated in attribute with type value equals to one
- * of the NL80211_BAND_* defined in enum nl80211_band. For example, the
- * minimum rssi threshold value for 2.4GHZ band shall be encapsulated
- * within an attribute of type NL80211_BAND_2GHZ. And one or more of such
- * attributes will be nested within this attribute.
+ * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Obsolete
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -4475,7 +4485,7 @@ enum nl80211_sched_scan_match_attr {
NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
NL80211_SCHED_SCAN_MATCH_ATTR_BSSID,
- NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI,
+ NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI, /* obsolete */
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -4515,8 +4525,8 @@ enum nl80211_sched_scan_match_attr {
peer-to-peer or adhoc communication under the control of a DFS master
which operates on the same channel (FCC-594280 D01 Section B.3).
Should be used together with %NL80211_RRF_DFS only.
- * @NL80211_RRF_NO_UHB_VLP_CLIENT: Client connection to VLP AP not allowed
- * @NL80211_RRF_NO_UHB_AFC_CLIENT: Client connection to AFC AP not allowed
+ * @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed
+ * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -4539,8 +4549,8 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_EHT = 1<<19,
NL80211_RRF_PSD = 1<<20,
NL80211_RRF_DFS_CONCURRENT = 1<<21,
- NL80211_RRF_NO_UHB_VLP_CLIENT = 1<<22,
- NL80211_RRF_NO_UHB_AFC_CLIENT = 1<<23,
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22,
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -4549,6 +4559,8 @@ enum nl80211_reg_rule_flags {
#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
NL80211_RRF_NO_HT40PLUS)
#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
+#define NL80211_RRF_NO_UHB_VLP_CLIENT NL80211_RRF_NO_6GHZ_VLP_CLIENT
+#define NL80211_RRF_NO_UHB_AFC_CLIENT NL80211_RRF_NO_6GHZ_AFC_CLIENT
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
@@ -5096,14 +5108,17 @@ enum nl80211_bss_use_for {
* BSS isn't possible
* @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't
* supported by the device, and this BSS entry represents one.
- * @NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH: STA is not supporting
+ * @NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH: STA is not supporting
* the AP power type (SP, VLP, AP) that the AP uses.
*/
enum nl80211_bss_cannot_use_reasons {
NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0,
- NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 1 << 1,
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 1 << 1,
};
+#define NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH \
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH
+
/**
* enum nl80211_bss - netlink attributes for a BSS
*
@@ -5742,6 +5757,8 @@ struct nl80211_pattern_support {
* %NL80211_ATTR_SCAN_FREQUENCIES contains more than one
* frequency, it means that the match occurred in more than one
* channel.
+ * @NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC: For wakeup reporting only.
+ * Wake up happened due to unprotected deauth or disassoc frame in MFP.
* @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
* @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
*
@@ -5769,6 +5786,7 @@ enum nl80211_wowlan_triggers {
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS,
NL80211_WOWLAN_TRIG_NET_DETECT,
NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC,
/* keep last */
NUM_NL80211_WOWLAN_TRIG,
@@ -6410,8 +6428,7 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
* (set/del PMKSA operations) in AP mode.
*
- * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Driver supports
- * filtering of sched scan results using band specific RSSI thresholds.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Obsolete
*
* @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power
* to a station.
@@ -6520,6 +6537,11 @@ enum nl80211_feature_flags {
* DFS master on the same channel as described in FCC-594280 D01
* (Section B.3). This, for example, allows P2P GO and P2P clients to
* operate on DFS channels as long as there's a concurrent BSS connection.
+ *
+ * @NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT: The driver has support for SPP
+ * (signaling and payload protected) A-MSDUs and this shall be advertised
+ * in the RSNXE.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6561,7 +6583,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD,
+ NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, /* obsolete */
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
NL80211_EXT_FEATURE_SAE_OFFLOAD,
@@ -6594,6 +6616,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_OWE_OFFLOAD,
NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
NL80211_EXT_FEATURE_DFS_CONCURRENT,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index da700999cad4..053b40d642de 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -32,6 +32,7 @@
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
#define PTP_STRICT_FLAGS (1<<3)
+#define PTP_EXT_OFFSET (1<<4)
#define PTP_EXTTS_EDGES (PTP_RISING_EDGE | PTP_FALLING_EDGE)
/*
@@ -40,7 +41,8 @@
#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
PTP_RISING_EDGE | \
PTP_FALLING_EDGE | \
- PTP_STRICT_FLAGS)
+ PTP_STRICT_FLAGS | \
+ PTP_EXT_OFFSET)
/*
* flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
@@ -51,6 +53,11 @@
PTP_FALLING_EDGE)
/*
+ * flag fields valid for the ptp_extts_event report.
+ */
+#define PTP_EXTTS_EVENT_VALID (PTP_ENABLE_FEATURE)
+
+/*
* Bits of the ptp_perout_request.flags field:
*/
#define PTP_PEROUT_ONE_SHOT (1<<0)
@@ -228,9 +235,9 @@ struct ptp_pin_desc {
#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int)
struct ptp_extts_event {
- struct ptp_clock_time t; /* Time event occured. */
+ struct ptp_clock_time t; /* Time event occurred. */
unsigned int index; /* Which channel produced the event. */
- unsigned int flags; /* Reserved for future use. */
+ unsigned int flags; /* Event type. */
unsigned int rsv[2]; /* Reserved for future use. */
};
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index f3e61b04fa01..f5cab7fc96ab 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -62,7 +62,7 @@ struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
- struct tc_pedit_key keys[0];
+ struct tc_pedit_key keys[] __counted_by(nkeys);
};
#define tc_pedit tc_pedit_sel
diff --git a/init/Kconfig b/init/Kconfig
index 7c5df7b0b576..f3ea5dea9c85 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1466,11 +1466,6 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
config HAVE_PCSPKR_PLATFORM
bool
-# interpreter that classic socket filters depend on
-config BPF
- bool
- select CRYPTO_LIB_SHA1
-
menuconfig EXPERT
bool "Configure standard kernel features (expert users)"
# Unhide debug options, to make the on-by-default options visible
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
index 6a906ff93006..bc25f5098a25 100644
--- a/kernel/bpf/Kconfig
+++ b/kernel/bpf/Kconfig
@@ -3,6 +3,7 @@
# BPF interpreter that, for example, classic socket filters depend on.
config BPF
bool
+ select CRYPTO_LIB_SHA1
# Used by archs to tell that they support BPF JIT compiler plus which
# flavour. Only one of the two can be selected for a specific arch since
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f526b7573e97..368c5d86b5b7 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -6,7 +6,7 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
@@ -15,6 +15,9 @@ obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o memalloc.o
+ifeq ($(CONFIG_MMU)$(CONFIG_64BIT),yy)
+obj-$(CONFIG_BPF_SYSCALL) += arena.o
+endif
obj-$(CONFIG_BPF_JIT) += dispatcher.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
new file mode 100644
index 000000000000..86571e760dd6
--- /dev/null
+++ b/kernel/bpf/arena.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+#include <linux/btf_ids.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+
+/*
+ * bpf_arena is a sparsely populated shared memory region between bpf program and
+ * user space process.
+ *
+ * For example on x86-64 the values could be:
+ * user_vm_start 7f7d26200000 // picked by mmap()
+ * kern_vm_start ffffc90001e69000 // picked by get_vm_area()
+ * For user space all pointers within the arena are normal 8-byte addresses.
+ * In this example 7f7d26200000 is the address of the first page (pgoff=0).
+ * The bpf program will access it as: kern_vm_start + lower_32bit_of_user_ptr
+ * (u32)7f7d26200000 -> 26200000
+ * hence
+ * ffffc90001e69000 + 26200000 == ffffc90028069000 is "pgoff=0" within 4Gb
+ * kernel memory region.
+ *
+ * BPF JITs generate the following code to access arena:
+ * mov eax, eax // eax has lower 32-bit of user pointer
+ * mov word ptr [rax + r12 + off], bx
+ * where r12 == kern_vm_start and off is s16.
+ * Hence allocate 4Gb + GUARD_SZ/2 on each side.
+ *
+ * Initially kernel vm_area and user vma are not populated.
+ * User space can fault-in any address which will insert the page
+ * into kernel and user vma.
+ * bpf program can allocate a page via bpf_arena_alloc_pages() kfunc
+ * which will insert it into kernel vm_area.
+ * The later fault-in from user space will populate that page into user vma.
+ */
+
+/* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
+#define GUARD_SZ (1ull << sizeof(((struct bpf_insn *)0)->off) * 8)
+#define KERN_VM_SZ ((1ull << 32) + GUARD_SZ)
+
+struct bpf_arena {
+ struct bpf_map map;
+ u64 user_vm_start;
+ u64 user_vm_end;
+ struct vm_struct *kern_vm;
+ struct maple_tree mt;
+ struct list_head vma_list;
+ struct mutex lock;
+};
+
+u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
+{
+ return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0;
+}
+
+u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
+{
+ return arena ? arena->user_vm_start : 0;
+}
+
+static long arena_map_peek_elem(struct bpf_map *map, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static long arena_map_push_elem(struct bpf_map *map, void *value, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static long arena_map_pop_elem(struct bpf_map *map, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static long arena_map_delete_elem(struct bpf_map *map, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static int arena_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+ return -EOPNOTSUPP;
+}
+
+static long compute_pgoff(struct bpf_arena *arena, long uaddr)
+{
+ return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT;
+}
+
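To make the wraparound arithmetic concrete, reusing the addresses from the example in the comment at the top of this file (illustrative):

        /* user_vm_start = 0x7f7d26200000, uaddr = 0x7f7d26201000:
         * (u32)(uaddr - (u32)user_vm_start)
         *         = (u32)(0x7f7d26201000 - 0x26200000) = 0x1000
         * -> pgoff = 0x1000 >> PAGE_SHIFT = 1, i.e. the second page.
         */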
+static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
+{
+ struct vm_struct *kern_vm;
+ int numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_arena *arena;
+ u64 vm_range;
+ int err = -ENOMEM;
+
+ if (attr->key_size || attr->value_size || attr->max_entries == 0 ||
+ /* BPF_F_MMAPABLE must be set */
+ !(attr->map_flags & BPF_F_MMAPABLE) ||
+ /* No unsupported flags present */
+ (attr->map_flags & ~(BPF_F_SEGV_ON_FAULT | BPF_F_MMAPABLE | BPF_F_NO_USER_CONV)))
+ return ERR_PTR(-EINVAL);
+
+ if (attr->map_extra & ~PAGE_MASK)
+ /* If non-zero the map_extra is an expected user VMA start address */
+ return ERR_PTR(-EINVAL);
+
+ vm_range = (u64)attr->max_entries * PAGE_SIZE;
+ if (vm_range > (1ull << 32))
+ return ERR_PTR(-E2BIG);
+
+ if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))
+ /* user vma must not cross 32-bit boundary */
+ return ERR_PTR(-ERANGE);
+
+ kern_vm = get_vm_area(KERN_VM_SZ, VM_SPARSE | VM_USERMAP);
+ if (!kern_vm)
+ return ERR_PTR(-ENOMEM);
+
+ arena = bpf_map_area_alloc(sizeof(*arena), numa_node);
+ if (!arena)
+ goto err;
+
+ arena->kern_vm = kern_vm;
+ arena->user_vm_start = attr->map_extra;
+ if (arena->user_vm_start)
+ arena->user_vm_end = arena->user_vm_start + vm_range;
+
+ INIT_LIST_HEAD(&arena->vma_list);
+ bpf_map_init_from_attr(&arena->map, attr);
+ mt_init_flags(&arena->mt, MT_FLAGS_ALLOC_RANGE);
+ mutex_init(&arena->lock);
+
+ return &arena->map;
+err:
+ free_vm_area(kern_vm);
+ return ERR_PTR(err);
+}
+
+static int existing_page_cb(pte_t *ptep, unsigned long addr, void *data)
+{
+ struct page *page;
+ pte_t pte;
+
+ pte = ptep_get(ptep);
+ if (!pte_present(pte)) /* sanity check */
+ return 0;
+ page = pte_page(pte);
+ /*
+ * We do not update pte here:
+ * 1. Nobody should be accessing bpf_arena's range outside of a kernel bug
+ * 2. TLB flushing is batched or deferred. Even if we clear pte,
+ * the TLB entries can stick around and continue to permit access to
+ * the freed page. So it all relies on 1.
+ */
+ __free_page(page);
+ return 0;
+}
+
+static void arena_map_free(struct bpf_map *map)
+{
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+
+ /*
+ * Check that user vma-s are not around when bpf map is freed.
+ * mmap() holds vm_file which holds bpf_map refcnt.
+ * munmap() must have happened on vma followed by arena_vm_close()
+ * which would clear arena->vma_list.
+ */
+ if (WARN_ON_ONCE(!list_empty(&arena->vma_list)))
+ return;
+
+ /*
+ * free_vm_area() calls remove_vm_area() that calls free_unmap_vmap_area().
+ * It unmaps everything from vmalloc area and clears pgtables.
+ * Call apply_to_existing_page_range() first to find populated ptes and
+ * free those pages.
+ */
+ apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena),
+ KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL);
+ free_vm_area(arena->kern_vm);
+ mtree_destroy(&arena->mt);
+ bpf_map_area_free(arena);
+}
+
+static void *arena_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static long arena_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf,
+ const struct btf_type *key_type, const struct btf_type *value_type)
+{
+ return 0;
+}
+
+static u64 arena_map_mem_usage(const struct bpf_map *map)
+{
+ return 0;
+}
+
+struct vma_list {
+ struct vm_area_struct *vma;
+ struct list_head head;
+};
+
+static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+{
+ struct vma_list *vml;
+
+ vml = kmalloc(sizeof(*vml), GFP_KERNEL);
+ if (!vml)
+ return -ENOMEM;
+ vma->vm_private_data = vml;
+ vml->vma = vma;
+ list_add(&vml->head, &arena->vma_list);
+ return 0;
+}
+
+static void arena_vm_close(struct vm_area_struct *vma)
+{
+ struct bpf_map *map = vma->vm_file->private_data;
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ struct vma_list *vml;
+
+ guard(mutex)(&arena->lock);
+ vml = vma->vm_private_data;
+ list_del(&vml->head);
+ vma->vm_private_data = NULL;
+ kfree(vml);
+}
+
+#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
+
+static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
+{
+ struct bpf_map *map = vmf->vma->vm_file->private_data;
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ struct page *page;
+ long kbase, kaddr;
+ int ret;
+
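+ /*
+ * User and kernel mappings share the lower 32 bits of the address:
+ * the faulting user address truncated to 32 bits is the page's
+ * offset inside the arena's 4GB kernel region.
+ */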
+ kbase = bpf_arena_get_kern_vm_start(arena);
+ kaddr = kbase + (u32)(vmf->address & PAGE_MASK);
+
+ guard(mutex)(&arena->lock);
+ page = vmalloc_to_page((void *)kaddr);
+ if (page)
+ /* already have a page vmap-ed */
+ goto out;
+
+ if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
+ /* User space requested to segfault when page is not allocated by bpf prog */
+ return VM_FAULT_SIGSEGV;
+
+ ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL);
+ if (ret)
+ return VM_FAULT_SIGSEGV;
+
+ /* Account into memcg of the process that created bpf_arena */
+ ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
+ if (ret) {
+ mtree_erase(&arena->mt, vmf->pgoff);
+ return VM_FAULT_SIGSEGV;
+ }
+
+ ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page);
+ if (ret) {
+ mtree_erase(&arena->mt, vmf->pgoff);
+ __free_page(page);
+ return VM_FAULT_SIGSEGV;
+ }
+out:
+ page_ref_add(page, 1);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct arena_vm_ops = {
+ .close = arena_vm_close,
+ .fault = arena_vm_fault,
+};
+
+static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct bpf_map *map = filp->private_data;
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ long ret;
+
+ if (pgoff)
+ return -EINVAL;
+ if (len > (1ull << 32))
+ return -E2BIG;
+
+ /* if user_vm_start was specified at arena creation time */
+ if (arena->user_vm_start) {
+ if (len > arena->user_vm_end - arena->user_vm_start)
+ return -E2BIG;
+ if (len != arena->user_vm_end - arena->user_vm_start)
+ return -EINVAL;
+ if (addr != arena->user_vm_start)
+ return -EINVAL;
+ }
+
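+ /*
+ * Reserve twice the requested length: if the returned range crosses a
+ * 4GB boundary, round_up() below moves the start to that boundary and
+ * the result still fits within the 2 * len reservation. For example,
+ * with len = 3GB and ret = 0x180000000, ret + len - 1 = 0x23fffffff
+ * crosses 0x200000000; round_up(ret, 4GB) = 0x200000000, and
+ * [0x200000000, 0x2c0000000) lies inside [ret, ret + 2 * len).
+ */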
+ ret = current->mm->get_unmapped_area(filp, addr, len * 2, 0, flags);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+ if ((ret >> 32) == ((ret + len - 1) >> 32))
+ return ret;
+ if (WARN_ON_ONCE(arena->user_vm_start))
+ /* checks at map creation time should prevent this */
+ return -EFAULT;
+ return round_up(ret, 1ull << 32);
+}
+
+static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
+{
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+
+ guard(mutex)(&arena->lock);
+ if (arena->user_vm_start && arena->user_vm_start != vma->vm_start)
+ /*
+ * If map_extra was not specified at arena creation time, then
+ * the 1st user process can do mmap(NULL, ...) to pick user_vm_start,
+ * and the 2nd user process must pass the same addr to mmap(addr, MAP_FIXED..);
+ * or
+ * specify addr in map_extra and
+ * use the same addr later with mmap(addr, MAP_FIXED..);
+ */
+ return -EBUSY;
+
+ if (arena->user_vm_end && arena->user_vm_end != vma->vm_end)
+ /* all user processes must have the same size of mmap-ed region */
+ return -EBUSY;
+
+ /* Earlier checks should prevent this */
+ if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > (1ull << 32) || vma->vm_pgoff))
+ return -EFAULT;
+
+ if (remember_vma(arena, vma))
+ return -ENOMEM;
+
+ arena->user_vm_start = vma->vm_start;
+ arena->user_vm_end = vma->vm_end;
+ /*
+ * bpf_map_mmap() checks that it's being mmaped as VM_SHARED and
+ * clears VM_MAYEXEC. Set VM_DONTEXPAND as well to avoid
+ * potential change of user_vm_start.
+ */
+ vm_flags_set(vma, VM_DONTEXPAND);
+ vma->vm_ops = &arena_vm_ops;
+ return 0;
+}
+
+static int arena_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
+{
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+
+ if ((u64)off > arena->user_vm_end - arena->user_vm_start)
+ return -ERANGE;
+ *imm = (unsigned long)arena->user_vm_start;
+ return 0;
+}
+
+BTF_ID_LIST_SINGLE(bpf_arena_map_btf_ids, struct, bpf_arena)
+const struct bpf_map_ops arena_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
+ .map_alloc = arena_map_alloc,
+ .map_free = arena_map_free,
+ .map_direct_value_addr = arena_map_direct_value_addr,
+ .map_mmap = arena_map_mmap,
+ .map_get_unmapped_area = arena_get_unmapped_area,
+ .map_get_next_key = arena_map_get_next_key,
+ .map_push_elem = arena_map_push_elem,
+ .map_peek_elem = arena_map_peek_elem,
+ .map_pop_elem = arena_map_pop_elem,
+ .map_lookup_elem = arena_map_lookup_elem,
+ .map_update_elem = arena_map_update_elem,
+ .map_delete_elem = arena_map_delete_elem,
+ .map_check_btf = arena_map_check_btf,
+ .map_mem_usage = arena_map_mem_usage,
+ .map_btf_id = &bpf_arena_map_btf_ids[0],
+};
+
+static u64 clear_lo32(u64 val)
+{
+ return val & ~(u64)~0U;
+}
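+
+/* e.g. clear_lo32(0x123456789abcdef0) = 0x1234567800000000; used below to
+ * combine the upper 32 bits of user_vm_start with a 32-bit arena offset.
+ */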
+
+/*
+ * Allocate pages and vmap them into kernel vmalloc area.
+ * Later the pages will be mmaped into user space vma.
+ */
+static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt, int node_id)
+{
+ /* user_vm_end/start are fixed before bpf prog runs */
+ long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
+ u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena);
+ struct page **pages;
+ long pgoff = 0;
+ u32 uaddr32;
+ int ret, i;
+
+ if (page_cnt > page_cnt_max)
+ return 0;
+
+ if (uaddr) {
+ if (uaddr & ~PAGE_MASK)
+ return 0;
+ pgoff = compute_pgoff(arena, uaddr);
+ if (pgoff + page_cnt > page_cnt_max)
+ /* requested address will be outside of user VMA */
+ return 0;
+ }
+
+ /* zeroing is needed, since alloc_pages_bulk_array() only fills in non-zero entries */
+ pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return 0;
+
+ guard(mutex)(&arena->lock);
+
+ if (uaddr)
+ ret = mtree_insert_range(&arena->mt, pgoff, pgoff + page_cnt - 1,
+ MT_ENTRY, GFP_KERNEL);
+ else
+ ret = mtree_alloc_range(&arena->mt, &pgoff, MT_ENTRY,
+ page_cnt, 0, page_cnt_max - 1, GFP_KERNEL);
+ if (ret)
+ goto out_free_pages;
+
+ ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
+ node_id, page_cnt, pages);
+ if (ret)
+ goto out;
+
+ uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
+ /* Earlier checks make sure that uaddr32 + page_cnt * PAGE_SIZE will not overflow 32-bit */
+ ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,
+ kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);
+ if (ret) {
+ for (i = 0; i < page_cnt; i++)
+ __free_page(pages[i]);
+ goto out;
+ }
+ kvfree(pages);
+ return clear_lo32(arena->user_vm_start) + uaddr32;
+out:
+ mtree_erase(&arena->mt, pgoff);
+out_free_pages:
+ kvfree(pages);
+ return 0;
+}
+
+/*
+ * If page is present in vmalloc area, unmap it from vmalloc area,
+ * unmap it from all user space vma-s,
+ * and free it.
+ */
+static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
+{
+ struct vma_list *vml;
+
+ list_for_each_entry(vml, &arena->vma_list, head)
+ zap_page_range_single(vml->vma, uaddr,
+ PAGE_SIZE * page_cnt, NULL);
+}
+
+static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
+{
+ u64 full_uaddr, uaddr_end;
+ long kaddr, pgoff, i;
+ struct page *page;
+
+ /* only the page-aligned lower 32 bits are relevant */
+ uaddr = (u32)uaddr;
+ uaddr &= PAGE_MASK;
+ full_uaddr = clear_lo32(arena->user_vm_start) + uaddr;
+ uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT));
+ if (full_uaddr >= uaddr_end)
+ return;
+
+ page_cnt = (uaddr_end - full_uaddr) >> PAGE_SHIFT;
+
+ guard(mutex)(&arena->lock);
+
+ pgoff = compute_pgoff(arena, uaddr);
+ /* clear range */
+ mtree_store_range(&arena->mt, pgoff, pgoff + page_cnt - 1, NULL, GFP_KERNEL);
+
+ if (page_cnt > 1)
+ /* bulk zap if multiple pages are being freed */
+ zap_pages(arena, full_uaddr, page_cnt);
+
+ kaddr = bpf_arena_get_kern_vm_start(arena) + uaddr;
+ for (i = 0; i < page_cnt; i++, kaddr += PAGE_SIZE, full_uaddr += PAGE_SIZE) {
+ page = vmalloc_to_page((void *)kaddr);
+ if (!page)
+ continue;
+ if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */
+ zap_pages(arena, full_uaddr, 1);
+ vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);
+ __free_page(page);
+ }
+}
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt,
+ int node_id, u64 flags)
+{
+ struct bpf_map *map = p__map;
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+
+ if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt)
+ return NULL;
+
+ return (void *)arena_alloc_pages(arena, (long)addr__ign, page_cnt, node_id);
+}
+
+__bpf_kfunc void bpf_arena_free_pages(void *p__map, void *ptr__ign, u32 page_cnt)
+{
+ struct bpf_map *map = p__map;
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+
+ if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign)
+ return;
+ arena_free_pages(arena, (long)ptr__ign, page_cnt);
+}
+__bpf_kfunc_end_defs();
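+
+/*
+ * Sketch of a call from a BPF program (hypothetical; assumes an arena map
+ * named 'arena' is accessible to the program):
+ *
+ *	void *page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ *
+ *	if (page)
+ *		bpf_arena_free_pages(&arena, page, 1);
+ *
+ * A NULL return means the allocation failed or the arguments were
+ * rejected by the checks above.
+ */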
+
+BTF_KFUNCS_START(arena_kfuncs)
+BTF_ID_FLAGS(func, bpf_arena_alloc_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_arena_free_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_KFUNCS_END(arena_kfuncs)
+
+static const struct btf_kfunc_id_set common_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &arena_kfuncs,
+};
+
+static int __init kfunc_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
+}
+late_initcall(kfunc_init);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 0bdbbbeab155..13358675ff2e 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -82,7 +82,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
u32 elem_size, index_mask, max_entries;
- bool bypass_spec_v1 = bpf_bypass_spec_v1();
+ bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
u64 array_size, mask64;
struct bpf_array *array;
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 0fae79164187..112581cf97e7 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
return -ENOENT;
/* Only allow sleepable program for resched-able iterator */
- if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo))
+ if (prog->sleepable && !bpf_iter_target_support_resched(tinfo))
return -EINVAL;
link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
@@ -697,7 +697,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
struct bpf_run_ctx run_ctx, *old_run_ctx;
int ret;
- if (prog->aux->sleepable) {
+ if (prog->sleepable) {
rcu_read_lock_trace();
migrate_disable();
might_fault();
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 146824cc9689..bdea1a459153 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -414,47 +414,21 @@ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
bpf_selem_unlink_storage(selem, reuse_now);
}
-/* If cacheit_lockit is false, this lookup function is lockless */
-struct bpf_local_storage_data *
-bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
- struct bpf_local_storage_map *smap,
- bool cacheit_lockit)
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem)
{
- struct bpf_local_storage_data *sdata;
- struct bpf_local_storage_elem *selem;
-
- /* Fast path (cache hit) */
- sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
- bpf_rcu_lock_held());
- if (sdata && rcu_access_pointer(sdata->smap) == smap)
- return sdata;
-
- /* Slow path (cache miss) */
- hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
- rcu_read_lock_trace_held())
- if (rcu_access_pointer(SDATA(selem)->smap) == smap)
- break;
-
- if (!selem)
- return NULL;
-
- sdata = SDATA(selem);
- if (cacheit_lockit) {
- unsigned long flags;
-
- /* spinlock is needed to avoid racing with the
- * parallel delete. Otherwise, publishing an already
- * deleted sdata to the cache will become a use-after-free
- * problem in the next bpf_local_storage_lookup().
- */
- raw_spin_lock_irqsave(&local_storage->lock, flags);
- if (selem_linked_to_storage(selem))
- rcu_assign_pointer(local_storage->cache[smap->cache_idx],
- sdata);
- raw_spin_unlock_irqrestore(&local_storage->lock, flags);
- }
+ unsigned long flags;
- return sdata;
+ /* spinlock is needed to avoid racing with the
+ * parallel delete. Otherwise, publishing an already
+ * deleted sdata to the cache will become a use-after-free
+ * problem in the next bpf_local_storage_lookup().
+ */
+ raw_spin_lock_irqsave(&local_storage->lock, flags);
+ if (selem_linked_to_storage(selem))
+ rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
+ raw_spin_unlock_irqrestore(&local_storage->lock, flags);
}
static int check_flags(const struct bpf_local_storage_data *old_sdata,
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index e8e910395bf6..68240c3c6e7d 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -260,9 +260,15 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
BTF_SET_START(sleepable_lsm_hooks)
BTF_ID(func, bpf_lsm_bpf)
BTF_ID(func, bpf_lsm_bpf_map)
-BTF_ID(func, bpf_lsm_bpf_map_alloc_security)
-BTF_ID(func, bpf_lsm_bpf_map_free_security)
+BTF_ID(func, bpf_lsm_bpf_map_create)
+BTF_ID(func, bpf_lsm_bpf_map_free)
BTF_ID(func, bpf_lsm_bpf_prog)
+BTF_ID(func, bpf_lsm_bpf_prog_load)
+BTF_ID(func, bpf_lsm_bpf_prog_free)
+BTF_ID(func, bpf_lsm_bpf_token_create)
+BTF_ID(func, bpf_lsm_bpf_token_free)
+BTF_ID(func, bpf_lsm_bpf_token_cmd)
+BTF_ID(func, bpf_lsm_bpf_token_capable)
BTF_ID(func, bpf_lsm_bprm_check_security)
BTF_ID(func, bpf_lsm_bprm_committed_creds)
BTF_ID(func, bpf_lsm_bprm_committing_creds)
@@ -276,10 +282,6 @@ BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
BTF_ID(func, bpf_lsm_file_receive)
-#ifdef CONFIG_SECURITY_NETWORK
-BTF_ID(func, bpf_lsm_inet_conn_established)
-#endif /* CONFIG_SECURITY_NETWORK */
-
BTF_ID(func, bpf_lsm_inode_create)
BTF_ID(func, bpf_lsm_inode_free_security)
BTF_ID(func, bpf_lsm_inode_getattr)
@@ -330,6 +332,8 @@ BTF_ID(func, bpf_lsm_sb_umount)
BTF_ID(func, bpf_lsm_settime)
#ifdef CONFIG_SECURITY_NETWORK
+BTF_ID(func, bpf_lsm_inet_conn_established)
+
BTF_ID(func, bpf_lsm_socket_accept)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_ID(func, bpf_lsm_socket_connect)
@@ -357,9 +361,8 @@ BTF_ID(func, bpf_lsm_userns_create)
BTF_SET_END(sleepable_lsm_hooks)
BTF_SET_START(untrusted_lsm_hooks)
-BTF_ID(func, bpf_lsm_bpf_map_free_security)
-BTF_ID(func, bpf_lsm_bpf_prog_alloc_security)
-BTF_ID(func, bpf_lsm_bpf_prog_free_security)
+BTF_ID(func, bpf_lsm_bpf_map_free)
+BTF_ID(func, bpf_lsm_bpf_prog_free)
BTF_ID(func, bpf_lsm_file_alloc_security)
BTF_ID(func, bpf_lsm_file_free_security)
#ifdef CONFIG_SECURITY_NETWORK
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 02068bd0e4d9..43356faaa057 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -13,26 +13,17 @@
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
-enum bpf_struct_ops_state {
- BPF_STRUCT_OPS_STATE_INIT,
- BPF_STRUCT_OPS_STATE_INUSE,
- BPF_STRUCT_OPS_STATE_TOBEFREE,
- BPF_STRUCT_OPS_STATE_READY,
-};
-
-#define BPF_STRUCT_OPS_COMMON_VALUE \
- refcount_t refcnt; \
- enum bpf_struct_ops_state state
-
struct bpf_struct_ops_value {
- BPF_STRUCT_OPS_COMMON_VALUE;
+ struct bpf_struct_ops_common_value common;
char data[] ____cacheline_aligned_in_smp;
};
+#define MAX_TRAMP_IMAGE_PAGES 8
+
struct bpf_struct_ops_map {
struct bpf_map map;
struct rcu_head rcu;
- const struct bpf_struct_ops *st_ops;
+ const struct bpf_struct_ops_desc *st_ops_desc;
/* protect map_update */
struct mutex lock;
/* link has all the bpf_links that is populated
@@ -40,12 +31,14 @@ struct bpf_struct_ops_map {
* (in kvalue.data).
*/
struct bpf_link **links;
- /* image is a page that has all the trampolines
+ u32 links_cnt;
+ u32 image_pages_cnt;
+ /* image_pages is an array of pages that has all the trampolines
* that stores the func args before calling the bpf_prog.
- * A PAGE_SIZE "image" is enough to store all trampoline for
- * "links[]".
*/
- void *image;
+ void *image_pages[MAX_TRAMP_IMAGE_PAGES];
+ /* The owner module's btf. */
+ struct btf *btf;
/* uvalue->data stores the kernel struct
* (e.g. tcp_congestion_ops) that is more useful
* to userspace than the kvalue. For example,
@@ -70,35 +63,6 @@ static DEFINE_MUTEX(update_mutex);
#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
-/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
- * the map's value exposed to the userspace and its btf-type-id is
- * stored at the map->btf_vmlinux_value_type_id.
- *
- */
-#define BPF_STRUCT_OPS_TYPE(_name) \
-extern struct bpf_struct_ops bpf_##_name; \
- \
-struct bpf_struct_ops_##_name { \
- BPF_STRUCT_OPS_COMMON_VALUE; \
- struct _name data ____cacheline_aligned_in_smp; \
-};
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
-
-enum {
-#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
- __NR_BPF_STRUCT_OPS_TYPE,
-};
-
-static struct bpf_struct_ops * const bpf_struct_ops[] = {
-#define BPF_STRUCT_OPS_TYPE(_name) \
- [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
-};
-
const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};
@@ -108,138 +72,355 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#endif
};
-static const struct btf_type *module_type;
+BTF_ID_LIST(st_ops_ids)
+BTF_ID(struct, module)
+BTF_ID(struct, bpf_struct_ops_common_value)
+
+enum {
+ IDX_MODULE_ID,
+ IDX_ST_OPS_COMMON_VALUE_ID,
+};
+
+extern struct btf *btf_vmlinux;
-void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
+static bool is_valid_value_type(struct btf *btf, s32 value_id,
+ const struct btf_type *type,
+ const char *value_name)
{
- s32 type_id, value_id, module_id;
+ const struct btf_type *common_value_type;
const struct btf_member *member;
- struct bpf_struct_ops *st_ops;
- const struct btf_type *t;
- char value_name[128];
- const char *mname;
- u32 i, j;
+ const struct btf_type *vt, *mt;
- /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
-#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
+ vt = btf_type_by_id(btf, value_id);
+ if (btf_vlen(vt) != 2) {
+ pr_warn("The number of %s's members should be 2, but we get %d\n",
+ value_name, btf_vlen(vt));
+ return false;
+ }
+ member = btf_type_member(vt);
+ mt = btf_type_by_id(btf, member->type);
+ common_value_type = btf_type_by_id(btf_vmlinux,
+ st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
+ if (mt != common_value_type) {
+ pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
+ value_name);
+ return false;
+ }
+ member++;
+ mt = btf_type_by_id(btf, member->type);
+ if (mt != type) {
+ pr_warn("The second member of %s should be %s\n",
+ value_name, btf_name_by_offset(btf, type->name_off));
+ return false;
+ }
- module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
- if (module_id < 0) {
- pr_warn("Cannot find struct module in btf_vmlinux\n");
- return;
+ return true;
+}
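+
+/*
+ * Concretely, the checks above expect a value type shaped like, e.g.:
+ *
+ *	struct bpf_struct_ops_tcp_congestion_ops {
+ *		struct bpf_struct_ops_common_value common;
+ *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
+ *	};
+ *
+ * i.e. exactly two members: the common value followed by the wrapped
+ * struct itself.
+ */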
+
+static void *bpf_struct_ops_image_alloc(void)
+{
+ void *image;
+ int err;
+
+ err = bpf_jit_charge_modmem(PAGE_SIZE);
+ if (err)
+ return ERR_PTR(err);
+ image = arch_alloc_bpf_trampoline(PAGE_SIZE);
+ if (!image) {
+ bpf_jit_uncharge_modmem(PAGE_SIZE);
+ return ERR_PTR(-ENOMEM);
}
- module_type = btf_type_by_id(btf, module_id);
- for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
- st_ops = bpf_struct_ops[i];
+ return image;
+}
- if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
- sizeof(value_name)) {
- pr_warn("struct_ops name %s is too long\n",
- st_ops->name);
- continue;
- }
- sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
+void bpf_struct_ops_image_free(void *image)
+{
+ if (image) {
+ arch_free_bpf_trampoline(image, PAGE_SIZE);
+ bpf_jit_uncharge_modmem(PAGE_SIZE);
+ }
+}
+
+#define MAYBE_NULL_SUFFIX "__nullable"
+#define MAX_STUB_NAME 128
- value_id = btf_find_by_name_kind(btf, value_name,
- BTF_KIND_STRUCT);
- if (value_id < 0) {
- pr_warn("Cannot find struct %s in btf_vmlinux\n",
- value_name);
+/* Return the type info of a stub function, if it exists.
+ *
+ * The name of a stub function is made up of the name of the struct_ops and
+ * the name of the function pointer member, separated by "__". For example,
+ * if the struct_ops type is named "foo_ops" and the function pointer
+ * member is named "bar", the stub function name would be "foo_ops__bar".
+ */
+static const struct btf_type *
+find_stub_func_proto(const struct btf *btf, const char *st_op_name,
+ const char *member_name)
+{
+ char stub_func_name[MAX_STUB_NAME];
+ const struct btf_type *func_type;
+ s32 btf_id;
+ int cp;
+
+ cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
+ st_op_name, member_name);
+ if (cp >= MAX_STUB_NAME) {
+ pr_warn("Stub function name too long\n");
+ return NULL;
+ }
+ btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
+ if (btf_id < 0)
+ return NULL;
+ func_type = btf_type_by_id(btf, btf_id);
+ if (!func_type)
+ return NULL;
+
+ return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
+}
+
+/* Prepare argument info for every nullable argument of a member of a
+ * struct_ops type.
+ *
+ * Initialize a struct bpf_struct_ops_arg_info according to type info of
+ * the arguments of a stub function. (Check kCFI for more information about
+ * stub functions.)
+ *
+ * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
+ * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
+ * the information used by the verifier to check the arguments of the
+ * BPF struct_ops program assigned to the member. Here, we only care about
+ * the arguments that are marked as __nullable.
+ *
+ * The array of struct bpf_ctx_arg_aux is eventually assigned to
+ * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
+ * verifier. (See check_struct_ops_btf_id())
+ *
+ * arg_info->info will be the list of struct bpf_ctx_arg_aux if success. If
+ * fails, it will be kept untouched.
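+ *
+ * For example (hypothetical names), given a struct_ops "foo_ops" with a
+ * member "int (*bar)(struct task_struct *t)", a stub
+ *
+ *	int foo_ops__bar(struct task_struct *t__nullable)
+ *
+ * marks arg#0 as nullable, and the verifier will then require the
+ * attached BPF program to NULL-check 't' before dereferencing it.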
+ */
+static int prepare_arg_info(struct btf *btf,
+ const char *st_ops_name,
+ const char *member_name,
+ const struct btf_type *func_proto,
+ struct bpf_struct_ops_arg_info *arg_info)
+{
+ const struct btf_type *stub_func_proto, *pointed_type;
+ const struct btf_param *stub_args, *args;
+ struct bpf_ctx_arg_aux *info, *info_buf;
+ u32 nargs, arg_no, info_cnt = 0;
+ u32 arg_btf_id;
+ int offset;
+
+ stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
+ if (!stub_func_proto)
+ return 0;
+
+ /* Check if the number of arguments of the stub function is the same
+ * as the number of arguments of the function pointer.
+ */
+ nargs = btf_type_vlen(func_proto);
+ if (nargs != btf_type_vlen(stub_func_proto)) {
+ pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
+ st_ops_name, member_name, member_name, st_ops_name);
+ return -EINVAL;
+ }
+
+ if (!nargs)
+ return 0;
+
+ args = btf_params(func_proto);
+ stub_args = btf_params(stub_func_proto);
+
+ info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
+ if (!info_buf)
+ return -ENOMEM;
+
+ /* Prepare info for every nullable argument */
+ info = info_buf;
+ for (arg_no = 0; arg_no < nargs; arg_no++) {
+ /* Skip arguments that are not suffixed with
+ * "__nullable".
+ */
+ if (!btf_param_match_suffix(btf, &stub_args[arg_no],
+ MAYBE_NULL_SUFFIX))
continue;
+
+ /* Should be a pointer to struct */
+ pointed_type = btf_type_resolve_ptr(btf,
+ args[arg_no].type,
+ &arg_btf_id);
+ if (!pointed_type ||
+ !btf_type_is_struct(pointed_type)) {
+ pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
+ st_ops_name, member_name, MAYBE_NULL_SUFFIX);
+ goto err_out;
}
- type_id = btf_find_by_name_kind(btf, st_ops->name,
- BTF_KIND_STRUCT);
- if (type_id < 0) {
- pr_warn("Cannot find struct %s in btf_vmlinux\n",
- st_ops->name);
- continue;
+ offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
+ if (offset < 0) {
+ pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
+ st_ops_name, member_name, arg_no);
+ goto err_out;
}
- t = btf_type_by_id(btf, type_id);
- if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
- pr_warn("Cannot support #%u members in struct %s\n",
- btf_type_vlen(t), st_ops->name);
- continue;
+
+ if (args[arg_no].type != stub_args[arg_no].type) {
+ pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
+ arg_no, st_ops_name, member_name);
+ goto err_out;
}
- for_each_member(j, t, member) {
- const struct btf_type *func_proto;
+ /* Fill the information of the new argument */
+ info->reg_type =
+ PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
+ info->btf_id = arg_btf_id;
+ info->btf = btf;
+ info->offset = offset;
- mname = btf_name_by_offset(btf, member->name_off);
- if (!*mname) {
- pr_warn("anon member in struct %s is not supported\n",
- st_ops->name);
- break;
- }
+ info++;
+ info_cnt++;
+ }
- if (__btf_member_bitfield_size(t, member)) {
- pr_warn("bit field member %s in struct %s is not supported\n",
- mname, st_ops->name);
- break;
- }
+ if (info_cnt) {
+ arg_info->info = info_buf;
+ arg_info->cnt = info_cnt;
+ } else {
+ kfree(info_buf);
+ }
- func_proto = btf_type_resolve_func_ptr(btf,
- member->type,
- NULL);
- if (func_proto &&
- btf_distill_func_proto(log, btf,
- func_proto, mname,
- &st_ops->func_models[j])) {
- pr_warn("Error in parsing func ptr %s in struct %s\n",
- mname, st_ops->name);
- break;
- }
- }
+ return 0;
- if (j == btf_type_vlen(t)) {
- if (st_ops->init(btf)) {
- pr_warn("Error in init bpf_struct_ops %s\n",
- st_ops->name);
- } else {
- st_ops->type_id = type_id;
- st_ops->type = t;
- st_ops->value_id = value_id;
- st_ops->value_type = btf_type_by_id(btf,
- value_id);
- }
- }
- }
+err_out:
+ kfree(info_buf);
+
+ return -EINVAL;
}
-extern struct btf *btf_vmlinux;
+/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+ struct bpf_struct_ops_arg_info *arg_info;
+ int i;
-static const struct bpf_struct_ops *
-bpf_struct_ops_find_value(u32 value_id)
+ arg_info = st_ops_desc->arg_info;
+ for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
+ kfree(arg_info[i].info);
+
+ kfree(arg_info);
+}
+
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log)
{
- unsigned int i;
+ struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
+ struct bpf_struct_ops_arg_info *arg_info;
+ const struct btf_member *member;
+ const struct btf_type *t;
+ s32 type_id, value_id;
+ char value_name[128];
+ const char *mname;
+ int i, err;
- if (!value_id || !btf_vmlinux)
- return NULL;
+ if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
+ sizeof(value_name)) {
+ pr_warn("struct_ops name %s is too long\n",
+ st_ops->name);
+ return -EINVAL;
+ }
+ sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
- for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
- if (bpf_struct_ops[i]->value_id == value_id)
- return bpf_struct_ops[i];
+ if (!st_ops->cfi_stubs) {
+ pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
+ return -EINVAL;
}
- return NULL;
-}
+ type_id = btf_find_by_name_kind(btf, st_ops->name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0) {
+ pr_warn("Cannot find struct %s in %s\n",
+ st_ops->name, btf_get_name(btf));
+ return -EINVAL;
+ }
+ t = btf_type_by_id(btf, type_id);
+ if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
+ pr_warn("Cannot support #%u members in struct %s\n",
+ btf_type_vlen(t), st_ops->name);
+ return -EINVAL;
+ }
-const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
-{
- unsigned int i;
+ value_id = btf_find_by_name_kind(btf, value_name,
+ BTF_KIND_STRUCT);
+ if (value_id < 0) {
+ pr_warn("Cannot find struct %s in %s\n",
+ value_name, btf_get_name(btf));
+ return -EINVAL;
+ }
+ if (!is_valid_value_type(btf, value_id, t, value_name))
+ return -EINVAL;
- if (!type_id || !btf_vmlinux)
- return NULL;
+ arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
+ GFP_KERNEL);
+ if (!arg_info)
+ return -ENOMEM;
+
+ st_ops_desc->arg_info = arg_info;
+ st_ops_desc->type = t;
+ st_ops_desc->type_id = type_id;
+ st_ops_desc->value_id = value_id;
+ st_ops_desc->value_type = btf_type_by_id(btf, value_id);
+
+ for_each_member(i, t, member) {
+ const struct btf_type *func_proto;
- for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
- if (bpf_struct_ops[i]->type_id == type_id)
- return bpf_struct_ops[i];
+ mname = btf_name_by_offset(btf, member->name_off);
+ if (!*mname) {
+ pr_warn("anon member in struct %s is not supported\n",
+ st_ops->name);
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (__btf_member_bitfield_size(t, member)) {
+ pr_warn("bit field member %s in struct %s is not supported\n",
+ mname, st_ops->name);
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ func_proto = btf_type_resolve_func_ptr(btf,
+ member->type,
+ NULL);
+ if (!func_proto)
+ continue;
+
+ if (btf_distill_func_proto(log, btf,
+ func_proto, mname,
+ &st_ops->func_models[i])) {
+ pr_warn("Error in parsing func ptr %s in struct %s\n",
+ mname, st_ops->name);
+ err = -EINVAL;
+ goto errout;
+ }
+
+ err = prepare_arg_info(btf, st_ops->name, mname,
+ func_proto,
+ arg_info + i);
+ if (err)
+ goto errout;
}
- return NULL;
+ if (st_ops->init(btf)) {
+ pr_warn("Error in init bpf_struct_ops %s\n",
+ st_ops->name);
+ err = -EINVAL;
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ bpf_struct_ops_desc_release(st_ops_desc);
+
+ return err;
}
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
@@ -265,7 +446,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
kvalue = &st_map->kvalue;
/* Pair with smp_store_release() during map_update */
- state = smp_load_acquire(&kvalue->state);
+ state = smp_load_acquire(&kvalue->common.state);
if (state == BPF_STRUCT_OPS_STATE_INIT) {
memset(value, 0, map->value_size);
return 0;
@@ -276,7 +457,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
*/
uvalue = value;
memcpy(uvalue, st_map->uvalue, map->value_size);
- uvalue->state = state;
+ uvalue->common.state = state;
/* This value offers the user space a general estimate of how
* many sockets are still utilizing this struct_ops for TCP
@@ -284,7 +465,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
* should sufficiently meet our present goals.
*/
refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
- refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));
+ refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));
return 0;
}
@@ -296,10 +477,9 @@ static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
- const struct btf_type *t = st_map->st_ops->type;
u32 i;
- for (i = 0; i < btf_type_vlen(t); i++) {
+ for (i = 0; i < st_map->links_cnt; i++) {
if (st_map->links[i]) {
bpf_link_put(st_map->links[i]);
st_map->links[i] = NULL;
@@ -307,7 +487,16 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
}
}
-static int check_zero_holes(const struct btf_type *t, void *data)
+static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
+{
+ int i;
+
+ for (i = 0; i < st_map->image_pages_cnt; i++)
+ bpf_struct_ops_image_free(st_map->image_pages[i]);
+ st_map->image_pages_cnt = 0;
+}
+
+static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
const struct btf_member *member;
u32 i, moff, msize, prev_mend = 0;
@@ -319,8 +508,8 @@ static int check_zero_holes(const struct btf_type *t, void *data)
memchr_inv(data + prev_mend, 0, moff - prev_mend))
return -EINVAL;
- mtype = btf_type_by_id(btf_vmlinux, member->type);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
+ mtype = btf_type_by_id(btf, member->type);
+ mtype = btf_resolve_size(btf, mtype, &msize);
if (IS_ERR(mtype))
return PTR_ERR(mtype);
prev_mend = moff + msize;
@@ -352,9 +541,12 @@ const struct bpf_link_ops bpf_struct_ops_link_lops = {
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
struct bpf_tramp_link *link,
const struct btf_func_model *model,
- void *stub_func, void *image, void *image_end)
+ void *stub_func,
+ void **_image, u32 *_image_off,
+ bool allow_alloc)
{
- u32 flags = BPF_TRAMP_F_INDIRECT;
+ u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
+ void *image = *_image;
int size;
tlinks[BPF_TRAMP_FENTRY].links[0] = link;
@@ -364,27 +556,49 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
flags |= BPF_TRAMP_F_RET_FENTRY_RET;
size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
- if (size < 0)
- return size;
- if (size > (unsigned long)image_end - (unsigned long)image)
- return -E2BIG;
- return arch_prepare_bpf_trampoline(NULL, image, image_end,
+ if (size <= 0)
+ return size ? : -EFAULT;
+
+ /* Allocate image buffer if necessary */
+ if (!image || size > PAGE_SIZE - image_off) {
+ if (!allow_alloc)
+ return -E2BIG;
+
+ image = bpf_struct_ops_image_alloc();
+ if (IS_ERR(image))
+ return PTR_ERR(image);
+ image_off = 0;
+ }
+
+ size = arch_prepare_bpf_trampoline(NULL, image + image_off,
+ image + PAGE_SIZE,
model, flags, tlinks, stub_func);
+ if (size <= 0) {
+ if (image != *_image)
+ bpf_struct_ops_image_free(image);
+ return size ? : -EFAULT;
+ }
+
+ *_image = image;
+ *_image_off = image_off + size;
+ return 0;
}
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 flags)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
- const struct bpf_struct_ops *st_ops = st_map->st_ops;
+ const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
+ const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
struct bpf_struct_ops_value *uvalue, *kvalue;
+ const struct btf_type *module_type;
const struct btf_member *member;
- const struct btf_type *t = st_ops->type;
+ const struct btf_type *t = st_ops_desc->type;
struct bpf_tramp_links *tlinks;
void *udata, *kdata;
int prog_fd, err;
- void *image, *image_end;
- u32 i;
+ u32 i, trampoline_start, image_off = 0;
+ void *cur_image = NULL, *image = NULL;
if (flags)
return -EINVAL;
@@ -392,16 +606,16 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (*(u32 *)key != 0)
return -E2BIG;
- err = check_zero_holes(st_ops->value_type, value);
+ err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
if (err)
return err;
uvalue = value;
- err = check_zero_holes(t, uvalue->data);
+ err = check_zero_holes(st_map->btf, t, uvalue->data);
if (err)
return err;
- if (uvalue->state || refcount_read(&uvalue->refcnt))
+ if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
return -EINVAL;
tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
@@ -413,7 +627,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
mutex_lock(&st_map->lock);
- if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
+ if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
err = -EBUSY;
goto unlock;
}
@@ -422,9 +636,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
udata = &uvalue->data;
kdata = &kvalue->data;
- image = st_map->image;
- image_end = st_map->image + PAGE_SIZE;
+ module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
for_each_member(i, t, member) {
const struct btf_type *mtype, *ptype;
struct bpf_prog *prog;
@@ -432,7 +645,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
u32 moff;
moff = __btf_member_bit_offset(t, member) / 8;
- ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
+ ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
if (ptype == module_type) {
if (*(void **)(udata + moff))
goto reset_unlock;
@@ -457,8 +670,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (!ptype || !btf_type_is_func_proto(ptype)) {
u32 msize;
- mtype = btf_type_by_id(btf_vmlinux, member->type);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
+ mtype = btf_type_by_id(st_map->btf, member->type);
+ mtype = btf_resolve_size(st_map->btf, mtype, &msize);
if (IS_ERR(mtype)) {
err = PTR_ERR(mtype);
goto reset_unlock;
@@ -484,7 +697,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
}
if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
- prog->aux->attach_btf_id != st_ops->type_id ||
+ prog->aux->attach_btf_id != st_ops_desc->type_id ||
prog->expected_attach_type != i) {
bpf_prog_put(prog);
err = -EINVAL;
@@ -501,37 +714,47 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
&bpf_struct_ops_link_lops, prog);
st_map->links[i] = &link->link;
+ trampoline_start = image_off;
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
- &st_ops->func_models[i],
- *(void **)(st_ops->cfi_stubs + moff),
- image, image_end);
+ &st_ops->func_models[i],
+ *(void **)(st_ops->cfi_stubs + moff),
+ &image, &image_off,
+ st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
+ if (err)
+ goto reset_unlock;
+
+ if (cur_image != image) {
+ st_map->image_pages[st_map->image_pages_cnt++] = image;
+ cur_image = image;
+ trampoline_start = 0;
+ }
if (err < 0)
goto reset_unlock;
- *(void **)(kdata + moff) = image + cfi_get_offset();
- image += err;
+ *(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();
/* put prog_id to udata */
*(unsigned long *)(udata + moff) = prog->aux->id;
}
+ if (st_ops->validate) {
+ err = st_ops->validate(kdata);
+ if (err)
+ goto reset_unlock;
+ }
+ for (i = 0; i < st_map->image_pages_cnt; i++)
+ arch_protect_bpf_trampoline(st_map->image_pages[i], PAGE_SIZE);
+
if (st_map->map.map_flags & BPF_F_LINK) {
err = 0;
- if (st_ops->validate) {
- err = st_ops->validate(kdata);
- if (err)
- goto reset_unlock;
- }
- arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
/* Let bpf_link handle registration & unregistration.
*
* Pair with smp_load_acquire() during lookup_elem().
*/
- smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
+ smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
goto unlock;
}
- arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
err = st_ops->reg(kdata);
if (likely(!err)) {
/* This refcnt increment on the map here after
@@ -545,7 +768,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
* It ensures the above udata updates (e.g. prog->aux->id)
* can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
*/
- smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
+ smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
goto unlock;
}
@@ -554,9 +777,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
* there was a race in registering the struct_ops (under the same name) to
* a sub-system through different struct_ops's maps.
*/
- arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);
reset_unlock:
+ bpf_struct_ops_map_free_image(st_map);
bpf_struct_ops_map_put_progs(st_map);
memset(uvalue, 0, map->value_size);
memset(kvalue, 0, map->value_size);
@@ -575,12 +798,12 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
if (st_map->map.map_flags & BPF_F_LINK)
return -EOPNOTSUPP;
- prev_state = cmpxchg(&st_map->kvalue.state,
+ prev_state = cmpxchg(&st_map->kvalue.common.state,
BPF_STRUCT_OPS_STATE_INUSE,
BPF_STRUCT_OPS_STATE_TOBEFREE);
switch (prev_state) {
case BPF_STRUCT_OPS_STATE_INUSE:
- st_map->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
bpf_map_put(map);
return 0;
case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -597,6 +820,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
struct seq_file *m)
{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
void *value;
int err;
@@ -606,7 +830,8 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
if (!err) {
- btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
+ btf_type_seq_show(st_map->btf,
+ map->btf_vmlinux_value_type_id,
value, m);
seq_puts(m, "\n");
}
@@ -621,16 +846,22 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
if (st_map->links)
bpf_struct_ops_map_put_progs(st_map);
bpf_map_area_free(st_map->links);
- if (st_map->image) {
- arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
- bpf_jit_uncharge_modmem(PAGE_SIZE);
- }
+ bpf_struct_ops_map_free_image(st_map);
bpf_map_area_free(st_map->uvalue);
bpf_map_area_free(st_map);
}
static void bpf_struct_ops_map_free(struct bpf_map *map)
{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+ /* st_ops->owner was acquired during map_alloc to implicitly holds
+ * the btf's refcnt. The acquire was only done when btf_is_module()
+ * st_map->btf cannot be NULL here.
+ */
+ if (btf_is_module(st_map->btf))
+ module_put(st_map->st_ops_desc->st_ops->owner);
+
/* The struct_ops's function may switch to another struct_ops.
*
* For example, bpf_tcp_cc_x->init() may switch to
@@ -654,29 +885,61 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
- (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
+ (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
+ !attr->btf_vmlinux_value_type_id)
return -EINVAL;
return 0;
}
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
- const struct bpf_struct_ops *st_ops;
+ const struct bpf_struct_ops_desc *st_ops_desc;
size_t st_map_size;
struct bpf_struct_ops_map *st_map;
const struct btf_type *t, *vt;
+ struct module *mod = NULL;
struct bpf_map *map;
+ struct btf *btf;
int ret;
- st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
- if (!st_ops)
- return ERR_PTR(-ENOTSUPP);
+ if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
+ /* The map holds btf for its whole life time. */
+ btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
+ if (IS_ERR(btf))
+ return ERR_CAST(btf);
+ if (!btf_is_module(btf)) {
+ btf_put(btf);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mod = btf_try_get_module(btf);
+ /* mod holds a refcnt to btf. We don't need an extra refcnt
+ * here.
+ */
+ btf_put(btf);
+ if (!mod)
+ return ERR_PTR(-EINVAL);
+ } else {
+ btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(btf))
+ return ERR_CAST(btf);
+ if (!btf)
+ return ERR_PTR(-ENOTSUPP);
+ }
+
+ st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
+ if (!st_ops_desc) {
+ ret = -ENOTSUPP;
+ goto errout;
+ }
- vt = st_ops->value_type;
- if (attr->value_size != vt->size)
- return ERR_PTR(-EINVAL);
+ vt = st_ops_desc->value_type;
+ if (attr->value_size != vt->size) {
+ ret = -EINVAL;
+ goto errout;
+ }
- t = st_ops->type;
+ t = st_ops_desc->type;
st_map_size = sizeof(*st_map) +
/* kvalue stores the
@@ -685,48 +948,43 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
(vt->size - sizeof(struct bpf_struct_ops_value));
st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
- if (!st_map)
- return ERR_PTR(-ENOMEM);
+ if (!st_map) {
+ ret = -ENOMEM;
+ goto errout;
+ }
- st_map->st_ops = st_ops;
+ st_map->st_ops_desc = st_ops_desc;
map = &st_map->map;
- ret = bpf_jit_charge_modmem(PAGE_SIZE);
- if (ret) {
- __bpf_struct_ops_map_free(map);
- return ERR_PTR(ret);
- }
-
- st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
- if (!st_map->image) {
- /* __bpf_struct_ops_map_free() uses st_map->image as flag
- * for "charged or not". In this case, we need to unchange
- * here.
- */
- bpf_jit_uncharge_modmem(PAGE_SIZE);
- __bpf_struct_ops_map_free(map);
- return ERR_PTR(-ENOMEM);
- }
st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
+ st_map->links_cnt = btf_type_vlen(t);
st_map->links =
- bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *),
+ bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_links *),
NUMA_NO_NODE);
if (!st_map->uvalue || !st_map->links) {
- __bpf_struct_ops_map_free(map);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto errout_free;
}
+ st_map->btf = btf;
mutex_init(&st_map->lock);
bpf_map_init_from_attr(map, attr);
return map;
+
+errout_free:
+ __bpf_struct_ops_map_free(map);
+errout:
+ module_put(mod);
+
+ return ERR_PTR(ret);
}
static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
- const struct bpf_struct_ops *st_ops = st_map->st_ops;
- const struct btf_type *vt = st_ops->value_type;
+ const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
+ const struct btf_type *vt = st_ops_desc->value_type;
u64 usage;
usage = sizeof(*st_map) +
@@ -785,7 +1043,7 @@ static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
map->map_flags & BPF_F_LINK &&
/* Pair with smp_store_release() during map_update */
- smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
+ smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}
static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
@@ -800,7 +1058,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
/* st_link->map can be NULL if
* bpf_struct_ops_link_create() fails to register.
*/
- st_map->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
bpf_map_put(&st_map->map);
}
kfree(st_link);
@@ -847,7 +1105,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
if (!bpf_struct_ops_valid_to_reg(new_map))
return -EINVAL;
- if (!st_map->st_ops->update)
+ if (!st_map->st_ops_desc->st_ops->update)
return -EOPNOTSUPP;
mutex_lock(&update_mutex);
@@ -860,12 +1118,12 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
/* The new and old struct_ops must be the same type. */
- if (st_map->st_ops != old_st_map->st_ops) {
+ if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
err = -EINVAL;
goto err_out;
}
- err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
if (err)
goto err_out;
@@ -916,7 +1174,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
if (err)
goto err_out;
- err = st_map->st_ops->reg(st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
if (err) {
bpf_link_cleanup(&link_primer);
link = NULL;
@@ -931,3 +1189,10 @@ err_out:
kfree(link);
return err;
}
+
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+ info->btf_vmlinux_id = btf_obj_id(st_map->btf);
+}
diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
deleted file mode 100644
index 5678a9ddf817..000000000000
--- a/kernel/bpf/bpf_struct_ops_types.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* internal file - do not include directly */
-
-#ifdef CONFIG_BPF_JIT
-#ifdef CONFIG_NET
-BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
-#endif
-#ifdef CONFIG_INET
-#include <net/tcp.h>
-BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
-#endif
-#endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 596471189176..90c4a32d89ff 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -19,6 +19,7 @@
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
+#include <linux/bpf.h>
#include <linux/bpf_lsm.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
@@ -241,6 +242,12 @@ struct btf_id_dtor_kfunc_tab {
struct btf_id_dtor_kfunc dtors[];
};
+struct btf_struct_ops_tab {
+ u32 cnt;
+ u32 capacity;
+ struct bpf_struct_ops_desc ops[];
+};
+
struct btf {
void *data;
struct btf_type **types;
@@ -258,6 +265,7 @@ struct btf {
struct btf_kfunc_set_tab *kfunc_set_tab;
struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
struct btf_struct_metas *struct_meta_tab;
+ struct btf_struct_ops_tab *struct_ops_tab;
/* split BTF support */
struct btf *base_btf;
@@ -801,9 +809,23 @@ static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
return __btf_name_valid(btf, offset);
}
+/* Allow any printable character in DATASEC names */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
- return __btf_name_valid(btf, offset);
+ /* offset must be valid */
+ const char *src = btf_str_by_offset(btf, offset);
+ const char *src_limit;
+
+ /* set a limit on identifier length */
+ src_limit = src + KSYM_NAME_LEN;
+ src++;
+ while (*src && src < src_limit) {
+ if (!isprint(*src))
+ return false;
+ src++;
+ }
+
+ return !*src;
}
static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
@@ -1688,11 +1710,27 @@ static void btf_free_struct_meta_tab(struct btf *btf)
btf->struct_meta_tab = NULL;
}
+static void btf_free_struct_ops_tab(struct btf *btf)
+{
+ struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
+ u32 i;
+
+ if (!tab)
+ return;
+
+ for (i = 0; i < tab->cnt; i++)
+ bpf_struct_ops_desc_release(&tab->ops[i]);
+
+ kfree(tab);
+ btf->struct_ops_tab = NULL;
+}
+
static void btf_free(struct btf *btf)
{
btf_free_struct_meta_tab(btf);
btf_free_dtor_kfunc_tab(btf);
btf_free_kfunc_set_tab(btf);
+ btf_free_struct_ops_tab(btf);
kvfree(btf->types);
kvfree(btf->resolved_sizes);
kvfree(btf->resolved_ids);
@@ -1707,6 +1745,11 @@ static void btf_free_rcu(struct rcu_head *rcu)
btf_free(btf);
}
+const char *btf_get_name(const struct btf *btf)
+{
+ return btf->name;
+}
+
void btf_get(struct btf *btf)
{
refcount_inc(&btf->refcnt);
@@ -3310,30 +3353,48 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
return BTF_FIELD_FOUND;
}
-const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
- int comp_idx, const char *tag_key)
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id)
{
- const char *value = NULL;
- int i;
+ int len = strlen(tag_key);
+ int i, n;
- for (i = 1; i < btf_nr_types(btf); i++) {
+ for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
const struct btf_type *t = btf_type_by_id(btf, i);
- int len = strlen(tag_key);
if (!btf_type_is_decl_tag(t))
continue;
- if (pt != btf_type_by_id(btf, t->type) ||
- btf_type_decl_tag(t)->component_idx != comp_idx)
+ if (pt != btf_type_by_id(btf, t->type))
+ continue;
+ if (btf_type_decl_tag(t)->component_idx != comp_idx)
continue;
if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
continue;
- /* Prevent duplicate entries for same type */
- if (value)
- return ERR_PTR(-EEXIST);
- value = __btf_name_by_offset(btf, t->name_off) + len;
+ return i;
}
- if (!value)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
+}
+
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key)
+{
+ const char *value = NULL;
+ const struct btf_type *t;
+ int len, id;
+
+ id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ t = btf_type_by_id(btf, id);
+ len = strlen(tag_key);
+ value = __btf_name_by_offset(btf, t->name_off) + len;
+
+ /* Prevent duplicate entries for same type */
+ id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
+ if (id >= 0)
+ return ERR_PTR(-EEXIST);
+
return value;
}
@@ -5647,15 +5708,29 @@ static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
return ctx_type->type;
}
-const struct btf_type *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, enum bpf_prog_type prog_type,
- int arg)
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
{
const struct btf_type *ctx_type;
const char *tname, *ctx_tname;
t = btf_type_by_id(btf, t->type);
+
+ /* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to
+ * check before we skip all the typedefs below.
+ */
+ if (prog_type == BPF_PROG_TYPE_KPROBE) {
+ while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
+ t = btf_type_by_id(btf, t->type);
+
+ if (btf_type_is_typedef(t)) {
+ tname = btf_name_by_offset(btf, t->name_off);
+ if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
+ return true;
+ }
+ }
+
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_struct(t)) {
@@ -5664,27 +5739,30 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
* is not supported yet.
* BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
*/
- return NULL;
+ return false;
}
tname = btf_name_by_offset(btf, t->name_off);
if (!tname) {
bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
- return NULL;
+ return false;
}
ctx_type = find_canonical_prog_ctx_type(prog_type);
if (!ctx_type) {
bpf_log(log, "btf_vmlinux is malformed\n");
/* should not happen */
- return NULL;
+ return false;
}
again:
ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
if (!ctx_tname) {
/* should not happen */
bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
- return NULL;
+ return false;
}
+ /* program types without named context types work only with arg:ctx tag */
+ if (ctx_tname[0] == '\0')
+ return false;
/* only compare that prog's ctx type name is the same as
* kernel expects. No need to compare field by field.
* It's ok for bpf prog to do:
@@ -5693,20 +5771,20 @@ again:
* { // no fields of skb are ever used }
*/
if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
- return ctx_type;
+ return true;
if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
- return ctx_type;
+ return true;
if (strcmp(ctx_tname, tname)) {
/* bpf_user_pt_regs_t is a typedef, so resolve it to
* underlying struct and check name again
*/
if (!btf_type_is_modifier(ctx_type))
- return NULL;
+ return false;
while (btf_type_is_modifier(ctx_type))
ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
goto again;
}
- return ctx_type;
+ return true;
}
/* forward declarations for arch-specific underlying types of
@@ -5858,7 +5936,7 @@ static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
enum bpf_prog_type prog_type,
int arg)
{
- if (!btf_get_prog_ctx_type(log, btf, t, prog_type, arg))
+ if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
return -ENOENT;
return find_kern_ctx_type_id(prog_type);
}
@@ -5933,8 +6011,6 @@ struct btf *btf_parse_vmlinux(void)
/* btf_parse_vmlinux() runs under bpf_verifier_lock */
bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
- bpf_struct_ops_init(btf, log);
-
refcount_set(&btf->refcnt, 1);
err = btf_alloc_id(btf);
@@ -6092,6 +6168,26 @@ static bool prog_args_trusted(const struct bpf_prog *prog)
}
}
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no)
+{
+ const struct btf_param *args;
+ const struct btf_type *t;
+ int off = 0, i;
+ u32 sz;
+
+ args = btf_params(func_proto);
+ for (i = 0; i < arg_no; i++) {
+ t = btf_type_by_id(btf, args[i].type);
+ t = btf_resolve_size(btf, t, &sz);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ off += roundup(sz, 8);
+ }
+
+ return off;
+}
+
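A worked example of the layout rule above: for a prototype like int f(int a, struct foo *b, u64 c), btf_ctx_arg_offset() yields offset 0 for arg 0, 8 for arg 1 (the 4-byte int is rounded up to an 8-byte slot), and 16 for arg 2, since roundup(sz, 8) forces every argument into its own 8-byte slot of the raw tracing context.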
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
@@ -6228,7 +6324,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
}
info->reg_type = ctx_arg_info->reg_type;
- info->btf = btf_vmlinux;
+ info->btf = ctx_arg_info->btf ? : btf_vmlinux;
info->btf_id = ctx_arg_info->btf_id;
return true;
}
@@ -6284,6 +6380,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
__btf_name_by_offset(btf, t->name_off));
return true;
}
+EXPORT_SYMBOL_GPL(btf_ctx_access);
enum bpf_struct_walk_result {
/* < 0 error */
@@ -6946,6 +7043,81 @@ static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
return false;
}
+struct bpf_cand_cache {
+ const char *name;
+ u32 name_len;
+ u16 kind;
+ u16 cnt;
+ struct {
+ const struct btf *btf;
+ u32 id;
+ } cands[];
+};
+
+static DEFINE_MUTEX(cand_cache_mutex);
+
+static struct bpf_cand_cache *
+bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
+
+static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
+ const struct btf *btf, const struct btf_type *t)
+{
+ struct bpf_cand_cache *cc;
+ struct bpf_core_ctx ctx = {
+ .btf = btf,
+ .log = log,
+ };
+ u32 kern_type_id, type_id;
+ int err = 0;
+
+ /* skip PTR and modifiers */
+ type_id = t->type;
+ t = btf_type_by_id(btf, t->type);
+ while (btf_type_is_modifier(t)) {
+ type_id = t->type;
+ t = btf_type_by_id(btf, t->type);
+ }
+
+ mutex_lock(&cand_cache_mutex);
+ cc = bpf_core_find_cands(&ctx, type_id);
+ if (IS_ERR(cc)) {
+ err = PTR_ERR(cc);
+ bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
+ arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
+ err);
+ goto cand_cache_unlock;
+ }
+ if (cc->cnt != 1) {
+ bpf_log(log, "arg#%d reference type('%s %s') %s\n",
+ arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
+ cc->cnt == 0 ? "has no matches" : "is ambiguous");
+ err = cc->cnt == 0 ? -ENOENT : -ESRCH;
+ goto cand_cache_unlock;
+ }
+ if (btf_is_module(cc->cands[0].btf)) {
+ bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
+ arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
+ err = -EOPNOTSUPP;
+ goto cand_cache_unlock;
+ }
+ kern_type_id = cc->cands[0].id;
+
+cand_cache_unlock:
+ mutex_unlock(&cand_cache_mutex);
+ if (err)
+ return err;
+
+ return kern_type_id;
+}
+
+enum btf_arg_tag {
+ ARG_TAG_CTX = BIT_ULL(0),
+ ARG_TAG_NONNULL = BIT_ULL(1),
+ ARG_TAG_TRUSTED = BIT_ULL(2),
+ ARG_TAG_NULLABLE = BIT_ULL(3),
+ ARG_TAG_ARENA = BIT_ULL(4),
+};
+
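On the source side these bits correspond to BTF decl tags of the form "arg:<name>" on global subprog parameters. A hedged BPF-C sketch, assuming the libbpf-style wrapper macros from bpf_helpers.h (e.g. __arg_ctx expanding to __attribute__((btf_decl_tag("arg:ctx")))):

/* illustrative global subprog; the verifier derives register types
 * from the tags rather than from the raw BTF types alone
 */
__noinline int example_subprog(void *ctx __arg_ctx,
			       struct task_struct *task __arg_trusted __arg_nullable,
			       int *val __arg_nonnull)
{
	if (!task)	/* a nullable trusted pointer must be NULL-checked */
		return 0;
	return *val;	/* a nonnull pointer may be dereferenced directly */
}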
/* Process BTF of a function to produce high-level expectation of function
* arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
* is cached in subprog info for reuse.
@@ -7009,6 +7181,8 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
args = (const struct btf_param *)(t + 1);
nargs = btf_type_vlen(t);
if (nargs > MAX_BPF_FUNC_REG_ARGS) {
+ if (!is_global)
+ return -EINVAL;
bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
tname, nargs, MAX_BPF_FUNC_REG_ARGS);
return -EINVAL;
@@ -7018,6 +7192,8 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
+ if (!is_global)
+ return -EINVAL;
bpf_log(log,
"Global function %s() doesn't return scalar. Only those are supported.\n",
tname);
@@ -7027,92 +7203,134 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
* Only PTR_TO_CTX and SCALAR are supported atm.
*/
for (i = 0; i < nargs; i++) {
- bool is_nonnull = false;
- const char *tag;
+ u32 tags = 0;
+ int id = 0;
- t = btf_type_by_id(btf, args[i].type);
-
- tag = btf_find_decl_tag_value(btf, fn_t, i, "arg:");
- if (IS_ERR(tag) && PTR_ERR(tag) == -ENOENT) {
- tag = NULL;
- } else if (IS_ERR(tag)) {
- bpf_log(log, "arg#%d type's tag fetching failure: %ld\n", i, PTR_ERR(tag));
- return PTR_ERR(tag);
- }
/* 'arg:<tag>' decl_tag takes precedence over derivation of
* register type from BTF type itself
*/
- if (tag) {
+ while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
+ const struct btf_type *tag_t = btf_type_by_id(btf, id);
+ const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
+
/* disallow arg tags in static subprogs */
if (!is_global) {
bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
return -EOPNOTSUPP;
}
+
if (strcmp(tag, "ctx") == 0) {
- sub->args[i].arg_type = ARG_PTR_TO_CTX;
- continue;
+ tags |= ARG_TAG_CTX;
+ } else if (strcmp(tag, "trusted") == 0) {
+ tags |= ARG_TAG_TRUSTED;
+ } else if (strcmp(tag, "nonnull") == 0) {
+ tags |= ARG_TAG_NONNULL;
+ } else if (strcmp(tag, "nullable") == 0) {
+ tags |= ARG_TAG_NULLABLE;
+ } else if (strcmp(tag, "arena") == 0) {
+ tags |= ARG_TAG_ARENA;
+ } else {
+ bpf_log(log, "arg#%d has unsupported set of tags\n", i);
+ return -EOPNOTSUPP;
}
- if (strcmp(tag, "nonnull") == 0)
- is_nonnull = true;
+ }
+ if (id != -ENOENT) {
+ bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
+ return id;
}
+ t = btf_type_by_id(btf, args[i].type);
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
- if (btf_type_is_int(t) || btf_is_any_enum(t)) {
- sub->args[i].arg_type = ARG_ANYTHING;
- continue;
- }
- if (btf_type_is_ptr(t) && btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
+ if (!btf_type_is_ptr(t))
+ goto skip_pointer;
+
+ if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
+ if (tags & ~ARG_TAG_CTX) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
+ if ((tags & ARG_TAG_CTX) &&
+ btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
+ prog->expected_attach_type))
+ return -EINVAL;
sub->args[i].arg_type = ARG_PTR_TO_CTX;
continue;
}
- if (btf_type_is_ptr(t) && btf_is_dynptr_ptr(btf, t)) {
+ if (btf_is_dynptr_ptr(btf, t)) {
+ if (tags) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
continue;
}
- if (is_global && btf_type_is_ptr(t)) {
+ if (tags & ARG_TAG_TRUSTED) {
+ int kern_type_id;
+
+ if (tags & ARG_TAG_NONNULL) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
+
+ kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
+ if (kern_type_id < 0)
+ return kern_type_id;
+
+ sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
+ if (tags & ARG_TAG_NULLABLE)
+ sub->args[i].arg_type |= PTR_MAYBE_NULL;
+ sub->args[i].btf_id = kern_type_id;
+ continue;
+ }
+ if (tags & ARG_TAG_ARENA) {
+ if (tags & ~ARG_TAG_ARENA) {
+ bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
+ return -EINVAL;
+ }
+ sub->args[i].arg_type = ARG_PTR_TO_ARENA;
+ continue;
+ }
+ if (is_global) { /* generic user data pointer */
u32 mem_size;
+ if (tags & ARG_TAG_NULLABLE) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
+
t = btf_type_skip_modifiers(btf, t->type, NULL);
ref_t = btf_resolve_size(btf, t, &mem_size);
if (IS_ERR(ref_t)) {
- bpf_log(log,
- "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
- i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
+ bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
+ i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
PTR_ERR(ref_t));
return -EINVAL;
}
- sub->args[i].arg_type = is_nonnull ? ARG_PTR_TO_MEM : ARG_PTR_TO_MEM_OR_NULL;
+ sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
+ if (tags & ARG_TAG_NONNULL)
+ sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
sub->args[i].mem_size = mem_size;
continue;
}
- if (is_nonnull) {
- bpf_log(log, "arg#%d marked as non-null, but is not a pointer type\n", i);
+
+skip_pointer:
+ if (tags) {
+ bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
return -EINVAL;
}
+ if (btf_type_is_int(t) || btf_is_any_enum(t)) {
+ sub->args[i].arg_type = ARG_ANYTHING;
+ continue;
+ }
+ if (!is_global)
+ return -EINVAL;
bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
i, btf_type_str(t), tname);
return -EINVAL;
}
- for (i = 0; i < nargs; i++) {
- const char *tag;
-
- if (sub->args[i].arg_type != ARG_PTR_TO_CTX)
- continue;
-
- /* check if arg has "arg:ctx" tag */
- t = btf_type_by_id(btf, args[i].type);
- tag = btf_find_decl_tag_value(btf, fn_t, i, "arg:");
- if (IS_ERR_OR_NULL(tag) || strcmp(tag, "ctx") != 0)
- continue;
-
- if (btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
- prog->expected_attach_type))
- return -EINVAL;
- }
-
sub->arg_cnt = nargs;
sub->args_cached = true;
@@ -7589,6 +7807,17 @@ static struct btf *btf_get_module_btf(const struct module *module)
return btf;
}
+static int check_btf_kconfigs(const struct module *module, const char *feature)
+{
+ if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
+ pr_err("missing vmlinux BTF, cannot register %s\n", feature);
+ return -ENOENT;
+ }
+ if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+ pr_warn("missing module BTF, cannot register %s\n", feature);
+ return 0;
+}
+
BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{
struct btf *btf = NULL;
@@ -7949,15 +8178,8 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
int ret, i;
btf = btf_get_module_btf(kset->owner);
- if (!btf) {
- if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
- pr_err("missing vmlinux BTF, cannot register kfuncs\n");
- return -ENOENT;
- }
- if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
- pr_warn("missing module BTF, cannot register kfuncs\n");
- return 0;
- }
+ if (!btf)
+ return check_btf_kconfigs(kset->owner, "kfunc");
if (IS_ERR(btf))
return PTR_ERR(btf);
@@ -7981,6 +8203,14 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
{
enum btf_kfunc_hook hook;
+ /* All kfuncs need to be tagged as such in BTF.
+ * WARN() for initcall registrations that do not check errors.
+ */
+ if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
+ WARN_ON(!kset->owner);
+ return -EINVAL;
+ }
+
hook = bpf_prog_type_to_kfunc_hook(prog_type);
return __register_btf_kfunc_id_set(hook, kset);
}
@@ -8057,17 +8287,8 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
int ret;
btf = btf_get_module_btf(owner);
- if (!btf) {
- if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
- pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
- return -ENOENT;
- }
- if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
- pr_err("missing module BTF, cannot register dtor kfuncs\n");
- return -ENOENT;
- }
- return 0;
- }
+ if (!btf)
+ return check_btf_kconfigs(owner, "dtor kfuncs");
if (IS_ERR(btf))
return PTR_ERR(btf);
@@ -8182,17 +8403,6 @@ size_t bpf_core_essential_name_len(const char *name)
return n;
}
-struct bpf_cand_cache {
- const char *name;
- u32 name_len;
- u16 kind;
- u16 cnt;
- struct {
- const struct btf *btf;
- u32 id;
- } cands[];
-};
-
static void bpf_free_cands(struct bpf_cand_cache *cands)
{
if (!cands->cnt)
@@ -8213,8 +8423,6 @@ static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
#define MODULE_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
-static DEFINE_MUTEX(cand_cache_mutex);
-
static void __print_cand_cache(struct bpf_verifier_log *log,
struct bpf_cand_cache **cache,
int cache_size)
@@ -8645,3 +8853,141 @@ bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
return !strncmp(reg_name, arg_name, cmp_len);
}
+
+#ifdef CONFIG_BPF_JIT
+static int
+btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
+ struct bpf_verifier_log *log)
+{
+ struct btf_struct_ops_tab *tab, *new_tab;
+ int i, err;
+
+ tab = btf->struct_ops_tab;
+ if (!tab) {
+ tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]),
+ GFP_KERNEL);
+ if (!tab)
+ return -ENOMEM;
+ tab->capacity = 4;
+ btf->struct_ops_tab = tab;
+ }
+
+ for (i = 0; i < tab->cnt; i++)
+ if (tab->ops[i].st_ops == st_ops)
+ return -EEXIST;
+
+ if (tab->cnt == tab->capacity) {
+ new_tab = krealloc(tab,
+ offsetof(struct btf_struct_ops_tab,
+ ops[tab->capacity * 2]),
+ GFP_KERNEL);
+ if (!new_tab)
+ return -ENOMEM;
+ tab = new_tab;
+ tab->capacity *= 2;
+ btf->struct_ops_tab = tab;
+ }
+
+ tab->ops[tab->cnt].st_ops = st_ops;
+
+ err = bpf_struct_ops_desc_init(&tab->ops[tab->cnt], btf, log);
+ if (err)
+ return err;
+
+ tab->cnt++;
+
+ return 0;
+}
+
+const struct bpf_struct_ops_desc *
+bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
+{
+ const struct bpf_struct_ops_desc *st_ops_list;
+ unsigned int i;
+ u32 cnt;
+
+ if (!value_id)
+ return NULL;
+ if (!btf->struct_ops_tab)
+ return NULL;
+
+ cnt = btf->struct_ops_tab->cnt;
+ st_ops_list = btf->struct_ops_tab->ops;
+ for (i = 0; i < cnt; i++) {
+ if (st_ops_list[i].value_id == value_id)
+ return &st_ops_list[i];
+ }
+
+ return NULL;
+}
+
+const struct bpf_struct_ops_desc *
+bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ const struct bpf_struct_ops_desc *st_ops_list;
+ unsigned int i;
+ u32 cnt;
+
+ if (!type_id)
+ return NULL;
+ if (!btf->struct_ops_tab)
+ return NULL;
+
+ cnt = btf->struct_ops_tab->cnt;
+ st_ops_list = btf->struct_ops_tab->ops;
+ for (i = 0; i < cnt; i++) {
+ if (st_ops_list[i].type_id == type_id)
+ return &st_ops_list[i];
+ }
+
+ return NULL;
+}
+
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
+{
+ struct bpf_verifier_log *log;
+ struct btf *btf;
+ int err = 0;
+
+ btf = btf_get_module_btf(st_ops->owner);
+ if (!btf)
+ return check_btf_kconfigs(st_ops->owner, "struct_ops");
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
+ if (!log) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ log->level = BPF_LOG_KERNEL;
+
+ err = btf_add_struct_ops(btf, st_ops, log);
+
+errout:
+ kfree(log);
+ btf_put(btf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
+#endif
+
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix)
+{
+ int suffix_len = strlen(suffix), len;
+ const char *param_name;
+
+ /* In the future, this can be ported to use BTF tagging */
+ param_name = btf_name_by_offset(btf, arg->name_off);
+ if (str_is_empty(param_name))
+ return false;
+ len = strlen(param_name);
+ if (len <= suffix_len)
+ return false;
+ param_name += len - suffix_len;
+ return !strncmp(param_name, suffix, suffix_len);
+}
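A hedged usage sketch (the wrapper below is invented): kfunc argument conventions such as the "__sz" size suffix can be detected with this helper.

static bool example_is_size_arg(const struct btf *btf,
				const struct btf_param *args, int i)
{
	/* matches e.g. a parameter named "buf__sz", but not "__sz" alone */
	return btf_param_match_suffix(btf, &args[i], "__sz");
}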
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 491d20038cbe..82243cb6c54d 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1358,15 +1358,12 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
enum cgroup_bpf_attach_type atype)
{
- unsigned int offset = skb->data - skb_network_header(skb);
+ unsigned int offset = -skb_network_offset(skb);
struct sock *save_sk;
void *saved_data_end;
struct cgroup *cgrp;
int ret;
- if (!sk || !sk_fullsock(sk))
- return 0;
-
if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
return 0;
@@ -1630,7 +1627,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -2191,7 +2188,7 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -2348,7 +2345,7 @@ cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ea6843be2616..696bc55de8e8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -88,13 +88,18 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
return NULL;
}
+/* expose the kernel's PAGE_SIZE to bpf programs that include vmlinux.h */
+enum page_size_enum {
+ __PAGE_SIZE = PAGE_SIZE
+};
+
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
struct bpf_prog_aux *aux;
struct bpf_prog *fp;
- size = round_up(size, PAGE_SIZE);
+ size = round_up(size, __PAGE_SIZE);
fp = __vmalloc(size, gfp_flags);
if (fp == NULL)
return NULL;
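Because enumerators land in vmlinux BTF, a program built against a generated vmlinux.h can reference the value directly; a minimal sketch, assuming such a vmlinux.h:

/* BPF-C fragment: __PAGE_SIZE is now visible through vmlinux.h */
static char scratch[__PAGE_SIZE];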
@@ -682,7 +687,7 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
if (!bpf_prog_kallsyms_candidate(fp) ||
- !bpf_capable())
+ !bpf_token_capable(fp->aux->token, CAP_BPF))
return;
bpf_prog_ksym_set_addr(fp);
@@ -888,7 +893,12 @@ static LIST_HEAD(pack_list);
* CONFIG_MMU=n. Use PAGE_SIZE in these cases.
*/
#ifdef PMD_SIZE
-#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
+/* PMD_SIZE is really big for some archs. It doesn't make sense to
+ * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
+ * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
+ * greater than or equal to 2MiB.
+ */
+#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif
@@ -1675,6 +1685,7 @@ bool bpf_opcode_in_insntable(u8 code)
[BPF_LD | BPF_IND | BPF_B] = true,
[BPF_LD | BPF_IND | BPF_H] = true,
[BPF_LD | BPF_IND | BPF_W] = true,
+ [BPF_JMP | BPF_JCOND] = true,
};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
@@ -2695,7 +2706,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
bool sleepable;
u32 i;
- sleepable = aux->sleepable;
+ sleepable = aux->prog->sleepable;
for (i = 0; i < len; i++) {
map = used_maps[i];
if (map->ops->map_poke_untrack)
@@ -2779,6 +2790,7 @@ void bpf_prog_free(struct bpf_prog *fp)
if (aux->dst_prog)
bpf_prog_put(aux->dst_prog);
+ bpf_token_put(aux->token);
INIT_WORK(&aux->work, bpf_prog_free_deferred);
schedule_work(&aux->work);
}
@@ -2925,6 +2937,21 @@ bool __weak bpf_jit_supports_far_kfunc_call(void)
return false;
}
+bool __weak bpf_jit_supports_arena(void)
+{
+ return false;
+}
+
+/* Return TRUE if the JIT backend satisfies the following two conditions:
+ * 1) JIT backend supports atomic_xchg() on pointer-sized words.
+ * 2) On the arch in question, the implementation of xchg() is the same
+ * as atomic_xchg() on pointer-sized words.
+ */
+bool __weak bpf_jit_supports_ptr_xchg(void)
+{
+ return false;
+}
+
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
* skb_copy_bits(), so provide a weak definition of it for NET-less config.
*/
@@ -2959,6 +2986,17 @@ void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp,
{
}
+/* for configs without MMU or 32-bit */
+__weak const struct bpf_map_ops arena_map_ops;
+__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
+{
+ return 0;
+}
+__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
+{
+ return 0;
+}
+
#ifdef CONFIG_BPF_SYSCALL
static int __init bpf_global_ma_init(void)
{
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ef82ffc90cbe..9ee8da477465 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -24,6 +24,7 @@
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
+#include <net/hotdata.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
@@ -326,7 +327,8 @@ static int cpu_map_kthread_run(void *data)
/* Support running another XDP prog on this CPU */
nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
if (nframes) {
- m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
+ m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ gfp, nframes, skbs);
if (unlikely(m == 0)) {
for (i = 0; i < nframes; i++)
skbs[i] = NULL; /* effect: xdp_return_frame */
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 2e73533a3811..dad0fb1c8e87 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -424,7 +424,7 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
__bpf_kfunc_end_defs();
-BTF_SET8_START(cpumask_kfunc_btf_ids)
+BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
@@ -450,7 +450,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
-BTF_SET8_END(cpumask_kfunc_btf_ids)
+BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
static const struct btf_kfunc_id_set cpumask_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index a936c704d4e7..4e2cdbb5629f 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -130,13 +130,14 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
bpf_map_init_from_attr(&dtab->map, attr);
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
- dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
-
- if (!dtab->n_buckets) /* Overflow check */
+ /* hash table size must be power of 2; roundup_pow_of_two() can
+ * overflow into UB on 32-bit arches, so check that first
+ */
+ if (dtab->map.max_entries > 1UL << 31)
return -EINVAL;
- }
- if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
+
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
dtab->map.numa_node);
if (!dtab->dev_index_head)
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 49940c26a227..bd2e2dd04740 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -166,6 +166,12 @@ static bool is_movsx(const struct bpf_insn *insn)
(insn->off == 8 || insn->off == 16 || insn->off == 32);
}
+static bool is_addr_space_cast(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn->off == BPF_ADDR_SPACE_CAST;
+}
+
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
const struct bpf_insn *insn,
bool allow_ptr_leaks)
@@ -184,6 +190,10 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg);
+ } else if (is_addr_space_cast(insn)) {
+ verbose(cbs->private_data, "(%02x) r%d = addr_space_cast(r%d, %d, %d)\n",
+ insn->code, insn->dst_reg,
+ insn->src_reg, ((u32)insn->imm) >> 16, (u16)insn->imm);
} else if (BPF_SRC(insn->code) == BPF_X) {
verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n",
insn->code, class == BPF_ALU ? 'w' : 'r',
@@ -322,6 +332,10 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose(cbs->private_data, "(%02x) goto pc%+d\n",
insn->code, insn->off);
+ } else if (insn->code == (BPF_JMP | BPF_JCOND) &&
+ insn->src_reg == BPF_MAY_GOTO) {
+ verbose(cbs->private_data, "(%02x) may_goto pc%+d\n",
+ insn->code, insn->off);
} else if (insn->code == (BPF_JMP32 | BPF_JA)) {
verbose(cbs->private_data, "(%02x) gotol pc%+d\n",
insn->code, insn->imm);
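Rendered, the two new forms come out roughly as (bf) r1 = addr_space_cast(r2, 0, 1) and (e5) may_goto pc+5; the opcode bytes are illustrative, derived from the BPF_ALU64|BPF_MOV|BPF_X and BPF_JMP|BPF_JCOND encodings.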
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 03a6a2500b6a..3a088a5349bc 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -499,7 +499,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
num_possible_cpus());
}
- /* hash table size must be power of 2 */
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
+ * into UB on 32-bit arches, so check that first
+ */
+ err = -E2BIG;
+ if (htab->map.max_entries > 1UL << 31)
+ goto free_htab;
+
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
htab->elem_size = sizeof(struct htab_elem) +
@@ -509,10 +515,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->elem_size += round_up(htab->map.value_size, 8);
- err = -E2BIG;
- /* prevent zero size kmalloc and check for u32 overflow */
- if (htab->n_buckets == 0 ||
- htab->n_buckets > U32_MAX / sizeof(struct bucket))
+ /* check for u32 overflow */
+ if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
goto free_htab;
err = bpf_map_init_elem_count(&htab->map);
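The reasoning behind the new guard, spelled out: on a 32-bit arch unsigned long is 32 bits wide, so for any max_entries above 2^31 roundup_pow_of_two() would have to evaluate 1UL << 32, a shift by the full width of the type, which is undefined behaviour in C. A sketch of the shared pattern (the same guard appears in the devmap and stackmap hunks of this series):

/* illustrative only: reject inputs whose rounded-up size is not
 * representable before calling roundup_pow_of_two()
 */
static int example_nbuckets(u32 max_entries, u32 *n_buckets)
{
	if (max_entries > 1UL << 31)	/* would need 1UL << 32: UB on 32-bit */
		return -E2BIG;
	*n_buckets = roundup_pow_of_two(max_entries);
	return 0;
}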
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index d19cd863d294..a89587859571 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -334,7 +334,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
__this_cpu_write(irqsave_flags, flags);
}
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
__bpf_spin_lock_irqsave(lock);
return 0;
@@ -357,7 +357,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
local_irq_restore(flags);
}
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
__bpf_spin_unlock_irqrestore(lock);
return 0;
@@ -1417,6 +1417,7 @@ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
unsigned long *kptr = map_value;
+ /* This helper may be inlined by the verifier. */
return xchg(kptr, (unsigned long)ptr);
}
@@ -1682,7 +1683,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -1733,7 +1734,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
break;
}
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return NULL;
switch (func_id) {
@@ -1791,7 +1792,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
break;
}
- if (!perfmon_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
return NULL;
switch (func_id) {
@@ -2486,9 +2487,9 @@ __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
return obj;
}
-__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+__bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
{
- return obj__ign;
+ return (void *)obj__ign;
}
__bpf_kfunc void bpf_rcu_read_lock(void)
@@ -2546,7 +2547,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
__bpf_kfunc_end_defs();
-BTF_SET8_START(generic_btf_ids)
+BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_KEXEC_CORE
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
@@ -2575,7 +2576,7 @@ BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
-BTF_SET8_END(generic_btf_ids)
+BTF_KFUNCS_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
.owner = THIS_MODULE,
@@ -2591,7 +2592,7 @@ BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release_dtor)
#endif
-BTF_SET8_START(common_btf_ids)
+BTF_KFUNCS_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
BTF_ID_FLAGS(func, bpf_rdonly_cast)
BTF_ID_FLAGS(func, bpf_rcu_read_lock)
@@ -2620,7 +2621,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
-BTF_SET8_END(common_btf_ids)
+BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 41e0a55c35f5..af5d2ffadd70 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -20,6 +20,7 @@
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/kstrtox.h>
#include "preload/bpf_preload.h"
enum bpf_type {
@@ -98,9 +99,9 @@ static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops = { };
static const struct inode_operations bpf_link_iops = { };
-static struct inode *bpf_get_inode(struct super_block *sb,
- const struct inode *dir,
- umode_t mode)
+struct inode *bpf_get_inode(struct super_block *sb,
+ const struct inode *dir,
+ umode_t mode)
{
struct inode *inode;
@@ -594,6 +595,136 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
+struct bpffs_btf_enums {
+ const struct btf *btf;
+ const struct btf_type *cmd_t;
+ const struct btf_type *map_t;
+ const struct btf_type *prog_t;
+ const struct btf_type *attach_t;
+};
+
+static int find_bpffs_btf_enums(struct bpffs_btf_enums *info)
+{
+ const struct btf *btf;
+ const struct btf_type *t;
+ const char *name;
+ int i, n;
+
+ memset(info, 0, sizeof(*info));
+
+ btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+ if (!btf)
+ return -ENOENT;
+
+ info->btf = btf;
+
+ for (i = 1, n = btf_nr_types(btf); i < n; i++) {
+ t = btf_type_by_id(btf, i);
+ if (!btf_type_is_enum(t))
+ continue;
+
+ name = btf_name_by_offset(btf, t->name_off);
+ if (!name)
+ continue;
+
+ if (strcmp(name, "bpf_cmd") == 0)
+ info->cmd_t = t;
+ else if (strcmp(name, "bpf_map_type") == 0)
+ info->map_t = t;
+ else if (strcmp(name, "bpf_prog_type") == 0)
+ info->prog_t = t;
+ else if (strcmp(name, "bpf_attach_type") == 0)
+ info->attach_t = t;
+ else
+ continue;
+
+ if (info->cmd_t && info->map_t && info->prog_t && info->attach_t)
+ return 0;
+ }
+
+ return -ESRCH;
+}
+
+static bool find_btf_enum_const(const struct btf *btf, const struct btf_type *enum_t,
+ const char *prefix, const char *str, int *value)
+{
+ const struct btf_enum *e;
+ const char *name;
+ int i, n, pfx_len = strlen(prefix);
+
+ *value = 0;
+
+ if (!btf || !enum_t)
+ return false;
+
+ for (i = 0, n = btf_vlen(enum_t); i < n; i++) {
+ e = &btf_enum(enum_t)[i];
+
+ name = btf_name_by_offset(btf, e->name_off);
+ if (!name || strncasecmp(name, prefix, pfx_len) != 0)
+ continue;
+
+ /* match the symbolic name case-insensitively, ignoring the prefix */
+ if (strcasecmp(name + pfx_len, str) == 0) {
+ *value = e->val;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void seq_print_delegate_opts(struct seq_file *m,
+ const char *opt_name,
+ const struct btf *btf,
+ const struct btf_type *enum_t,
+ const char *prefix,
+ u64 delegate_msk, u64 any_msk)
+{
+ const struct btf_enum *e;
+ bool first = true;
+ const char *name;
+ u64 msk;
+ int i, n, pfx_len = strlen(prefix);
+
+ delegate_msk &= any_msk; /* clear unknown bits */
+
+ if (delegate_msk == 0)
+ return;
+
+ seq_printf(m, ",%s", opt_name);
+ if (delegate_msk == any_msk) {
+ seq_printf(m, "=any");
+ return;
+ }
+
+ if (btf && enum_t) {
+ for (i = 0, n = btf_vlen(enum_t); i < n; i++) {
+ e = &btf_enum(enum_t)[i];
+ name = btf_name_by_offset(btf, e->name_off);
+ if (!name || strncasecmp(name, prefix, pfx_len) != 0)
+ continue;
+ msk = 1ULL << e->val;
+ if (delegate_msk & msk) {
+ /* emit lower-case name without prefix */
+ seq_printf(m, "%c", first ? '=' : ':');
+ name += pfx_len;
+ while (*name) {
+ seq_printf(m, "%c", tolower(*name));
+ name++;
+ }
+
+ delegate_msk &= ~msk;
+ first = false;
+ }
+ }
+ }
+ if (delegate_msk)
+ seq_printf(m, "%c0x%llx", first ? '=' : ':', delegate_msk);
+}
+
/*
* Display the mount options in /proc/mounts.
*/
@@ -601,6 +732,8 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
struct inode *inode = d_inode(root);
umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX;
+ struct bpf_mount_opts *opts = root->d_sb->s_fs_info;
+ u64 mask;
if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
@@ -610,6 +743,35 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
from_kgid_munged(&init_user_ns, inode->i_gid));
if (mode != S_IRWXUGO)
seq_printf(m, ",mode=%o", mode);
+
+ if (opts->delegate_cmds || opts->delegate_maps ||
+ opts->delegate_progs || opts->delegate_attachs) {
+ struct bpffs_btf_enums info;
+
+ /* ignore errors, fall back to hex */
+ (void)find_bpffs_btf_enums(&info);
+
+ mask = (1ULL << __MAX_BPF_CMD) - 1;
+ seq_print_delegate_opts(m, "delegate_cmds",
+ info.btf, info.cmd_t, "BPF_",
+ opts->delegate_cmds, mask);
+
+ mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
+ seq_print_delegate_opts(m, "delegate_maps",
+ info.btf, info.map_t, "BPF_MAP_TYPE_",
+ opts->delegate_maps, mask);
+
+ mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1;
+ seq_print_delegate_opts(m, "delegate_progs",
+ info.btf, info.prog_t, "BPF_PROG_TYPE_",
+ opts->delegate_progs, mask);
+
+ mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1;
+ seq_print_delegate_opts(m, "delegate_attachs",
+ info.btf, info.attach_t, "BPF_",
+ opts->delegate_attachs, mask);
+ }
+
return 0;
}
@@ -624,7 +786,7 @@ static void bpf_free_inode(struct inode *inode)
free_inode_nonrcu(inode);
}
-static const struct super_operations bpf_super_ops = {
+const struct super_operations bpf_super_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = bpf_show_options,
@@ -635,28 +797,30 @@ enum {
OPT_UID,
OPT_GID,
OPT_MODE,
+ OPT_DELEGATE_CMDS,
+ OPT_DELEGATE_MAPS,
+ OPT_DELEGATE_PROGS,
+ OPT_DELEGATE_ATTACHS,
};
static const struct fs_parameter_spec bpf_fs_parameters[] = {
fsparam_u32 ("uid", OPT_UID),
fsparam_u32 ("gid", OPT_GID),
fsparam_u32oct ("mode", OPT_MODE),
+ fsparam_string ("delegate_cmds", OPT_DELEGATE_CMDS),
+ fsparam_string ("delegate_maps", OPT_DELEGATE_MAPS),
+ fsparam_string ("delegate_progs", OPT_DELEGATE_PROGS),
+ fsparam_string ("delegate_attachs", OPT_DELEGATE_ATTACHS),
{}
};
-struct bpf_mount_opts {
- kuid_t uid;
- kgid_t gid;
- umode_t mode;
-};
-
static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- struct bpf_mount_opts *opts = fc->fs_private;
+ struct bpf_mount_opts *opts = fc->s_fs_info;
struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
- int opt;
+ int opt, err;
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
if (opt < 0) {
@@ -708,6 +872,67 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
case OPT_MODE:
opts->mode = result.uint_32 & S_IALLUGO;
break;
+ case OPT_DELEGATE_CMDS:
+ case OPT_DELEGATE_MAPS:
+ case OPT_DELEGATE_PROGS:
+ case OPT_DELEGATE_ATTACHS: {
+ struct bpffs_btf_enums info;
+ const struct btf_type *enum_t;
+ const char *enum_pfx;
+ u64 *delegate_msk, msk = 0;
+ char *p;
+ int val;
+
+ /* ignore errors, fall back to hex */
+ (void)find_bpffs_btf_enums(&info);
+
+ switch (opt) {
+ case OPT_DELEGATE_CMDS:
+ delegate_msk = &opts->delegate_cmds;
+ enum_t = info.cmd_t;
+ enum_pfx = "BPF_";
+ break;
+ case OPT_DELEGATE_MAPS:
+ delegate_msk = &opts->delegate_maps;
+ enum_t = info.map_t;
+ enum_pfx = "BPF_MAP_TYPE_";
+ break;
+ case OPT_DELEGATE_PROGS:
+ delegate_msk = &opts->delegate_progs;
+ enum_t = info.prog_t;
+ enum_pfx = "BPF_PROG_TYPE_";
+ break;
+ case OPT_DELEGATE_ATTACHS:
+ delegate_msk = &opts->delegate_attachs;
+ enum_t = info.attach_t;
+ enum_pfx = "BPF_";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ while ((p = strsep(&param->string, ":"))) {
+ if (strcmp(p, "any") == 0) {
+ msk |= ~0ULL;
+ } else if (find_btf_enum_const(info.btf, enum_t, enum_pfx, p, &val)) {
+ msk |= 1ULL << val;
+ } else {
+ err = kstrtou64(p, 0, &msk);
+ if (err)
+ return err;
+ }
+ }
+
+ /* Setting delegation mount options requires privileges */
+ if (msk && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ *delegate_msk |= msk;
+ break;
+ }
+ default:
+ /* ignore unknown mount options */
+ break;
}
return 0;
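To illustrate the accepted syntax (derived from the parsing above): delegate_cmds=prog_load:map_create sets the BPF_PROG_LOAD and BPF_MAP_CREATE bits, delegate_maps=any enables every map type, and a numeric token such as delegate_attachs=0x30 is taken by kstrtou64() as a literal mask.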
@@ -784,10 +1009,14 @@ out:
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr bpf_rfiles[] = { { "" } };
- struct bpf_mount_opts *opts = fc->fs_private;
+ struct bpf_mount_opts *opts = sb->s_fs_info;
struct inode *inode;
int ret;
+ /* Mounting an instance of BPF FS requires privileges */
+ if (fc->user_ns != &init_user_ns && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
if (ret)
return ret;
@@ -811,7 +1040,7 @@ static int bpf_get_tree(struct fs_context *fc)
static void bpf_free_fc(struct fs_context *fc)
{
- kfree(fc->fs_private);
+ kfree(fc->s_fs_info);
}
static const struct fs_context_operations bpf_context_ops = {
@@ -835,17 +1064,32 @@ static int bpf_init_fs_context(struct fs_context *fc)
opts->uid = current_fsuid();
opts->gid = current_fsgid();
- fc->fs_private = opts;
+ /* start out with no BPF token delegation enabled */
+ opts->delegate_cmds = 0;
+ opts->delegate_maps = 0;
+ opts->delegate_progs = 0;
+ opts->delegate_attachs = 0;
+
+ fc->s_fs_info = opts;
fc->ops = &bpf_context_ops;
return 0;
}
+static void bpf_kill_super(struct super_block *sb)
+{
+ struct bpf_mount_opts *opts = sb->s_fs_info;
+
+ kill_litter_super(sb);
+ kfree(opts);
+}
+
static struct file_system_type bpf_fs_type = {
.owner = THIS_MODULE,
.name = "bpf",
.init_fs_context = bpf_init_fs_context,
.parameters = bpf_fs_parameters,
- .kill_sb = kill_litter_super,
+ .kill_sb = bpf_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
static int __init bpf_init(void)
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 594a234f122b..2a243cf37c60 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -9,6 +9,7 @@
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/math64.h>
+#include <linux/string.h>
#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
@@ -333,7 +334,8 @@ find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
const struct bpf_line_info *linfo;
const struct bpf_prog *prog;
- u32 i, nr_linfo;
+ u32 nr_linfo;
+ int l, r, m;
prog = env->prog;
nr_linfo = prog->aux->nr_linfo;
@@ -342,11 +344,30 @@ find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
return NULL;
linfo = prog->aux->linfo;
- for (i = 1; i < nr_linfo; i++)
- if (insn_off < linfo[i].insn_off)
- break;
+ /* Loop invariant: linfo[l].insn_off <= insn_off.
+ * linfo[0].insn_off == 0, which always satisfies the condition above.
+ * The binary search looks for the rightmost linfo entry satisfying
+ * the invariant, giving us the desired record that covers the given
+ * instruction offset.
+ */
+ l = 0;
+ r = nr_linfo - 1;
+ while (l < r) {
+ /* (r - l + 1) / 2 means we break a tie to the right, so if:
+ * l=1, r=2, linfo[l].insn_off <= insn_off, linfo[r].insn_off > insn_off,
+ * then m=2, we see that linfo[m].insn_off > insn_off, and so
+ * r becomes 1 and we exit the loop with the correct l==1.
+ * If the tie were broken to the left, m=1 would leave us in
+ * an endless loop where l and m stay at 1 and r stays at 2.
+ */
+ m = l + (r - l + 1) / 2;
+ if (linfo[m].insn_off <= insn_off)
+ l = m;
+ else
+ r = m - 1;
+ }
- return &linfo[i - 1];
+ return &linfo[l];
}
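For reference, the same "rightmost element satisfying a monotone predicate" pattern in isolation, as a generic sketch (not part of the patch):

/* assumes p(0) holds and p is monotone: true...true, false...false */
static int rightmost_true(int n, bool (*p)(int))
{
	int l = 0, r = n - 1, m;

	while (l < r) {
		m = l + (r - l + 1) / 2;	/* round up to avoid livelock */
		if (p(m))
			l = m;
		else
			r = m - 1;
	}
	return l;
}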
static const char *ltrim(const char *s)
@@ -361,13 +382,28 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...)
{
- const struct bpf_line_info *linfo;
+ const struct bpf_line_info *linfo, *prev_linfo;
+ const struct btf *btf;
+ const char *s, *fname;
if (!bpf_verifier_log_needed(&env->log))
return;
+ prev_linfo = env->prev_linfo;
linfo = find_linfo(env, insn_off);
- if (!linfo || linfo == env->prev_linfo)
+ if (!linfo || linfo == prev_linfo)
+ return;
+
+ /* It often happens that two separate linfo records point to the same
+ * source code line, but have differing column numbers. Given the
+ * verifier log doesn't emit column information, from the user's
+ * perspective we just end up emitting the same source code line twice
+ * unnecessarily. So instead check that the previous and current linfo
+ * records point to the same file (file_name_offs match) and the same
+ * line number, and avoid emitting a duplicated source code line in
+ * that case.
+ */
+ if (prev_linfo && linfo->file_name_off == prev_linfo->file_name_off &&
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col) == BPF_LINE_INFO_LINE_NUM(prev_linfo->line_col))
return;
if (prefix_fmt) {
@@ -378,9 +414,15 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
va_end(args);
}
- verbose(env, "%s\n",
- ltrim(btf_name_by_offset(env->prog->aux->btf,
- linfo->line_off)));
+ btf = env->prog->aux->btf;
+ s = ltrim(btf_name_by_offset(btf, linfo->line_off));
+ verbose(env, "%s", s); /* source code line */
+
+ s = btf_name_by_offset(btf, linfo->file_name_off);
+ /* leave only file name */
+ fname = strrchr(s, '/');
+ fname = fname ? fname + 1 : s;
+ verbose(env, " @ %s:%u\n", fname, BPF_LINE_INFO_LINE_NUM(linfo->line_col));
env->prev_linfo = linfo;
}
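With this change, a source line in the verifier log carries its location as a suffix, so output now reads along the lines of if (ret < 0) @ prog.c:42 (example output, not taken from a real log).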
@@ -416,6 +458,7 @@ const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
[PTR_TO_XDP_SOCK] = "xdp_sock",
[PTR_TO_BTF_ID] = "ptr_",
[PTR_TO_MEM] = "mem",
+ [PTR_TO_ARENA] = "arena",
[PTR_TO_BUF] = "buf",
[PTR_TO_FUNC] = "func",
[PTR_TO_MAP_KEY] = "map_key",
@@ -651,6 +694,8 @@ static void print_reg_state(struct bpf_verifier_env *env,
}
verbose(env, "%s", reg_type_str(env, t));
+ if (t == PTR_TO_ARENA)
+ return;
if (t == PTR_TO_STACK) {
if (state->frameno != reg->frameno)
verbose(env, "[%d]", reg->frameno);
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index b32be680da6c..050fe1ebf0f7 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -164,13 +164,13 @@ static inline int extract_bit(const u8 *data, size_t index)
*/
static size_t longest_prefix_match(const struct lpm_trie *trie,
const struct lpm_trie_node *node,
- const struct bpf_lpm_trie_key *key)
+ const struct bpf_lpm_trie_key_u8 *key)
{
u32 limit = min(node->prefixlen, key->prefixlen);
u32 prefixlen = 0, i = 0;
BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));
- BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32));
+ BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key_u8, data) % sizeof(u32));
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT)
@@ -229,7 +229,7 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
{
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
struct lpm_trie_node *node, *found = NULL;
- struct bpf_lpm_trie_key *key = _key;
+ struct bpf_lpm_trie_key_u8 *key = _key;
if (key->prefixlen > trie->max_prefixlen)
return NULL;
@@ -309,7 +309,7 @@ static long trie_update_elem(struct bpf_map *map,
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
struct lpm_trie_node __rcu **slot;
- struct bpf_lpm_trie_key *key = _key;
+ struct bpf_lpm_trie_key_u8 *key = _key;
unsigned long irq_flags;
unsigned int next_bit;
size_t matchlen = 0;
@@ -437,7 +437,7 @@ out:
static long trie_delete_elem(struct bpf_map *map, void *_key)
{
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
- struct bpf_lpm_trie_key *key = _key;
+ struct bpf_lpm_trie_key_u8 *key = _key;
struct lpm_trie_node __rcu **trim, **trim2;
struct lpm_trie_node *node, *parent;
unsigned long irq_flags;
@@ -536,7 +536,7 @@ out:
sizeof(struct lpm_trie_node))
#define LPM_VAL_SIZE_MIN 1
-#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key) + (X))
+#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key_u8) + (X))
#define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
#define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
@@ -565,7 +565,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
/* copy mandatory map attributes */
bpf_map_init_from_attr(&trie->map, attr);
trie->data_size = attr->key_size -
- offsetof(struct bpf_lpm_trie_key, data);
+ offsetof(struct bpf_lpm_trie_key_u8, data);
trie->max_prefixlen = trie->data_size * 8;
spin_lock_init(&trie->lock);
@@ -616,7 +616,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
{
struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
- struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
+ struct bpf_lpm_trie_key_u8 *key = _key, *next_key = _next_key;
struct lpm_trie_node **node_stack = NULL;
int err = 0, stack_ptr = -1;
unsigned int next_bit;
@@ -703,7 +703,7 @@ find_leftmost:
}
do_copy:
next_key->prefixlen = next_node->prefixlen;
- memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
+ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key_u8, data),
next_node->data, trie->data_size);
free_stack:
kfree(node_stack);
@@ -715,7 +715,7 @@ static int trie_check_btf(const struct bpf_map *map,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
- /* Keys must have struct bpf_lpm_trie_key embedded. */
+ /* Keys must have struct bpf_lpm_trie_key_u8 embedded. */
return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
-EINVAL : 0;
}
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index 6abd7c5df4b3..9575314f40a6 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -213,9 +213,9 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_map_iter_kfunc_ids)
+BTF_KFUNCS_START(bpf_map_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS)
-BTF_SET8_END(bpf_map_iter_kfunc_ids)
+BTF_KFUNCS_END(bpf_map_iter_kfunc_ids)
static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index dff7ba539701..c99f8e5234ac 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -91,11 +91,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
} else if (value_size / 8 > sysctl_perf_event_max_stack)
return ERR_PTR(-EINVAL);
- /* hash table size must be power of 2 */
- n_buckets = roundup_pow_of_two(attr->max_entries);
- if (!n_buckets)
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
+ * into UB on 32-bit arches, so check that first
+ */
+ if (attr->max_entries > 1UL << 31)
return ERR_PTR(-E2BIG);
+ n_buckets = roundup_pow_of_two(attr->max_entries);
+
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
if (!smap)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a1f18681721c..ae2ff73bde7e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -164,6 +164,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
if (bpf_map_is_offloaded(map)) {
return bpf_map_offload_update_elem(map, key, value, flags);
} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
+ map->map_type == BPF_MAP_TYPE_ARENA ||
map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
return map->ops->map_update_elem(map, key, value, flags);
} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
@@ -479,6 +480,39 @@ static void bpf_map_release_memcg(struct bpf_map *map)
}
#endif
+int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+ unsigned long nr_pages, struct page **pages)
+{
+ unsigned long i, j;
+ struct page *pg;
+ int ret = 0;
+#ifdef CONFIG_MEMCG_KMEM
+ struct mem_cgroup *memcg, *old_memcg;
+
+ memcg = bpf_map_get_memcg(map);
+ old_memcg = set_active_memcg(memcg);
+#endif
+ for (i = 0; i < nr_pages; i++) {
+ pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);
+
+ if (pg) {
+ pages[i] = pg;
+ continue;
+ }
+ for (j = 0; j < i; j++)
+ __free_page(pages[j]);
+ ret = -ENOMEM;
+ break;
+ }
+
+#ifdef CONFIG_MEMCG_KMEM
+ set_active_memcg(old_memcg);
+ mem_cgroup_put(memcg);
+#endif
+ return ret;
+}
+
+
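A hedged usage sketch of the new allocator (the caller below is invented): all pages are charged to the map's memcg, and on failure nothing stays allocated, so the caller only ever frees what it actually received.

static int example_grab_pages(struct bpf_map *map, struct page **pages)
{
	int err, i;

	err = bpf_map_alloc_pages(map, GFP_KERNEL, NUMA_NO_NODE, 4, pages);
	if (err)
		return err;		/* no partial allocation to clean up */
	/* ... use pages[0..3] ... */
	for (i = 0; i < 4; i++)
		__free_page(pages[i]);
	return 0;
}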
static int btf_field_cmp(const void *a, const void *b)
{
const struct btf_field *f1 = a, *f2 = b;
@@ -937,6 +971,21 @@ static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
return EPOLLERR;
}
+static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct bpf_map *map = filp->private_data;
+
+ if (map->ops->map_get_unmapped_area)
+ return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
+#ifdef CONFIG_MMU
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+#else
+ return addr;
+#endif
+}
+
const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_map_show_fdinfo,
@@ -946,6 +995,7 @@ const struct file_operations bpf_map_fops = {
.write = bpf_dummy_write,
.mmap = bpf_map_mmap,
.poll = bpf_map_poll,
+ .get_unmapped_area = bpf_get_unmapped_area,
};
int bpf_map_new_fd(struct bpf_map *map, int flags)
@@ -1011,8 +1061,8 @@ int map_check_no_btf(const struct bpf_map *map,
return -ENOTSUPP;
}
-static int map_check_btf(struct bpf_map *map, const struct btf *btf,
- u32 btf_key_id, u32 btf_value_id)
+static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
+ const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
const struct btf_type *key_type, *value_type;
u32 key_size, value_size;
@@ -1040,7 +1090,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
if (!IS_ERR_OR_NULL(map->record)) {
int i;
- if (!bpf_capable()) {
+ if (!bpf_token_capable(token, CAP_BPF)) {
ret = -EPERM;
goto free_map_tab;
}
@@ -1123,14 +1173,21 @@ free_map_tab:
return ret;
}
-#define BPF_MAP_CREATE_LAST_FIELD map_extra
+static bool bpf_net_capable(void)
+{
+ return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
+}
+
+#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
const struct bpf_map_ops *ops;
+ struct bpf_token *token = NULL;
int numa_node = bpf_map_attr_numa_node(attr);
u32 map_type = attr->map_type;
struct bpf_map *map;
+ bool token_flag;
int f_flags;
int err;
@@ -1138,6 +1195,12 @@ static int map_create(union bpf_attr *attr)
if (err)
return -EINVAL;
+ /* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
+ * to avoid per-map type checks tripping on unknown flag
+ */
+ token_flag = attr->map_flags & BPF_F_TOKEN_FD;
+ attr->map_flags &= ~BPF_F_TOKEN_FD;
+
if (attr->btf_vmlinux_value_type_id) {
if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
attr->btf_key_type_id || attr->btf_value_type_id)
@@ -1147,6 +1210,7 @@ static int map_create(union bpf_attr *attr)
}
if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
+ attr->map_type != BPF_MAP_TYPE_ARENA &&
attr->map_extra != 0)
return -EINVAL;
@@ -1178,14 +1242,32 @@ static int map_create(union bpf_attr *attr)
if (!ops->map_mem_usage)
return -EINVAL;
+ if (token_flag) {
+ token = bpf_token_get_from_fd(attr->map_token_fd);
+ if (IS_ERR(token))
+ return PTR_ERR(token);
+
+ /* if current token doesn't grant map creation permissions,
+ * then we can't use this token, so ignore it and rely on
+ * system-wide capabilities checks
+ */
+ if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
+ !bpf_token_allow_map_type(token, attr->map_type)) {
+ bpf_token_put(token);
+ token = NULL;
+ }
+ }
+
+ err = -EPERM;
+
/* Intent here is for unprivileged_bpf_disabled to block BPF map
* creation for unprivileged users; other actions depend
* on fd availability and access to bpffs, so are dependent on
* object creation success. Even with unprivileged BPF disabled,
* capability checks are still carried out.
*/
- if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
- return -EPERM;
+ if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
+ goto put_token;
/* check privileged map type permissions */
switch (map_type) {
@@ -1218,25 +1300,28 @@ static int map_create(union bpf_attr *attr)
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
case BPF_MAP_TYPE_STRUCT_OPS:
case BPF_MAP_TYPE_CPUMAP:
- if (!bpf_capable())
- return -EPERM;
+ case BPF_MAP_TYPE_ARENA:
+ if (!bpf_token_capable(token, CAP_BPF))
+ goto put_token;
break;
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_XSKMAP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
+ if (!bpf_token_capable(token, CAP_NET_ADMIN))
+ goto put_token;
break;
default:
WARN(1, "unsupported map type %d", map_type);
- return -EPERM;
+ goto put_token;
}
map = ops->map_alloc(attr);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto put_token;
+ }
map->ops = ops;
map->map_type = map_type;
@@ -1273,7 +1358,7 @@ static int map_create(union bpf_attr *attr)
map->btf = btf;
if (attr->btf_value_type_id) {
- err = map_check_btf(map, btf, attr->btf_key_type_id,
+ err = map_check_btf(map, token, btf, attr->btf_key_type_id,
attr->btf_value_type_id);
if (err)
goto free_map;
@@ -1285,15 +1370,16 @@ static int map_create(union bpf_attr *attr)
attr->btf_vmlinux_value_type_id;
}
- err = security_bpf_map_alloc(map);
+ err = security_bpf_map_create(map, attr, token);
if (err)
- goto free_map;
+ goto free_map_sec;
err = bpf_map_alloc_id(map);
if (err)
goto free_map_sec;
bpf_map_save_memcg(map);
+ bpf_token_put(token);
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
@@ -1314,6 +1400,8 @@ free_map_sec:
free_map:
btf_put(map->btf);
map->ops->map_free(map);
+put_token:
+ bpf_token_put(token);
return err;
}
@@ -2144,7 +2232,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
kvfree(aux->func_info);
kfree(aux->func_info_aux);
free_uid(aux->user);
- security_bpf_prog_free(aux);
+ security_bpf_prog_free(aux->prog);
bpf_prog_free(aux->prog);
}
@@ -2160,7 +2248,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
btf_put(prog->aux->attach_btf);
if (deferred) {
- if (prog->aux->sleepable)
+ if (prog->sleepable)
call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
else
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
@@ -2590,13 +2678,15 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD log_true_size
+#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog, *dst_prog = NULL;
struct btf *attach_btf = NULL;
+ struct bpf_token *token = NULL;
+ bool bpf_cap;
int err;
char license[128];
@@ -2610,13 +2700,35 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
BPF_F_TEST_RND_HI32 |
BPF_F_XDP_HAS_FRAGS |
BPF_F_XDP_DEV_BOUND_ONLY |
- BPF_F_TEST_REG_INVARIANTS))
+ BPF_F_TEST_REG_INVARIANTS |
+ BPF_F_TOKEN_FD))
return -EINVAL;
+ bpf_prog_load_fixup_attach_type(attr);
+
+ if (attr->prog_flags & BPF_F_TOKEN_FD) {
+ token = bpf_token_get_from_fd(attr->prog_token_fd);
+ if (IS_ERR(token))
+ return PTR_ERR(token);
+ /* if current token doesn't grant prog loading permissions,
+ * then we can't use this token, so ignore it and rely on
+ * system-wide capabilities checks
+ */
+ if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
+ !bpf_token_allow_prog_type(token, attr->prog_type,
+ attr->expected_attach_type)) {
+ bpf_token_put(token);
+ token = NULL;
+ }
+ }
+
+ bpf_cap = bpf_token_capable(token, CAP_BPF);
+ err = -EPERM;
+
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
(attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
- !bpf_capable())
- return -EPERM;
+ !bpf_cap)
+ goto put_token;
/* Intent here is for unprivileged_bpf_disabled to block BPF program
* creation for unprivileged users; other actions depend
@@ -2625,21 +2737,23 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
* capability checks are still carried out for these
* and other operations.
*/
- if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
- return -EPERM;
+ if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
+ goto put_token;
if (attr->insn_cnt == 0 ||
- attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
- return -E2BIG;
+ attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
+ err = -E2BIG;
+ goto put_token;
+ }
if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
type != BPF_PROG_TYPE_CGROUP_SKB &&
- !bpf_capable())
- return -EPERM;
+ !bpf_cap)
+ goto put_token;
- if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (is_perfmon_prog_type(type) && !perfmon_capable())
- return -EPERM;
+ if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
+ goto put_token;
+ if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
+ goto put_token;
/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
* or btf, we need to check which one it is
@@ -2649,27 +2763,33 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (IS_ERR(dst_prog)) {
dst_prog = NULL;
attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
- if (IS_ERR(attach_btf))
- return -EINVAL;
+ if (IS_ERR(attach_btf)) {
+ err = -EINVAL;
+ goto put_token;
+ }
if (!btf_is_kernel(attach_btf)) {
/* attaching through specifying bpf_prog's BTF
* objects directly might be supported eventually
*/
btf_put(attach_btf);
- return -ENOTSUPP;
+ err = -ENOTSUPP;
+ goto put_token;
}
}
} else if (attr->attach_btf_id) {
/* fall back to vmlinux BTF, if BTF type ID is specified */
attach_btf = bpf_get_btf_vmlinux();
- if (IS_ERR(attach_btf))
- return PTR_ERR(attach_btf);
- if (!attach_btf)
- return -EINVAL;
+ if (IS_ERR(attach_btf)) {
+ err = PTR_ERR(attach_btf);
+ goto put_token;
+ }
+ if (!attach_btf) {
+ err = -EINVAL;
+ goto put_token;
+ }
btf_get(attach_btf);
}
- bpf_prog_load_fixup_attach_type(attr);
if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
attach_btf, attr->attach_btf_id,
dst_prog)) {
@@ -2677,7 +2797,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
bpf_prog_put(dst_prog);
if (attach_btf)
btf_put(attach_btf);
- return -EINVAL;
+ err = -EINVAL;
+ goto put_token;
}
/* plain bpf_prog allocation */
@@ -2687,20 +2808,21 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
bpf_prog_put(dst_prog);
if (attach_btf)
btf_put(attach_btf);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_token;
}
prog->expected_attach_type = attr->expected_attach_type;
+ prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
prog->aux->attach_btf = attach_btf;
prog->aux->attach_btf_id = attr->attach_btf_id;
prog->aux->dst_prog = dst_prog;
prog->aux->dev_bound = !!attr->prog_ifindex;
- prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
- err = security_bpf_prog_alloc(prog->aux);
- if (err)
- goto free_prog;
+ /* move token into prog->aux, reuse taken refcnt */
+ prog->aux->token = token;
+ token = NULL;
prog->aux->user = get_current_user();
prog->len = attr->insn_cnt;
@@ -2709,12 +2831,12 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (copy_from_bpfptr(prog->insns,
make_bpfptr(attr->insns, uattr.is_kernel),
bpf_prog_insn_size(prog)) != 0)
- goto free_prog_sec;
+ goto free_prog;
/* copy eBPF program license from user space */
if (strncpy_from_bpfptr(license,
make_bpfptr(attr->license, uattr.is_kernel),
sizeof(license) - 1) < 0)
- goto free_prog_sec;
+ goto free_prog;
license[sizeof(license) - 1] = 0;
/* eBPF programs must be GPL compatible to use GPL-ed functions */
@@ -2728,14 +2850,14 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (bpf_prog_is_dev_bound(prog->aux)) {
err = bpf_prog_dev_bound_init(prog, attr);
if (err)
- goto free_prog_sec;
+ goto free_prog;
}
if (type == BPF_PROG_TYPE_EXT && dst_prog &&
bpf_prog_is_dev_bound(dst_prog->aux)) {
err = bpf_prog_dev_bound_inherit(prog, dst_prog);
if (err)
- goto free_prog_sec;
+ goto free_prog;
}
/*
@@ -2757,12 +2879,16 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
if (err < 0)
- goto free_prog_sec;
+ goto free_prog;
prog->aux->load_time = ktime_get_boottime_ns();
err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
sizeof(attr->prog_name));
if (err < 0)
+ goto free_prog;
+
+ err = security_bpf_prog_load(prog, attr, token);
+ if (err)
goto free_prog_sec;
/* run eBPF verifier */
@@ -2808,13 +2934,16 @@ free_used_maps:
*/
__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
return err;
+
free_prog_sec:
- free_uid(prog->aux->user);
- security_bpf_prog_free(prog->aux);
+ security_bpf_prog_free(prog);
free_prog:
+ free_uid(prog->aux->user);
if (prog->aux->attach_btf)
btf_put(prog->aux->attach_btf);
bpf_prog_free(prog);
+put_token:
+ bpf_token_put(token);
return err;
}
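
The loader side follows the same pattern; a hedged sketch of BPF_PROG_LOAD with a token, using the same headers as the map sketch above. As the hunk above shows, a token that does not cover BPF_PROG_LOAD or this prog/attach type combination is silently dropped and plain capability checks apply:

	static int prog_load_with_token(const struct bpf_insn *insns, __u32 insn_cnt,
					int token_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_XDP;
		attr.insns = (__u64)(unsigned long)insns;
		attr.insn_cnt = insn_cnt;
		attr.license = (__u64)(unsigned long)"GPL";
		attr.prog_flags = BPF_F_TOKEN_FD;
		attr.prog_token_fd = token_fd; /* new last field, see above */

		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	}
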
@@ -3501,6 +3630,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
if (!kallsyms_show_value(current_cred()))
addr = 0;
info->perf_event.kprobe.addr = addr;
+ info->perf_event.kprobe.cookie = event->bpf_cookie;
return 0;
}
#endif
@@ -3526,6 +3656,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
else
info->perf_event.type = BPF_PERF_EVENT_UPROBE;
info->perf_event.uprobe.offset = offset;
+ info->perf_event.uprobe.cookie = event->bpf_cookie;
return 0;
}
#endif
@@ -3553,6 +3684,7 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
ulen = info->perf_event.tracepoint.name_len;
info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
+ info->perf_event.tracepoint.cookie = event->bpf_cookie;
return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
}
@@ -3561,6 +3693,7 @@ static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
{
info->perf_event.event.type = event->attr.type;
info->perf_event.event.config = event->attr.config;
+ info->perf_event.event.cookie = event->bpf_cookie;
info->perf_event.type = BPF_PERF_EVENT_EVENT;
return 0;
}
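
These four hunks expose the link's bpf_cookie through BPF_OBJ_GET_INFO_BY_FD. A sketch of reading it back with libbpf's bpf_obj_get_info_by_fd(); link_fd is assumed to come from an earlier bpf_link_create():

	#include <bpf/bpf.h>
	#include <linux/bpf.h>
	#include <stdio.h>
	#include <string.h>

	static void print_kprobe_cookie(int link_fd)
	{
		struct bpf_link_info info;
		__u32 len = sizeof(info);

		memset(&info, 0, sizeof(info));
		if (bpf_obj_get_info_by_fd(link_fd, &info, &len))
			return;
		if (info.type == BPF_LINK_TYPE_PERF_EVENT &&
		    info.perf_event.type == BPF_PERF_EVENT_KPROBE)
			printf("kprobe cookie: %llu\n",
			       (unsigned long long)info.perf_event.kprobe.cookie);
	}
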
@@ -3818,7 +3951,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
case BPF_PROG_TYPE_SK_LOOKUP:
return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
case BPF_PROG_TYPE_CGROUP_SKB:
- if (!capable(CAP_NET_ADMIN))
+ if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
/* cg-skb progs can be loaded by unpriv user.
* check permissions at attach time.
*/
@@ -4021,7 +4154,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
static int bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- if (!capable(CAP_NET_ADMIN))
+ if (!bpf_net_capable())
return -EPERM;
if (CHECK_ATTR(BPF_PROG_QUERY))
return -EINVAL;
@@ -4320,6 +4453,12 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
continue;
}
+ if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX ||
+ BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) {
+ insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM;
+ continue;
+ }
+
if (code != (BPF_LD | BPF_IMM | BPF_DW))
continue;
@@ -4687,6 +4826,8 @@ static int bpf_map_get_info_by_fd(struct file *file,
info.btf_value_type_id = map->btf_value_type_id;
}
info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+ if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
+ bpf_map_struct_ops_info_fill(&info, map);
if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_info_fill(&info, map);
@@ -4789,15 +4930,34 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
return err;
}
-#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size
+#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
+ struct bpf_token *token = NULL;
+
if (CHECK_ATTR(BPF_BTF_LOAD))
return -EINVAL;
- if (!bpf_capable())
+ if (attr->btf_flags & ~BPF_F_TOKEN_FD)
+ return -EINVAL;
+
+ if (attr->btf_flags & BPF_F_TOKEN_FD) {
+ token = bpf_token_get_from_fd(attr->btf_token_fd);
+ if (IS_ERR(token))
+ return PTR_ERR(token);
+ if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
+ bpf_token_put(token);
+ token = NULL;
+ }
+ }
+
+ if (!bpf_token_capable(token, CAP_BPF)) {
+ bpf_token_put(token);
return -EPERM;
+ }
+
+ bpf_token_put(token);
return btf_new_fd(attr, uattr, uattr_size);
}
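
And the BTF side, mirroring the prog-load flow (btf_token_fd per BPF_BTF_LOAD_LAST_FIELD above); a sketch with the same headers as the earlier snippets:

	static int btf_load_with_token(const void *btf_data, __u32 btf_data_sz,
				       int token_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.btf = (__u64)(unsigned long)btf_data;
		attr.btf_size = btf_data_sz;
		attr.btf_flags = BPF_F_TOKEN_FD;
		attr.btf_token_fd = token_fd;

		return syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
	}
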
@@ -5394,7 +5554,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
/* The bpf program will not access the bpf map, but for the sake of
* simplicity, increase sleepable_refcnt for sleepable program as well.
*/
- if (prog->aux->sleepable)
+ if (prog->sleepable)
atomic64_inc(&map->sleepable_refcnt);
memcpy(used_maps_new, used_maps_old,
sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
@@ -5415,6 +5575,20 @@ out_prog_put:
return ret;
}
+#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
+
+static int token_create(union bpf_attr *attr)
+{
+ if (CHECK_ATTR(BPF_TOKEN_CREATE))
+ return -EINVAL;
+
+ /* no flags are supported yet */
+ if (attr->token_create.flags)
+ return -EINVAL;
+
+ return bpf_token_create(attr);
+}
+
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
union bpf_attr attr;
@@ -5548,6 +5722,9 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
case BPF_PROG_BIND_MAP:
err = bpf_prog_bind_map(&attr);
break;
+ case BPF_TOKEN_CREATE:
+ err = token_create(&attr);
+ break;
default:
err = -EINVAL;
break;
@@ -5654,7 +5831,7 @@ static const struct bpf_func_proto bpf_sys_bpf_proto = {
const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
BPF_CALL_1(bpf_sys_close, u32, fd)
@@ -5704,7 +5881,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_sys_bpf:
- return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
+ return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
+ ? NULL : &bpf_sys_bpf_proto;
case BPF_FUNC_btf_find_by_name_kind:
return &bpf_btf_find_by_name_kind_proto;
case BPF_FUNC_sys_close:
diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c
new file mode 100644
index 000000000000..d6ccf8d00eab
--- /dev/null
+++ b/kernel/bpf/token.c
@@ -0,0 +1,278 @@
+#include <linux/bpf.h>
+#include <linux/vmalloc.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/namei.h>
+#include <linux/user_namespace.h>
+#include <linux/security.h>
+
+static bool bpf_ns_capable(struct user_namespace *ns, int cap)
+{
+ return ns_capable(ns, cap) || (cap != CAP_SYS_ADMIN && ns_capable(ns, CAP_SYS_ADMIN));
+}
+
+bool bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ struct user_namespace *userns;
+
+ /* BPF token allows ns_capable() level of capabilities */
+ userns = token ? token->userns : &init_user_ns;
+ if (!bpf_ns_capable(userns, cap))
+ return false;
+ if (token && security_bpf_token_capable(token, cap) < 0)
+ return false;
+ return true;
+}
+
+void bpf_token_inc(struct bpf_token *token)
+{
+ atomic64_inc(&token->refcnt);
+}
+
+static void bpf_token_free(struct bpf_token *token)
+{
+ security_bpf_token_free(token);
+ put_user_ns(token->userns);
+ kfree(token);
+}
+
+static void bpf_token_put_deferred(struct work_struct *work)
+{
+ struct bpf_token *token = container_of(work, struct bpf_token, work);
+
+ bpf_token_free(token);
+}
+
+void bpf_token_put(struct bpf_token *token)
+{
+ if (!token)
+ return;
+
+ if (!atomic64_dec_and_test(&token->refcnt))
+ return;
+
+ INIT_WORK(&token->work, bpf_token_put_deferred);
+ schedule_work(&token->work);
+}
+
+static int bpf_token_release(struct inode *inode, struct file *filp)
+{
+ struct bpf_token *token = filp->private_data;
+
+ bpf_token_put(token);
+ return 0;
+}
+
+static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
+{
+ struct bpf_token *token = filp->private_data;
+ u64 mask;
+
+ BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
+ mask = BIT_ULL(__MAX_BPF_CMD) - 1;
+ if ((token->allowed_cmds & mask) == mask)
+ seq_printf(m, "allowed_cmds:\tany\n");
+ else
+ seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);
+
+ BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
+ mask = BIT_ULL(__MAX_BPF_MAP_TYPE) - 1;
+ if ((token->allowed_maps & mask) == mask)
+ seq_printf(m, "allowed_maps:\tany\n");
+ else
+ seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);
+
+ BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
+ mask = BIT_ULL(__MAX_BPF_PROG_TYPE) - 1;
+ if ((token->allowed_progs & mask) == mask)
+ seq_printf(m, "allowed_progs:\tany\n");
+ else
+ seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);
+
+ BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
+ mask = BIT_ULL(__MAX_BPF_ATTACH_TYPE) - 1;
+ if ((token->allowed_attachs & mask) == mask)
+ seq_printf(m, "allowed_attachs:\tany\n");
+ else
+ seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
+}
+
+#define BPF_TOKEN_INODE_NAME "bpf-token"
+
+static const struct inode_operations bpf_token_iops = { };
+
+static const struct file_operations bpf_token_fops = {
+ .release = bpf_token_release,
+ .show_fdinfo = bpf_token_show_fdinfo,
+};
+
+int bpf_token_create(union bpf_attr *attr)
+{
+ struct bpf_mount_opts *mnt_opts;
+ struct bpf_token *token = NULL;
+ struct user_namespace *userns;
+ struct inode *inode;
+ struct file *file;
+ struct path path;
+ struct fd f;
+ umode_t mode;
+ int err, fd;
+
+ f = fdget(attr->token_create.bpffs_fd);
+ if (!f.file)
+ return -EBADF;
+
+ path = f.file->f_path;
+ path_get(&path);
+ fdput(f);
+
+ if (path.dentry != path.mnt->mnt_sb->s_root) {
+ err = -EINVAL;
+ goto out_path;
+ }
+ if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
+ err = -EINVAL;
+ goto out_path;
+ }
+ err = path_permission(&path, MAY_ACCESS);
+ if (err)
+ goto out_path;
+
+ userns = path.dentry->d_sb->s_user_ns;
+ /*
+ * Enforce that creators of BPF tokens are in the same user
+ * namespace as the BPF FS instance. This makes reasoning about
+ * permissions a lot easier and we can always relax this later.
+ */
+ if (current_user_ns() != userns) {
+ err = -EPERM;
+ goto out_path;
+ }
+ if (!ns_capable(userns, CAP_BPF)) {
+ err = -EPERM;
+ goto out_path;
+ }
+
+ /* Creating BPF token in init_user_ns doesn't make much sense. */
+ if (current_user_ns() == &init_user_ns) {
+ err = -EOPNOTSUPP;
+ goto out_path;
+ }
+
+ mnt_opts = path.dentry->d_sb->s_fs_info;
+ if (mnt_opts->delegate_cmds == 0 &&
+ mnt_opts->delegate_maps == 0 &&
+ mnt_opts->delegate_progs == 0 &&
+ mnt_opts->delegate_attachs == 0) {
+ err = -ENOENT; /* no BPF token delegation is set up */
+ goto out_path;
+ }
+
+ mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
+ inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out_path;
+ }
+
+ inode->i_op = &bpf_token_iops;
+ inode->i_fop = &bpf_token_fops;
+ clear_nlink(inode); /* make sure it is unlinked */
+
+ file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
+ if (IS_ERR(file)) {
+ iput(inode);
+ err = PTR_ERR(file);
+ goto out_path;
+ }
+
+ token = kzalloc(sizeof(*token), GFP_USER);
+ if (!token) {
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ atomic64_set(&token->refcnt, 1);
+
+ /* remember bpffs owning userns for future ns_capable() checks */
+ token->userns = get_user_ns(userns);
+
+ token->allowed_cmds = mnt_opts->delegate_cmds;
+ token->allowed_maps = mnt_opts->delegate_maps;
+ token->allowed_progs = mnt_opts->delegate_progs;
+ token->allowed_attachs = mnt_opts->delegate_attachs;
+
+ err = security_bpf_token_create(token, attr, &path);
+ if (err)
+ goto out_token;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ err = fd;
+ goto out_token;
+ }
+
+ file->private_data = token;
+ fd_install(fd, file);
+
+ path_put(&path);
+ return fd;
+
+out_token:
+ bpf_token_free(token);
+out_file:
+ fput(file);
+out_path:
+ path_put(&path);
+ return err;
+}
+
+struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ struct fd f = fdget(ufd);
+ struct bpf_token *token;
+
+ if (!f.file)
+ return ERR_PTR(-EBADF);
+ if (f.file->f_op != &bpf_token_fops) {
+ fdput(f);
+ return ERR_PTR(-EINVAL);
+ }
+
+ token = f.file->private_data;
+ bpf_token_inc(token);
+ fdput(f);
+
+ return token;
+}
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ if (!token)
+ return false;
+ if (!(token->allowed_cmds & BIT_ULL(cmd)))
+ return false;
+ return security_bpf_token_cmd(token, cmd) == 0;
+}
+
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
+{
+ if (!token || type >= __MAX_BPF_MAP_TYPE)
+ return false;
+
+ return token->allowed_maps & BIT_ULL(type);
+}
+
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type)
+{
+ if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
+ return false;
+
+ return (token->allowed_progs & BIT_ULL(prog_type)) &&
+ (token->allowed_attachs & BIT_ULL(attach_type));
+}
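
bpf_token_create() above only succeeds against the root of a bpffs instance mounted with delegation options inside a non-init user namespace. A sketch of the consumer side, with a hypothetical mount point; the delegate_* mount options feed the mnt_opts->delegate_* fields checked above:

	/* Assumes a privileged helper already did something like
	 *   mount -t bpf -o delegate_cmds=any,delegate_maps=any,\
	 *     delegate_progs=any,delegate_attachs=any none /sys/fs/bpf/token
	 * inside the container's user namespace.
	 */
	#include <fcntl.h>
	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int token_create_from_bpffs(const char *bpffs_path)
	{
		union bpf_attr attr;
		int bpffs_fd, token_fd;

		bpffs_fd = open(bpffs_path, O_RDONLY); /* must be the bpffs root */
		if (bpffs_fd < 0)
			return -1;

		memset(&attr, 0, sizeof(attr));
		attr.token_create.bpffs_fd = bpffs_fd;

		/* returns an O_CLOEXEC anon fd; the granted allowed_* masks are
		 * visible in /proc/<pid>/fdinfo/<fd> via bpf_token_show_fdinfo()
		 */
		token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
		close(bpffs_fd);
		return token_fd;
	}
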
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index d382f5ebe06c..db7599c59c78 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -1014,7 +1014,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
{
- bool sleepable = prog->aux->sleepable;
+ bool sleepable = prog->sleepable;
if (bpf_prog_check_recur(prog))
return sleepable ? __bpf_prog_enter_sleepable_recur :
@@ -1029,7 +1029,7 @@ bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
{
- bool sleepable = prog->aux->sleepable;
+ bool sleepable = prog->sleepable;
if (bpf_prog_check_recur(prog))
return sleepable ? __bpf_prog_exit_sleepable_recur :
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ddea9567f755..63749ad5ac6b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -528,6 +528,21 @@ static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
(bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
}
+static bool is_async_callback_calling_insn(struct bpf_insn *insn)
+{
+ return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
+}
+
+static bool is_may_goto_insn(struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
+}
+
+static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
+{
+ return is_may_goto_insn(&env->prog->insnsi[insn_idx]);
+}
+
static bool is_storage_get_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_storage_get ||
@@ -1155,6 +1170,12 @@ static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
stack->spilled_ptr.type == SCALAR_VALUE;
}
+static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
+{
+ return stack->slot_type[0] == STACK_SPILL &&
+ stack->spilled_ptr.type == SCALAR_VALUE;
+}
+
/* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
* case they are equivalent, or it's STACK_ZERO, in which case we preserve
* more precise STACK_ZERO.
@@ -1418,6 +1439,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
dst_state->dfs_depth = src->dfs_depth;
dst_state->callback_unroll_depth = src->callback_unroll_depth;
dst_state->used_as_loop_entry = src->used_as_loop_entry;
+ dst_state->may_goto_depth = src->may_goto_depth;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
@@ -2264,8 +2286,7 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
}
/* Mark a register as having a completely unknown (scalar) value. */
-static void __mark_reg_unknown(const struct bpf_verifier_env *env,
- struct bpf_reg_state *reg)
+static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{
/*
* Clear type, off, and union(map_ptr, range) and
@@ -2277,10 +2298,20 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
reg->ref_obj_id = 0;
reg->var_off = tnum_unknown;
reg->frameno = 0;
- reg->precise = !env->bpf_capable;
+ reg->precise = false;
__mark_reg_unbounded(reg);
}
+/* Mark a register as having a completely unknown (scalar) value,
+ * initialize .precise as true when not bpf capable.
+ */
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg)
+{
+ __mark_reg_unknown_imprecise(reg);
+ reg->precise = !env->bpf_capable;
+}
+
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
@@ -4355,6 +4386,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_MEM:
case PTR_TO_FUNC:
case PTR_TO_MAP_KEY:
+ case PTR_TO_ARENA:
return true;
default:
return false;
@@ -4380,20 +4412,6 @@ static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value;
}
-static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
-{
- return tnum_is_unknown(reg->var_off) &&
- reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
- reg->umin_value == 0 && reg->umax_value == U64_MAX &&
- reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
- reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
-}
-
-static bool register_is_bounded(struct bpf_reg_state *reg)
-{
- return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
-}
-
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
@@ -4403,6 +4421,18 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
return reg->type != SCALAR_VALUE;
}
+static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
+ struct bpf_reg_state *src_reg)
+{
+ if (src_reg->type == SCALAR_VALUE && !src_reg->id &&
+ !tnum_is_const(src_reg->var_off))
+ /* Ensure that src_reg has a valid ID that will be copied to
+ * dst_reg and then will be used by find_equal_scalars() to
+ * propagate min/max range.
+ */
+ src_reg->id = ++env->id_gen;
+}
+
/* Copy src state preserving dst->parent and dst->live fields */
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{
@@ -4438,6 +4468,11 @@ static bool is_bpf_st_mem(struct bpf_insn *insn)
return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
}
+static int get_reg_width(struct bpf_reg_state *reg)
+{
+ return fls64(reg->umax_value);
+}
+
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
@@ -4487,13 +4522,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
return err;
mark_stack_slot_scratched(env, spi);
- if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && env->bpf_capable) {
+ if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
+ bool reg_value_fits;
+
+ reg_value_fits = get_reg_width(reg) <= BITS_PER_BYTE * size;
+ /* Make sure that reg had an ID to build a relation on spill. */
+ if (reg_value_fits)
+ assign_scalar_id_before_mov(env, reg);
save_register_state(env, state, spi, reg, size);
/* Break the relation on a narrowing spill. */
- if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
+ if (!reg_value_fits)
state->stack[spi].spilled_ptr.id = 0;
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
- insn->imm != 0 && env->bpf_capable) {
+ env->bpf_capable) {
struct bpf_reg_state fake_reg = {};
__mark_reg_known(&fake_reg, insn->imm);
@@ -4640,7 +4681,20 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
return -EINVAL;
}
- /* Erase all spilled pointers. */
+ /* If writing_zero and the spi slot contains a spill of value 0,
+ * maintain the spill type.
+ */
+ if (writing_zero && *stype == STACK_SPILL &&
+ is_spilled_scalar_reg(&state->stack[spi])) {
+ struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr;
+
+ if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) {
+ zero_used = true;
+ continue;
+ }
+ }
+
+ /* Erase all other spilled pointers. */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Update the slot type. */
@@ -4756,7 +4810,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
if (dst_regno < 0)
return 0;
- if (!(off % BPF_REG_SIZE) && size == spill_size) {
+ if (size <= spill_size &&
+ bpf_stack_narrow_access_ok(off, size, spill_size)) {
/* The earlier check_reg_arg() has decided the
* subreg_def for this insn. Save it first.
*/
@@ -4764,6 +4819,12 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
copy_register_state(&state->regs[dst_regno], reg);
state->regs[dst_regno].subreg_def = subreg_def;
+
+ /* Break the relation on a narrowing fill.
+ * coerce_reg_to_size will adjust the boundaries.
+ */
+ if (get_reg_width(reg) > size * BITS_PER_BYTE)
+ state->regs[dst_regno].id = 0;
} else {
int spill_cnt = 0, zero_cnt = 0;
@@ -5211,6 +5272,11 @@ bad_type:
return -EINVAL;
}
+static bool in_sleepable(struct bpf_verifier_env *env)
+{
+ return env->prog->sleepable;
+}
+
/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
* can dereference RCU protected pointers and result is PTR_TRUSTED.
*/
@@ -5218,7 +5284,7 @@ static bool in_rcu_cs(struct bpf_verifier_env *env)
{
return env->cur_state->active_rcu_lock ||
env->cur_state->active_lock.ptr ||
- !env->prog->aux->sleepable;
+ !in_sleepable(env);
}
/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
@@ -5763,6 +5829,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
case PTR_TO_XDP_SOCK:
pointer_desc = "xdp_sock ";
break;
+ case PTR_TO_ARENA:
+ return 0;
default:
break;
}
@@ -5770,6 +5838,17 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
+static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
+{
+ if (env->prog->jit_requested)
+ return round_up(stack_depth, 16);
+
+ /* round up to 32-bytes, since this is granularity
+ * of interpreter stack size
+ */
+ return round_up(max_t(u32, stack_depth, 1), 32);
+}
+
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
@@ -5813,10 +5892,7 @@ process_func:
depth);
return -EACCES;
}
- /* round up to 32-bytes, since this is granularity
- * of interpreter stack size
- */
- depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
+ depth += round_up_stack_depth(env, subprog[idx].stack_depth);
if (depth > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
frame + 1, depth);
@@ -5910,7 +5986,7 @@ continue_func:
*/
if (frame == 0)
return 0;
- depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
+ depth -= round_up_stack_depth(env, subprog[idx].stack_depth);
frame--;
i = ret_insn[frame];
idx = ret_prog[frame];
@@ -6041,10 +6117,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
* values are also truncated so we push 64-bit bounds into
* 32-bit bounds. Above were truncated < 32-bits already.
*/
- if (size < 4) {
+ if (size < 4)
__mark_reg32_unbounded(reg);
- reg_bounds_sync(reg);
- }
+
+ reg_bounds_sync(reg);
}
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
@@ -6864,6 +6940,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
mark_reg_unknown(env, regs, value_regno);
+ } else if (reg->type == PTR_TO_ARENA) {
+ if (t == BPF_READ && value_regno >= 0)
+ mark_reg_unknown(env, regs, value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str(env, reg->type));
@@ -8200,6 +8279,7 @@ found:
switch ((int)reg->type) {
case PTR_TO_BTF_ID:
case PTR_TO_BTF_ID | PTR_TRUSTED:
+ case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
case PTR_TO_BTF_ID | MEM_RCU:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
@@ -8334,6 +8414,7 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
case PTR_TO_MEM | MEM_RINGBUF:
case PTR_TO_BUF:
case PTR_TO_BUF | MEM_RDONLY:
+ case PTR_TO_ARENA:
case SCALAR_VALUE:
return 0;
/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
@@ -9298,10 +9379,34 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
bpf_log(log, "arg#%d is expected to be non-NULL\n", i);
return -EINVAL;
}
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) {
+ /*
+ * Can pass any value and the kernel won't crash, but
+ * only PTR_TO_ARENA or SCALAR make sense. Everything
+ * else is a bug in the bpf program. Point it out to
+ * the user at the verification time instead of
+ * run-time debug nightmare.
+ */
+ if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) {
+ bpf_log(log, "R%d is not a pointer to arena or scalar.\n", regno);
+ return -EINVAL;
+ }
} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0);
if (ret)
return ret;
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
+ struct bpf_call_arg_meta meta;
+ int err;
+
+ if (register_is_null(reg) && type_may_be_null(arg->arg_type))
+ continue;
+
+ memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
+ err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
+ err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
+ if (err)
+ return err;
} else {
bpf_log(log, "verifier bug: unrecognized arg#%d type %d\n",
i, arg->arg_type);
@@ -9377,9 +9482,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
return -EFAULT;
}
- if (insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == 0 &&
- insn->imm == BPF_FUNC_timer_set_callback) {
+ if (is_async_callback_calling_insn(insn)) {
struct bpf_verifier_state *async_cb;
/* there is no real recursion here. timer callbacks are async */
@@ -9438,6 +9541,13 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (subprog_is_global(env, subprog)) {
const char *sub_name = subprog_name(env, subprog);
+ /* Only global subprogs cannot be called with a lock held. */
+ if (env->cur_state->active_lock.ptr) {
+ verbose(env, "global function calls are not allowed while holding a lock,\n"
+ "use static function instead\n");
+ return -EINVAL;
+ }
+
if (err) {
verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
subprog, sub_name);
@@ -10094,7 +10204,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}
- if (!env->prog->aux->sleepable && fn->might_sleep) {
+ if (!in_sleepable(env) && fn->might_sleep) {
verbose(env, "helper call might sleep in a non-sleepable prog\n");
return -EINVAL;
}
@@ -10124,7 +10234,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}
- if (env->prog->aux->sleepable && is_storage_get_function(func_id))
+ if (in_sleepable(env) && is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
@@ -10620,24 +10730,6 @@ static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
return meta->kfunc_flags & KF_RCU_PROTECTED;
}
-static bool __kfunc_param_match_suffix(const struct btf *btf,
- const struct btf_param *arg,
- const char *suffix)
-{
- int suffix_len = strlen(suffix), len;
- const char *param_name;
-
- /* In the future, this can be ported to use BTF tagging */
- param_name = btf_name_by_offset(btf, arg->name_off);
- if (str_is_empty(param_name))
- return false;
- len = strlen(param_name);
- if (len < suffix_len)
- return false;
- param_name += len - suffix_len;
- return !strncmp(param_name, suffix, suffix_len);
-}
-
static bool is_kfunc_arg_mem_size(const struct btf *btf,
const struct btf_param *arg,
const struct bpf_reg_state *reg)
@@ -10648,7 +10740,7 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf,
if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
return false;
- return __kfunc_param_match_suffix(btf, arg, "__sz");
+ return btf_param_match_suffix(btf, arg, "__sz");
}
static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
@@ -10661,47 +10753,52 @@ static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
return false;
- return __kfunc_param_match_suffix(btf, arg, "__szk");
+ return btf_param_match_suffix(btf, arg, "__szk");
}
static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__opt");
+ return btf_param_match_suffix(btf, arg, "__opt");
}
static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__k");
+ return btf_param_match_suffix(btf, arg, "__k");
}
static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__ign");
+ return btf_param_match_suffix(btf, arg, "__ign");
+}
+
+static bool is_kfunc_arg_map(const struct btf *btf, const struct btf_param *arg)
+{
+ return btf_param_match_suffix(btf, arg, "__map");
}
static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__alloc");
+ return btf_param_match_suffix(btf, arg, "__alloc");
}
static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__uninit");
+ return btf_param_match_suffix(btf, arg, "__uninit");
}
static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr");
+ return btf_param_match_suffix(btf, arg, "__refcounted_kptr");
}
static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__nullable");
+ return btf_param_match_suffix(btf, arg, "__nullable");
}
static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__str");
+ return btf_param_match_suffix(btf, arg, "__str");
}
static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
@@ -10848,6 +10945,7 @@ enum kfunc_ptr_arg_type {
KF_ARG_PTR_TO_RB_NODE,
KF_ARG_PTR_TO_NULL,
KF_ARG_PTR_TO_CONST_STR,
+ KF_ARG_PTR_TO_MAP,
};
enum special_kfunc_type {
@@ -10971,7 +11069,7 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
* type to our caller. When a set of conditions hold in the BTF type of
* arguments, we resolve it to a known kfunc_ptr_arg_type.
*/
- if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
+ if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
return KF_ARG_PTR_TO_CTX;
if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
@@ -11001,6 +11099,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_const_str(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_CONST_STR;
+ if (is_kfunc_arg_map(meta->btf, &args[argno]))
+ return KF_ARG_PTR_TO_MAP;
+
if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
if (!btf_type_is_struct(ref_t)) {
verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
@@ -11483,7 +11584,7 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
return true;
fallthrough;
default:
- return env->prog->aux->sleepable;
+ return in_sleepable(env);
}
}
@@ -11601,6 +11702,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
switch (kf_arg_type) {
case KF_ARG_PTR_TO_NULL:
continue;
+ case KF_ARG_PTR_TO_MAP:
case KF_ARG_PTR_TO_ALLOC_BTF_ID:
case KF_ARG_PTR_TO_BTF_ID:
if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
@@ -11817,6 +11919,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (ret < 0)
return ret;
break;
+ case KF_ARG_PTR_TO_MAP:
+ /* If argument has '__map' suffix expect 'struct bpf_map *' */
+ ref_id = *reg2btf_ids[CONST_PTR_TO_MAP];
+ ref_t = btf_type_by_id(btf_vmlinux, ref_id);
+ ref_tname = btf_name_by_offset(btf, ref_t->name_off);
+ fallthrough;
case KF_ARG_PTR_TO_BTF_ID:
/* Only base_type is checked, further checks are done here */
if ((base_type(reg->type) != PTR_TO_BTF_ID ||
@@ -12004,7 +12112,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
sleepable = is_kfunc_sleepable(&meta);
- if (sleepable && !env->prog->aux->sleepable) {
+ if (sleepable && !in_sleepable(env)) {
verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
return -EACCES;
}
@@ -12291,6 +12399,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
meta.func_name);
return -EFAULT;
}
+ } else if (btf_type_is_void(ptr_type)) {
+ /* kfunc returning 'void *' is equivalent to returning scalar */
+ mark_reg_unknown(env, regs, BPF_REG_0);
} else if (!__btf_type_is_struct(ptr_type)) {
if (!meta.r0_size) {
__u32 sz;
@@ -12828,6 +12939,19 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
switch (base_type(ptr_reg->type)) {
+ case PTR_TO_CTX:
+ case PTR_TO_MAP_VALUE:
+ case PTR_TO_MAP_KEY:
+ case PTR_TO_STACK:
+ case PTR_TO_PACKET_META:
+ case PTR_TO_PACKET:
+ case PTR_TO_TP_BUFFER:
+ case PTR_TO_BTF_ID:
+ case PTR_TO_MEM:
+ case PTR_TO_BUF:
+ case PTR_TO_FUNC:
+ case CONST_PTR_TO_DYNPTR:
+ break;
case PTR_TO_FLOW_KEYS:
if (known)
break;
@@ -12837,16 +12961,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (known && smin_val == 0 && opcode == BPF_ADD)
break;
fallthrough;
- case PTR_TO_PACKET_END:
- case PTR_TO_SOCKET:
- case PTR_TO_SOCK_COMMON:
- case PTR_TO_TCP_SOCK:
- case PTR_TO_XDP_SOCK:
+ default:
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str(env, ptr_reg->type));
return -EACCES;
- default:
- break;
}
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
@@ -13753,6 +13871,21 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
src_reg = NULL;
+
+ if (dst_reg->type == PTR_TO_ARENA) {
+ struct bpf_insn_aux_data *aux = cur_aux(env);
+
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ /*
+ * 32-bit operations zero upper bits automatically.
+ * 64-bit operations need to be converted to 32.
+ */
+ aux->needs_zext = true;
+
+ /* Any arithmetic operations are allowed on arena pointers */
+ return 0;
+ }
+
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
else
@@ -13870,19 +14003,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
- if (insn->imm != 0) {
- verbose(env, "BPF_MOV uses reserved fields\n");
- return -EINVAL;
- }
-
if (BPF_CLASS(insn->code) == BPF_ALU) {
- if (insn->off != 0 && insn->off != 8 && insn->off != 16) {
+ if ((insn->off != 0 && insn->off != 8 && insn->off != 16) ||
+ insn->imm) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
+ } else if (insn->off == BPF_ADDR_SPACE_CAST) {
+ if (insn->imm != 1 && insn->imm != 1u << 16) {
+ verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");
+ return -EINVAL;
+ }
} else {
- if (insn->off != 0 && insn->off != 8 && insn->off != 16 &&
- insn->off != 32) {
+ if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&
+ insn->off != 32) || insn->imm) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
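
For orientation, a summary of the imm encodings accepted here, derived from the surrounding hunks (the imm appears to encode (dst_as << 16) | src_as):

	/*
	 * addr_space_cast: BPF_ALU64 | BPF_MOV | BPF_X, off == BPF_ADDR_SPACE_CAST
	 *
	 *   imm == 1        cast from arena as(1) to kernel as(0); check_alu_op()
	 *                   below marks the destination register PTR_TO_ARENA
	 *   imm == 1u << 16 cast from as(0) back to as(1); the conversion, if
	 *                   any, is left to the JIT (see do_misc_fixups() below)
	 */
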
@@ -13907,20 +14041,18 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = regs + insn->src_reg;
struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
- bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id &&
- !tnum_is_const(src_reg->var_off);
if (BPF_CLASS(insn->code) == BPF_ALU64) {
- if (insn->off == 0) {
+ if (insn->imm) {
+ /* off == BPF_ADDR_SPACE_CAST */
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ if (insn->imm == 1) /* cast from as(1) to as(0) */
+ dst_reg->type = PTR_TO_ARENA;
+ } else if (insn->off == 0) {
/* case: R1 = R2
* copy register state to dest reg
*/
- if (need_id)
- /* Assign src and dst registers the same ID
- * that will be used by find_equal_scalars()
- * to propagate min/max range.
- */
- src_reg->id = ++env->id_gen;
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
@@ -13935,8 +14067,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
bool no_sext;
no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
- if (no_sext && need_id)
- src_reg->id = ++env->id_gen;
+ if (no_sext)
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
@@ -13956,10 +14088,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
if (insn->off == 0) {
- bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
+ bool is_src_reg_u32 = get_reg_width(src_reg) <= 32;
- if (is_src_reg_u32 && need_id)
- src_reg->id = ++env->id_gen;
+ if (is_src_reg_u32)
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
/* Make sure ID is cleared if src_reg is not in u32
* range otherwise dst_reg min/max could be incorrectly
@@ -13973,8 +14105,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* case: W1 = (s8, s16)W2 */
bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
- if (no_sext && need_id)
- src_reg->id = ++env->id_gen;
+ if (no_sext)
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
@@ -14809,11 +14941,36 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
int err;
/* Only conditional jumps are expected to reach here. */
- if (opcode == BPF_JA || opcode > BPF_JSLE) {
+ if (opcode == BPF_JA || opcode > BPF_JCOND) {
verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
return -EINVAL;
}
+ if (opcode == BPF_JCOND) {
+ struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
+ int idx = *insn_idx;
+
+ if (insn->code != (BPF_JMP | BPF_JCOND) ||
+ insn->src_reg != BPF_MAY_GOTO ||
+ insn->dst_reg || insn->imm || insn->off == 0) {
+ verbose(env, "invalid may_goto off %d imm %d\n",
+ insn->off, insn->imm);
+ return -EINVAL;
+ }
+ prev_st = find_prev_entry(env, cur_st->parent, idx);
+
+ /* branch out 'fallthrough' insn as a new state to explore */
+ queued_st = push_stack(env, idx + 1, idx, false);
+ if (!queued_st)
+ return -ENOMEM;
+
+ queued_st->may_goto_depth++;
+ if (prev_st)
+ widen_imprecise_scalars(env, prev_st, queued_st);
+ *insn_idx += insn->off;
+ return 0;
+ }
+
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
@@ -15065,6 +15222,10 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
+ if (map->map_type == BPF_MAP_TYPE_ARENA) {
+ __mark_reg_unknown(env, dst_reg);
+ return 0;
+ }
dst_reg->type = PTR_TO_MAP_VALUE;
dst_reg->off = aux->map_off;
WARN_ON_ONCE(map->max_entries != 1);
@@ -15531,7 +15692,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
return DONE_EXPLORING;
case BPF_CALL:
- if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback)
+ if (is_async_callback_calling_insn(insn))
/* Mark this call insn as a prune point to trigger
* is_state_visited() check before call itself is
* processed by __check_func_call(). Otherwise new
@@ -15597,6 +15758,8 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
default:
/* conditional jump with two edges */
mark_prune_point(env, t);
+ if (is_may_goto_insn(insn))
+ mark_force_checkpoint(env, t);
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret)
@@ -16160,8 +16323,8 @@ static int check_btf_info(struct bpf_verifier_env *env,
}
/* check %cur's range satisfies %old's */
-static bool range_within(struct bpf_reg_state *old,
- struct bpf_reg_state *cur)
+static bool range_within(const struct bpf_reg_state *old,
+ const struct bpf_reg_state *cur)
{
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
@@ -16325,21 +16488,28 @@ static bool regs_exact(const struct bpf_reg_state *rold,
check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
}
+enum exact_level {
+ NOT_EXACT,
+ EXACT,
+ RANGE_WITHIN
+};
+
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
- struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact)
+ struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
+ enum exact_level exact)
{
- if (exact)
+ if (exact == EXACT)
return regs_exact(rold, rcur, idmap);
- if (!(rold->live & REG_LIVE_READ))
+ if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT)
/* explored state didn't use this */
return true;
- if (rold->type == NOT_INIT)
- /* explored state can't have used this */
- return true;
- if (rcur->type == NOT_INIT)
- return false;
+ if (rold->type == NOT_INIT) {
+ if (exact == NOT_EXACT || rcur->type == NOT_INIT)
+ /* explored state can't have used this */
+ return true;
+ }
/* Enforce that register types have to match exactly, including their
* modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
@@ -16374,7 +16544,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
check_scalar_ids(rold->id, rcur->id, idmap);
}
- if (!rold->precise)
+ if (!rold->precise && exact == NOT_EXACT)
return true;
/* Why check_ids() for scalar registers?
*
@@ -16442,13 +16612,53 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
* the same stack frame, since fp-8 in foo != fp-8 in bar
*/
return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
+ case PTR_TO_ARENA:
+ return true;
default:
return regs_exact(rold, rcur, idmap);
}
}
+static struct bpf_reg_state unbound_reg;
+
+static __init int unbound_reg_init(void)
+{
+ __mark_reg_unknown_imprecise(&unbound_reg);
+ unbound_reg.live |= REG_LIVE_READ;
+ return 0;
+}
+late_initcall(unbound_reg_init);
+
+static bool is_stack_all_misc(struct bpf_verifier_env *env,
+ struct bpf_stack_state *stack)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) {
+ if ((stack->slot_type[i] == STACK_MISC) ||
+ (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack))
+ continue;
+ return false;
+ }
+
+ return true;
+}
+
+static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
+ struct bpf_stack_state *stack)
+{
+ if (is_spilled_scalar_reg64(stack))
+ return &stack->spilled_ptr;
+
+ if (is_stack_all_misc(env, stack))
+ return &unbound_reg;
+
+ return NULL;
+}
+
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
- struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
+ struct bpf_func_state *cur, struct bpf_idmap *idmap,
+ enum exact_level exact)
{
+ struct bpf_reg_state *old_reg, *cur_reg;
int i, spi;
@@ -16461,12 +16671,13 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
spi = i / BPF_REG_SIZE;
- if (exact &&
+ if (exact != NOT_EXACT &&
old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
return false;
- if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) {
+ if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)
+ && exact == NOT_EXACT) {
i += BPF_REG_SIZE - 1;
/* explored state didn't use this */
continue;
@@ -16485,6 +16696,20 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
if (i >= cur->allocated_stack)
return false;
+ /* 64-bit scalar spill vs all slots MISC and vice versa.
+ * Load from all slots MISC produces unbound scalar.
+ * Construct a fake register for such stack and call
+ * regsafe() to ensure scalar ids are compared.
+ */
+ old_reg = scalar_reg_for_stack(env, &old->stack[spi]);
+ cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]);
+ if (old_reg && cur_reg) {
+ if (!regsafe(env, old_reg, cur_reg, idmap, exact))
+ return false;
+ i += BPF_REG_SIZE - 1;
+ continue;
+ }
+
/* if old state was safe with misc data in the stack
* it will be safe with zero-initialized stack.
* The opposite is not true
@@ -16598,7 +16823,7 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
* the current state will reach 'bpf_exit' instruction safely
*/
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
- struct bpf_func_state *cur, bool exact)
+ struct bpf_func_state *cur, enum exact_level exact)
{
int i;
@@ -16628,7 +16853,7 @@ static void reset_idmap_scratch(struct bpf_verifier_env *env)
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur,
- bool exact)
+ enum exact_level exact)
{
int i;
@@ -17002,7 +17227,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* => unsafe memory access at 11 would not be caught.
*/
if (is_iter_next_insn(env, insn_idx)) {
- if (states_equal(env, &sl->state, cur, true)) {
+ if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
struct bpf_func_state *cur_frame;
struct bpf_reg_state *iter_state, *iter_reg;
int spi;
@@ -17025,15 +17250,23 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
}
goto skip_inf_loop_check;
}
+ if (is_may_goto_insn_at(env, insn_idx)) {
+ if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
+ update_loop_entry(cur, &sl->state);
+ goto hit;
+ }
+ goto skip_inf_loop_check;
+ }
if (calls_callback(env, insn_idx)) {
- if (states_equal(env, &sl->state, cur, true))
+ if (states_equal(env, &sl->state, cur, RANGE_WITHIN))
goto hit;
goto skip_inf_loop_check;
}
/* attempt to detect infinite loop to avoid unnecessary doomed work */
if (states_maybe_looping(&sl->state, cur) &&
- states_equal(env, &sl->state, cur, false) &&
+ states_equal(env, &sl->state, cur, EXACT) &&
!iter_active_depths_differ(&sl->state, cur) &&
+ sl->state.may_goto_depth == cur->may_goto_depth &&
sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
verbose_linfo(env, insn_idx, "; ");
verbose(env, "infinite loop detected at insn %d\n", insn_idx);
@@ -17089,7 +17322,7 @@ skip_inf_loop_check:
*/
loop_entry = get_loop_entry(&sl->state);
force_exact = loop_entry && loop_entry->branches > 0;
- if (states_equal(env, &sl->state, cur, force_exact)) {
+ if (states_equal(env, &sl->state, cur, force_exact ? RANGE_WITHIN : NOT_EXACT)) {
if (force_exact)
update_loop_entry(cur, loop_entry);
hit:
@@ -17259,6 +17492,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
case PTR_TO_TCP_SOCK:
case PTR_TO_XDP_SOCK:
case PTR_TO_BTF_ID:
+ case PTR_TO_ARENA:
return false;
default:
return true;
@@ -17541,7 +17775,6 @@ static int do_check(struct bpf_verifier_env *env)
if (env->cur_state->active_lock.ptr) {
if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
- (insn->src_reg == BPF_PSEUDO_CALL) ||
(insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
(insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
verbose(env, "function calls are not allowed while holding a lock\n");
@@ -17589,14 +17822,12 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
process_bpf_exit_full:
- if (env->cur_state->active_lock.ptr &&
- !in_rbtree_lock_required_cb(env)) {
+ if (env->cur_state->active_lock.ptr && !env->cur_state->curframe) {
verbose(env, "bpf_spin_unlock is missing\n");
return -EINVAL;
}
- if (env->cur_state->active_rcu_lock &&
- !in_rbtree_lock_required_cb(env)) {
+ if (env->cur_state->active_rcu_lock && !env->cur_state->curframe) {
verbose(env, "bpf_rcu_read_unlock is missing\n");
return -EINVAL;
}
@@ -17909,7 +18140,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}
- if (prog->aux->sleepable)
+ if (prog->sleepable)
switch (map->map_type) {
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_LRU_HASH:
@@ -17925,6 +18156,9 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SK_STORAGE:
case BPF_MAP_TYPE_TASK_STORAGE:
case BPF_MAP_TYPE_CGRP_STORAGE:
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_ARENA:
break;
default:
verbose(env,
@@ -18094,7 +18328,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
return -E2BIG;
}
- if (env->prog->aux->sleepable)
+ if (env->prog->sleepable)
atomic64_inc(&map->sleepable_refcnt);
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
@@ -18112,6 +18346,31 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
fdput(f);
return -EBUSY;
}
+ if (map->map_type == BPF_MAP_TYPE_ARENA) {
+ if (env->prog->aux->arena) {
+ verbose(env, "Only one arena per program\n");
+ fdput(f);
+ return -EBUSY;
+ }
+ if (!env->allow_ptr_leaks || !env->bpf_capable) {
+ verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
+ fdput(f);
+ return -EPERM;
+ }
+ if (!env->prog->jit_requested) {
+ verbose(env, "JIT is required to use arena\n");
+ fdput(f);
+ return -EOPNOTSUPP;
+ }
+ if (!bpf_jit_supports_arena()) {
+ verbose(env, "JIT doesn't support arena\n");
+ fdput(f);
+ return -EOPNOTSUPP;
+ }
+ env->prog->aux->arena = (void *)map;
+ if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
+ verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+ fdput(f);
+ return -EINVAL;
+ }
+ }
fdput(f);
next_insn:
@@ -18733,6 +18992,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
env->prog->aux->num_exentries++;
}
continue;
+ case PTR_TO_ARENA:
+ if (BPF_MODE(insn->code) == BPF_MEMSX) {
+ verbose(env, "sign extending loads from arena are not supported yet\n");
+ return -EOPNOTSUPP;
+ }
+ insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
+ env->prog->aux->num_exentries++;
+ continue;
default:
continue;
}
@@ -18918,13 +19185,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func[i]->aux->nr_linfo = prog->aux->nr_linfo;
func[i]->aux->jited_linfo = prog->aux->jited_linfo;
func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
+ func[i]->aux->arena = prog->aux->arena;
num_exentries = 0;
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
num_exentries++;
+ if ((BPF_CLASS(insn->code) == BPF_STX ||
+ BPF_CLASS(insn->code) == BPF_ST) &&
+ BPF_MODE(insn->code) == BPF_PROBE_MEM32)
+ num_exentries++;
}
func[i]->aux->num_exentries = num_exentries;
func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
@@ -19299,7 +19572,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
- int i, ret, cnt, delta = 0;
+ int i, ret, cnt, delta = 0, cur_subprog = 0;
+ struct bpf_subprog_info *subprogs = env->subprog_info;
+ u16 stack_depth = subprogs[cur_subprog].stack_depth;
+ u16 stack_depth_extra = 0;
if (env->seen_exception && !env->exception_callback_subprog) {
struct bpf_insn patch[] = {
@@ -19319,7 +19595,22 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
mark_subprog_exc_cb(env, env->exception_callback_subprog);
}
- for (i = 0; i < insn_cnt; i++, insn++) {
+ for (i = 0; i < insn_cnt;) {
+ if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
+ if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) ||
+ (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
+ /* convert to 32-bit mov that clears upper 32-bit */
+ insn->code = BPF_ALU | BPF_MOV | BPF_X;
+ /* clear off, so it's a normal 'wX = wY' from JIT pov */
+ insn->off = 0;
+ } /* cast from as(0) to as(1) should be handled by JIT */
+ goto next_insn;
+ }
+
+ if (env->insn_aux_data[i + delta].needs_zext)
+ /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */
+ insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);
+
/* Make divide-by-zero exceptions impossible. */
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
@@ -19358,7 +19649,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
@@ -19378,7 +19669,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
/* Rewrite pointer arithmetic to mitigate speculation attacks. */
@@ -19393,7 +19684,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
aux = &env->insn_aux_data[i + delta];
if (!aux->alu_state ||
aux->alu_state == BPF_ALU_NON_POINTER)
- continue;
+ goto next_insn;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
@@ -19431,19 +19722,39 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
+ }
+
+ if (is_may_goto_insn(insn)) {
+ int stack_off = -stack_depth - 8;
+
+ stack_depth_extra = 8;
+ insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off);
+ insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
+ insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
+ insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off);
+ cnt = 4;
+
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
}
if (insn->code != (BPF_JMP | BPF_CALL))
- continue;
+ goto next_insn;
if (insn->src_reg == BPF_PSEUDO_CALL)
- continue;
+ goto next_insn;
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
if (ret)
return ret;
if (cnt == 0)
- continue;
+ goto next_insn;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
@@ -19452,7 +19763,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
if (insn->imm == BPF_FUNC_get_route_realm)
@@ -19500,11 +19811,11 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
}
insn->imm = ret + 1;
- continue;
+ goto next_insn;
}
if (!bpf_map_ptr_unpriv(aux))
- continue;
+ goto next_insn;
/* instead of changing every JIT dealing with tail_call
* emit two extra insns:
@@ -19533,7 +19844,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
if (insn->imm == BPF_FUNC_timer_set_callback) {
@@ -19570,7 +19881,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
}
if (is_storage_get_function(insn->imm)) {
- if (!env->prog->aux->sleepable ||
+ if (!in_sleepable(env) ||
env->insn_aux_data[i + delta].storage_get_func_atomic)
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
else
@@ -19645,7 +19956,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
@@ -19676,31 +19987,31 @@ patch_map_ops_generic:
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
- continue;
+ goto next_insn;
case BPF_FUNC_map_update_elem:
insn->imm = BPF_CALL_IMM(ops->map_update_elem);
- continue;
+ goto next_insn;
case BPF_FUNC_map_delete_elem:
insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
- continue;
+ goto next_insn;
case BPF_FUNC_map_push_elem:
insn->imm = BPF_CALL_IMM(ops->map_push_elem);
- continue;
+ goto next_insn;
case BPF_FUNC_map_pop_elem:
insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
- continue;
+ goto next_insn;
case BPF_FUNC_map_peek_elem:
insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
- continue;
+ goto next_insn;
case BPF_FUNC_redirect_map:
insn->imm = BPF_CALL_IMM(ops->map_redirect);
- continue;
+ goto next_insn;
case BPF_FUNC_for_each_map_elem:
insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
- continue;
+ goto next_insn;
case BPF_FUNC_map_lookup_percpu_elem:
insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
- continue;
+ goto next_insn;
}
goto patch_call_imm;
@@ -19728,7 +20039,7 @@ patch_map_ops_generic:
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
/* Implement bpf_get_func_arg inline. */
@@ -19753,7 +20064,7 @@ patch_map_ops_generic:
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
/* Implement bpf_get_func_ret inline. */
@@ -19781,7 +20092,7 @@ patch_map_ops_generic:
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
/* Implement get_func_arg_cnt inline. */
@@ -19796,7 +20107,7 @@ patch_map_ops_generic:
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
/* Implement bpf_get_func_ip inline. */
@@ -19811,9 +20122,26 @@ patch_map_ops_generic:
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
- continue;
+ goto next_insn;
}
+ /* Implement bpf_kptr_xchg inline */
+ if (prog->jit_requested && BITS_PER_LONG == 64 &&
+ insn->imm == BPF_FUNC_kptr_xchg &&
+ bpf_jit_supports_ptr_xchg()) {
+ insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+ insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+ cnt = 2;
+
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
+ }
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
@@ -19826,6 +20154,40 @@ patch_call_imm:
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
+next_insn:
+ if (subprogs[cur_subprog + 1].start == i + delta + 1) {
+ subprogs[cur_subprog].stack_depth += stack_depth_extra;
+ subprogs[cur_subprog].stack_extra = stack_depth_extra;
+ cur_subprog++;
+ stack_depth = subprogs[cur_subprog].stack_depth;
+ stack_depth_extra = 0;
+ }
+ i++;
+ insn++;
+ }
+
+ env->prog->aux->stack_depth = subprogs[0].stack_depth;
+ for (i = 0; i < env->subprog_cnt; i++) {
+ int subprog_start = subprogs[i].start;
+ int stack_slots = subprogs[i].stack_extra / 8;
+
+ if (!stack_slots)
+ continue;
+ if (stack_slots > 1) {
+ verbose(env, "verifier bug: stack_slots supports may_goto only\n");
+ return -EFAULT;
+ }
+
+ /* Add ST insn to subprog prologue to init extra stack */
+ insn_buf[0] = BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+ -subprogs[i].stack_depth, BPF_MAX_LOOPS);
+ /* Copy first actual insn to preserve it */
+ insn_buf[1] = env->prog->insnsi[subprog_start];
+
+ new_prog = bpf_patch_insn_data(env, subprog_start, insn_buf, 2);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = prog = new_prog;
}
/* Since poke tab is now finalized, publish aux to tracker. */
@@ -20046,7 +20408,6 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
state->first_insn_idx = env->subprog_info[subprog].start;
state->last_insn_idx = -1;
-
regs = state->frame[state->curframe]->regs;
if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
const char *sub_name = subprog_name(env, subprog);
@@ -20090,6 +20451,21 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
mark_reg_known_zero(env, regs, i);
reg->mem_size = arg->mem_size;
reg->id = ++env->id_gen;
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
+ reg->type = PTR_TO_BTF_ID;
+ if (arg->arg_type & PTR_MAYBE_NULL)
+ reg->type |= PTR_MAYBE_NULL;
+ if (arg->arg_type & PTR_UNTRUSTED)
+ reg->type |= PTR_UNTRUSTED;
+ if (arg->arg_type & PTR_TRUSTED)
+ reg->type |= PTR_TRUSTED;
+ mark_reg_known_zero(env, regs, i);
+ reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */
+ reg->btf_id = arg->btf_id;
+ reg->id = ++env->id_gen;
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) {
+ /* caller can pass either PTR_TO_ARENA or SCALAR */
+ mark_reg_unknown(env, regs, i);
} else {
WARN_ONCE(1, "BUG: unhandled arg#%d type %d\n",
i - BPF_REG_1, arg->arg_type);
@@ -20238,10 +20614,12 @@ static void print_verification_stats(struct bpf_verifier_env *env)
static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{
const struct btf_type *t, *func_proto;
+ const struct bpf_struct_ops_desc *st_ops_desc;
const struct bpf_struct_ops *st_ops;
const struct btf_member *member;
struct bpf_prog *prog = env->prog;
u32 btf_id, member_idx;
+ struct btf *btf;
const char *mname;
if (!prog->gpl_compatible) {
@@ -20249,15 +20627,30 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
return -EINVAL;
}
+ if (!prog->aux->attach_btf_id)
+ return -ENOTSUPP;
+
+ btf = prog->aux->attach_btf;
+ if (btf_is_module(btf)) {
+ /* Make sure st_ops is valid through the lifetime of env */
+ env->attach_btf_mod = btf_try_get_module(btf);
+ if (!env->attach_btf_mod) {
+ verbose(env, "struct_ops module %s is not found\n",
+ btf_get_name(btf));
+ return -ENOTSUPP;
+ }
+ }
+
btf_id = prog->aux->attach_btf_id;
- st_ops = bpf_struct_ops_find(btf_id);
- if (!st_ops) {
+ st_ops_desc = bpf_struct_ops_find(btf, btf_id);
+ if (!st_ops_desc) {
verbose(env, "attach_btf_id %u is not a supported struct\n",
btf_id);
return -ENOTSUPP;
}
+ st_ops = st_ops_desc->st_ops;
- t = st_ops->type;
+ t = st_ops_desc->type;
member_idx = prog->expected_attach_type;
if (member_idx >= btf_type_vlen(t)) {
verbose(env, "attach to invalid member idx %u of struct %s\n",
@@ -20266,8 +20659,8 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
member = &btf_type_member(t)[member_idx];
- mname = btf_name_by_offset(btf_vmlinux, member->name_off);
- func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
+ mname = btf_name_by_offset(btf, member->name_off);
+ func_proto = btf_type_resolve_func_ptr(btf, member->type,
NULL);
if (!func_proto) {
verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
@@ -20285,6 +20678,12 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
}
+ /* btf_ctx_access() used this to provide argument type info */
+ prog->aux->ctx_arg_info =
+ st_ops_desc->arg_info[member_idx].info;
+ prog->aux->ctx_arg_info_size =
+ st_ops_desc->arg_info[member_idx].cnt;
+
prog->aux->attach_func_proto = func_proto;
prog->aux->attach_func_name = mname;
env->ops = st_ops->verifier_ops;
@@ -20542,7 +20941,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
}
}
- if (prog->aux->sleepable) {
+ if (prog->sleepable) {
ret = -EINVAL;
switch (prog->type) {
case BPF_PROG_TYPE_TRACING:
@@ -20653,14 +21052,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
u64 key;
if (prog->type == BPF_PROG_TYPE_SYSCALL) {
- if (prog->aux->sleepable)
+ if (prog->sleepable)
/* attach_btf_id checked to be zero already */
return 0;
verbose(env, "Syscall programs can only be sleepable\n");
return -EINVAL;
}
- if (prog->aux->sleepable && !can_be_sleepable(prog)) {
+ if (prog->sleepable && !can_be_sleepable(prog)) {
verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
return -EINVAL;
}
@@ -20769,7 +21168,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
- is_priv = bpf_capable();
+
+ env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
+ env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
+ env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
+ env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
+ env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
bpf_get_btf_vmlinux();
@@ -20801,12 +21205,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
- env->allow_ptr_leaks = bpf_allow_ptr_leaks();
- env->allow_uninit_stack = bpf_allow_uninit_stack();
- env->bypass_spec_v1 = bpf_bypass_spec_v1();
- env->bypass_spec_v4 = bpf_bypass_spec_v4();
- env->bpf_capable = bpf_capable();
-
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
@@ -20972,6 +21370,8 @@ err_release_maps:
env->prog->expected_attach_type = 0;
*prog = env->prog;
+
+ module_put(env->attach_btf_mod);
err_unlock:
if (!is_priv)
mutex_unlock(&bpf_verifier_lock);
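Taken together, the do_misc_fixups() changes above lower each may_goto into a four-insn sequence against a hidden 8-byte stack slot (accounted via stack_depth_extra), and the follow-up loop over subprogs patches a prologue ST that seeds the slot with BPF_MAX_LOOPS. A hedged C rendering of what the patched program computes (not kernel code; BPF_MAX_LOOPS is the value from include/linux/bpf.h):

	#include <stdint.h>

	#define BPF_MAX_LOOPS (8 * 1024 * 1024)

	static uint64_t bounded_loop(void)
	{
		/* prologue ST: init the hidden slot at fp[-stack_depth - 8] */
		uint64_t loop_budget = BPF_MAX_LOOPS;
		uint64_t iters = 0;

		for (;;) {
			/* may_goto lowering: LDX slot, JEQ out, SUB, STX back */
			if (loop_budget == 0)
				break;		/* jump to insn->off + 2 */
			loop_budget--;
			iters++;		/* loop body */
		}
		return iters;			/* bounded by BPF_MAX_LOOPS */
	}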
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index a8350d2d63e6..07e2284bb499 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -562,10 +562,10 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
-BTF_SET8_START(bpf_rstat_kfunc_ids)
+BTF_KFUNCS_START(bpf_rstat_kfunc_ids)
BTF_ID_FLAGS(func, cgroup_rstat_updated)
BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
-BTF_SET8_END(bpf_rstat_kfunc_ids)
+BTF_KFUNCS_END(bpf_rstat_kfunc_ids)
static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
.owner = THIS_MODULE,
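BTF_KFUNCS_START/BTF_KFUNCS_END are drop-in replacements for the old BTF_SET8_START/BTF_SET8_END pair that additionally tag the set as holding kfunc IDs. Registration is unchanged; a sketch of the usual wiring, mirroring how such sets are hooked up elsewhere in the tree:

	static int __init bpf_rstat_kfunc_init(void)
	{
		return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
						 &bpf_rstat_kfunc_set);
	}
	late_initcall(bpf_rstat_kfunc_init);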
diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config
index 4722b998a324..509ee703de15 100644
--- a/kernel/configs/debug.config
+++ b/kernel/configs/debug.config
@@ -40,6 +40,12 @@ CONFIG_UBSAN_ENUM=y
CONFIG_UBSAN_SHIFT=y
CONFIG_UBSAN_UNREACHABLE=y
#
+# Networking Debugging
+#
+CONFIG_NET_DEV_REFCNT_TRACKER=y
+CONFIG_NET_NS_REFCNT_TRACKER=y
+CONFIG_DEBUG_NET=y
+#
# Memory Debugging
#
# CONFIG_DEBUG_PAGEALLOC is not set
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f0f0f71213a1..724e6d7e128f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9302,10 +9302,6 @@ void perf_event_bpf_event(struct bpf_prog *prog,
{
struct perf_bpf_event bpf_event;
- if (type <= PERF_BPF_EVENT_UNKNOWN ||
- type >= PERF_BPF_EVENT_MAX)
- return;
-
switch (type) {
case PERF_BPF_EVENT_PROG_LOAD:
case PERF_BPF_EVENT_PROG_UNLOAD:
@@ -9313,7 +9309,7 @@ void perf_event_bpf_event(struct bpf_prog *prog,
perf_event_bpf_emit_ksymbols(prog, type);
break;
default:
- break;
+ return;
}
if (!atomic_read(&nr_bpf_events))
@@ -10557,7 +10553,7 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
(is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
return -EINVAL;
- if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
+ if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe)
/* only uprobe programs are allowed to be sleepable */
return -EINVAL;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7ac6c52b25eb..0a5c4efc73c3 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1412,14 +1412,14 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
__bpf_kfunc_end_defs();
-BTF_SET8_START(key_sig_kfunc_set)
+BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
-BTF_SET8_END(key_sig_kfunc_set)
+BTF_KFUNCS_END(key_sig_kfunc_set)
static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
.owner = THIS_MODULE,
@@ -1475,9 +1475,9 @@ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
__bpf_kfunc_end_defs();
-BTF_SET8_START(fs_kfunc_set_ids)
+BTF_KFUNCS_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
-BTF_SET8_END(fs_kfunc_set_ids)
+BTF_KFUNCS_END(fs_kfunc_set_ids)
static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
@@ -1629,7 +1629,7 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -2679,6 +2679,7 @@ static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
+ u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
struct bpf_kprobe_multi_link *kmulti_link;
u32 ucount = info->kprobe_multi.count;
@@ -2686,6 +2687,8 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
if (!uaddrs ^ !ucount)
return -EINVAL;
+ if (ucookies && !ucount)
+ return -EINVAL;
kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
info->kprobe_multi.count = kmulti_link->cnt;
@@ -2699,6 +2702,18 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
else
ucount = kmulti_link->cnt;
+ if (ucookies) {
+ if (kmulti_link->cookies) {
+ if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
+ return -EFAULT;
+ } else {
+ for (i = 0; i < ucount; i++) {
+ if (put_user(0, ucookies + i))
+ return -EFAULT;
+ }
+ }
+ }
+
if (kallsyms_show_value(current_cred())) {
if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
return -EFAULT;
@@ -3241,7 +3256,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
.uprobe = uprobe,
};
struct bpf_prog *prog = link->link.prog;
- bool sleepable = prog->aux->sleepable;
+ bool sleepable = prog->sleepable;
struct bpf_run_ctx *old_run_ctx;
int err = 0;
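With the bpf_trace.c change above, link introspection can now report per-probe cookies for kprobe_multi links. A hedged userspace sketch using libbpf's bpf_link_get_info_by_fd() (error handling trimmed; note the kernel rejects a cookies pointer with a zero count, and addrs/count must be supplied together, so both buffers are passed on the second call):

	#include <stdint.h>
	#include <stdlib.h>
	#include <bpf/bpf.h>

	static int dump_cookies(int link_fd)
	{
		struct bpf_link_info info = {};
		__u32 len = sizeof(info);
		__u64 *addrs, *cookies;
		__u32 cnt;
		int err;

		/* first call: learn the probe count */
		err = bpf_link_get_info_by_fd(link_fd, &info, &len);
		if (err)
			return err;
		cnt = info.kprobe_multi.count;

		addrs = calloc(cnt, sizeof(*addrs));
		cookies = calloc(cnt, sizeof(*cookies));
		info.kprobe_multi.addrs = (uintptr_t)addrs;
		info.kprobe_multi.cookies = (uintptr_t)cookies;

		/* second call: kernel fills both arrays */
		err = bpf_link_get_info_by_fd(link_fd, &info, &len);
		free(addrs);
		free(cookies);
		return err;
	}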
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 09522af227f1..b97692854966 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -348,6 +348,13 @@ unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_weight_and);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_andnot);
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
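__bitmap_weight_andnot() computes weight(bitmap1 & ~bitmap2) without a temporary bitmap, completing the existing weight/weight_and family. A hedged usage sketch through the inline wrapper added by the same series:

	#include <linux/bitmap.h>

	/* count bits set in @configured but clear in @active,
	 * i.e. hweight(configured & ~active), with no scratch bitmap */
	static unsigned int count_idle(const unsigned long *configured,
				       const unsigned long *active,
				       unsigned int nbits)
	{
		return bitmap_weight_andnot(configured, active, nbits);
	}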
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index fde0aa244148..a1389db1c30a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,10 +10,77 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <trace/events/napi.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
+static void dql_check_stall(struct dql *dql)
+{
+ unsigned short stall_thrs;
+ unsigned long now;
+
+ stall_thrs = READ_ONCE(dql->stall_thrs);
+ if (!stall_thrs)
+ return;
+
+ now = jiffies;
+ /* Check for a potential stall */
+ if (time_after_eq(now, dql->last_reap + stall_thrs)) {
+ unsigned long hist_head, t, start, end;
+
+ /* We are trying to detect a period of at least @stall_thrs
+ * jiffies without any Tx completions, but during the first half
+ * of which some Tx was posted.
+ */
+dqs_again:
+ hist_head = READ_ONCE(dql->history_head);
+ /* pairs with smp_wmb() in dql_queued() */
+ smp_rmb();
+
+ /* Get the previous entry in the ring buffer, which is the
+ * oldest sample.
+ */
+ start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG;
+
+ /* Advance start to continue from the last reap time */
+ if (time_before(start, dql->last_reap + 1))
+ start = dql->last_reap + 1;
+
+ /* Newest sample we should have already seen a completion for */
+ end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1);
+
+ /* Shrink the search space to [start, (now - stall_thrs/2)] if
+ * `end` is beyond the stall zone
+ */
+ if (time_before(now, end + stall_thrs / 2))
+ end = now - stall_thrs / 2;
+
+ /* Search for the queued time in [t, end] */
+ for (t = start; time_before_eq(t, end); t++)
+ if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG),
+ dql->history))
+ break;
+
+ /* Variable t contains the time of the queue */
+ if (!time_before_eq(t, end))
+ goto no_stall;
+
+ /* The ring buffer was modified in the meantime, retry */
+ if (hist_head != READ_ONCE(dql->history_head))
+ goto dqs_again;
+
+ dql->stall_cnt++;
+ dql->stall_max = max_t(unsigned short, dql->stall_max, now - t);
+
+ trace_dql_stall_detected(dql->stall_thrs, now - t,
+ dql->last_reap, dql->history_head,
+ now, dql->history);
+ }
+no_stall:
+ dql->last_reap = now;
+}
+
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
@@ -110,6 +177,8 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
+
+ dql_check_stall(dql);
}
EXPORT_SYMBOL(dql_completed);
@@ -125,6 +194,10 @@ void dql_reset(struct dql *dql)
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
+
+ dql->last_reap = jiffies;
+ dql->history_head = jiffies / BITS_PER_LONG;
+ memset(dql->history, 0, sizeof(dql->history));
}
EXPORT_SYMBOL(dql_reset);
@@ -133,6 +206,7 @@ void dql_init(struct dql *dql, unsigned int hold_time)
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
+ dql->stall_thrs = 0;
dql_reset(dql);
}
EXPORT_SYMBOL(dql_init);
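A worked example of the detection window in dql_check_stall() above, assuming stall_thrs = 8 jiffies: with last_reap = 1000 and a completion arriving at now = 1012, time_after_eq(1012, 1000 + 8) fires the check. The search then runs from start = 1001 (just past the last reap) up to end = now - stall_thrs/2 = 1008; if the history ring shows Tx queued at, say, t = 1002 with no completion since, stall_cnt is bumped and stall_max grows to now - t = 10 jiffies.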
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65f22c2578b0..6b2b33579f56 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -380,6 +380,47 @@ static void __init test_replace(void)
expect_eq_bitmap(bmap, exp3_1_0, nbits);
}
+static const unsigned long sg_mask[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000035aULL),
+};
+
+static const unsigned long sg_src[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000667ULL),
+};
+
+static const unsigned long sg_gather_exp[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000029ULL),
+};
+
+static const unsigned long sg_scatter_exp[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000021aULL),
+};
+
+static void __init test_bitmap_sg(void)
+{
+ unsigned int nbits = 64;
+ DECLARE_BITMAP(bmap_gather, 100);
+ DECLARE_BITMAP(bmap_scatter, 100);
+ DECLARE_BITMAP(bmap_tmp, 100);
+ DECLARE_BITMAP(bmap_res, 100);
+
+ /* Simple gather call */
+ bitmap_zero(bmap_gather, 100);
+ bitmap_gather(bmap_gather, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_gather_exp, bmap_gather, nbits);
+
+ /* Simple scatter call */
+ bitmap_zero(bmap_scatter, 100);
+ bitmap_scatter(bmap_scatter, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_scatter_exp, bmap_scatter, nbits);
+
+ /* Scatter/gather relationship */
+ bitmap_zero(bmap_tmp, 100);
+ bitmap_gather(bmap_tmp, bmap_scatter, sg_mask, nbits);
+ bitmap_scatter(bmap_res, bmap_tmp, sg_mask, nbits);
+ expect_eq_bitmap(bmap_scatter, bmap_res, nbits);
+}
+
#define PARSE_TIME 0x1
#define NO_LEN 0x2
@@ -1252,6 +1293,7 @@ static void __init selftest(void)
test_copy();
test_bitmap_region();
test_replace();
+ test_bitmap_sg();
test_bitmap_arr32();
test_bitmap_arr64();
test_bitmap_parse();
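The expected values above encode the scatter/gather contract. sg_mask 0x35a sets positions {1,3,4,6,8,9}; sg_src 0x667 sets {0,1,2,5,6,9,10}. bitmap_gather() packs the src bits found at the mask positions, lowest first: positions 1,3,4,6,8,9 read 1,0,0,1,0,1, which packs to 0b101001 = 0x29 (sg_gather_exp). bitmap_scatter() is the inverse: the six low src bits 1,1,1,0,0,1 land on positions 1,3,4,6,8,9, setting {1,3,4,9} = 0x21a (sg_scatter_exp). That inverse relation is exactly what the final gather-then-scatter round-trip check verifies.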
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index 4c40580a99a3..f247089d63c0 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
- struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);
skb->protocol = htons(ETH_P_IPV6);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a663202045dc..62fc2e8f2733 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4687,8 +4687,8 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
gfp_t gfp = gfp_mask;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
- __GFP_NOMEMALLOC;
+ gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
+ __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
PAGE_FRAG_CACHE_MAX_ORDER);
nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
@@ -4701,6 +4701,16 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
return page;
}
+void page_frag_cache_drain(struct page_frag_cache *nc)
+{
+ if (!nc->va)
+ return;
+
+ __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
+ nc->va = NULL;
+}
+EXPORT_SYMBOL(page_frag_cache_drain);
+
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
@@ -4710,9 +4720,9 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
}
EXPORT_SYMBOL(__page_frag_cache_drain);
-void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
+void *__page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask)
{
unsigned int size = PAGE_SIZE;
struct page *page;
@@ -4781,7 +4791,7 @@ refill:
return nc->va + offset;
}
-EXPORT_SYMBOL(page_frag_alloc_align);
+EXPORT_SYMBOL(__page_frag_alloc_align);
/*
* Frees a page fragment allocated out of either a compound or order 0 page.
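page_frag_cache_drain() gives drivers a one-call replacement for the open-coded teardown pattern, and the __page_frag_alloc_align() rename makes room for inline wrappers that sanity-check and pre-mask the alignment. A hedged teardown sketch:

	#include <linux/gfp.h>

	/* previously open-coded as:
	 *	if (cache->va)
	 *		__page_frag_cache_drain(virt_to_head_page(cache->va),
	 *					cache->pagecnt_bias);
	 */
	static void demo_rq_free(struct page_frag_cache *cache)
	{
		page_frag_cache_drain(cache);	/* tolerates a NULL va */
	}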
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d12a17fc0c17..1e36322d83d8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -304,8 +304,8 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
return err;
}
-int ioremap_page_range(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot)
+int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
{
int err;
@@ -318,6 +318,26 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
return err;
}
+int ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ struct vm_struct *area;
+
+ area = find_vm_area((void *)addr);
+ if (!area || !(area->flags & VM_IOREMAP)) {
+ WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
+ return -EINVAL;
+ }
+ if (addr != (unsigned long)area->addr ||
+ (void *)end != area->addr + get_vm_area_size(area)) {
+ WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
+ addr, end, (long)area->addr,
+ (long)area->addr + get_vm_area_size(area));
+ return -ERANGE;
+ }
+ return vmap_page_range(addr, end, phys_addr, prot);
+}
+
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pgtbl_mod_mask *mask)
{
@@ -635,6 +655,58 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
return err;
}
+static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
+ unsigned long end)
+{
+ might_sleep();
+ if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
+ return -EINVAL;
+ if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
+ return -EINVAL;
+ if ((end - start) >> PAGE_SHIFT > totalram_pages())
+ return -E2BIG;
+ if (start < (unsigned long)area->addr ||
+ (void *)end > area->addr + get_vm_area_size(area))
+ return -ERANGE;
+ return 0;
+}
+
+/**
+ * vm_area_map_pages - map pages inside given sparse vm_area
+ * @area: vm_area
+ * @start: start address inside vm_area
+ * @end: end address inside vm_area
+ * @pages: pages to map (always PAGE_SIZE pages)
+ */
+int vm_area_map_pages(struct vm_struct *area, unsigned long start,
+ unsigned long end, struct page **pages)
+{
+ int err;
+
+ err = check_sparse_vm_area(area, start, end);
+ if (err)
+ return err;
+
+ return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
+}
+
+/**
+ * vm_area_unmap_pages - unmap pages inside given sparse vm_area
+ * @area: vm_area
+ * @start: start address inside vm_area
+ * @end: end address inside vm_area
+ */
+void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
+ unsigned long end)
+{
+ if (check_sparse_vm_area(area, start, end))
+ return;
+
+ vunmap_range(start, end);
+}
+
int is_vmalloc_or_module_addr(const void *x)
{
/*
@@ -3809,9 +3881,9 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
if (flags & VMAP_RAM)
copied = vmap_ram_vread_iter(iter, addr, n, flags);
- else if (!(vm && (vm->flags & VM_IOREMAP)))
+ else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
copied = aligned_vread_iter(iter, addr, n);
- else /* IOREMAP area is treated as memory hole */
+ else /* IOREMAP | SPARSE area is treated as memory hole */
copied = zero_iter(iter, n);
addr += copied;
@@ -4402,6 +4474,9 @@ static int s_show(struct seq_file *m, void *p)
if (v->flags & VM_IOREMAP)
seq_puts(m, " ioremap");
+ if (v->flags & VM_SPARSE)
+ seq_puts(m, " sparse");
+
if (v->flags & VM_ALLOC)
seq_puts(m, " vmalloc");
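VM_SPARSE plus vm_area_map_pages()/vm_area_unmap_pages() let a subsystem reserve a large region up front and populate it page by page on demand (bpf_arena in this series is the first user). A hedged sketch, error handling trimmed:

	#include <linux/vmalloc.h>

	/* reserve a sparse region; nothing is mapped yet */
	static struct vm_struct *demo_sparse_reserve(unsigned long size)
	{
		return get_vm_area(size, VM_SPARSE);
	}

	/* later: back one page at a page-aligned offset */
	static int demo_sparse_fill(struct vm_struct *area,
				    unsigned long off, struct page *page)
	{
		unsigned long start = (unsigned long)area->addr + off;

		return vm_area_map_pages(area, start, start + PAGE_SIZE, &page);
	}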
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 407b2335f091..39876eff51d2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -504,28 +504,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
-}
-
static __be16 vlan_parse_protocol(const struct sk_buff *skb)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
@@ -559,7 +537,7 @@ static const struct header_ops vlan_passthru_header_ops = {
.parse_protocol = vlan_parse_protocol,
};
-static struct device_type vlan_type = {
+static const struct device_type vlan_type = {
.name = "vlan",
};
@@ -627,7 +605,7 @@ static int vlan_dev_init(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
@@ -784,9 +762,9 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
static int vlan_dev_get_iflink(const struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- return real_dev->ifindex;
+ return READ_ONCE(real_dev->ifindex);
}
static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 7825c129742a..87b959da00cd 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -163,48 +163,34 @@ void vlan_proc_rem_dev(struct net_device *vlandev)
* The following few functions build the content of /proc/net/vlan/config
*/
-/* start read of /proc/net/vlan/config */
-static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(rcu)
+static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos)
{
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct net *net = seq_file_net(seq);
- loff_t i = 1;
-
- rcu_read_lock();
- if (*pos == 0)
- return SEQ_START_TOKEN;
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
if (!is_vlan_dev(dev))
continue;
-
- if (i++ == *pos)
- return dev;
+ *pos = dev->ifindex;
+ return dev;
}
+ return NULL;
+}
+
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev;
- struct net *net = seq_file_net(seq);
-
++*pos;
-
- dev = v;
- if (v == SEQ_START_TOKEN)
- dev = net_device_entry(&net->dev_base_head);
-
- for_each_netdev_continue_rcu(net, dev) {
- if (!is_vlan_dev(dev))
- continue;
-
- return dev;
- }
-
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
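The rewrite above swaps the positional scan (count VLAN devices until the saved *pos is reached) for ifindex-keyed resumption via for_each_netdev_dump(). Because *pos now stores a stable key rather than an ordinal, devices added or removed between reads of the seq_file can no longer cause entries to be skipped or shown twice, and each restart goes through the netns ifindex xarray instead of walking the whole device list.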
diff --git a/net/Kconfig b/net/Kconfig
index 4adc47d0c9c2..3e57ccf0da27 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -331,6 +331,7 @@ config NET_RX_BUSY_POLL
config BQL
bool
+ prompt "Enable Byte Queue Limits"
depends on SYSFS
select DQL
default y
diff --git a/net/Makefile b/net/Makefile
index b06b5539e7a6..65bb8c72a35e 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
-obj-$(CONFIG_UNIX_SCM) += unix/
+obj-$(CONFIG_UNIX) += unix/
obj-y += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 28a939d56090..4c7e85534324 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -684,7 +684,7 @@ static bool batadv_dat_forward_data(struct batadv_priv *bat_priv,
cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
- goto out;
+ return ret;
batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
@@ -728,7 +728,6 @@ free_orig:
batadv_orig_node_put(cand[i].orig_node);
}
-out:
kfree(cand);
return ret;
}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 5fc754b0b3f7..75119f1ffccc 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -691,29 +691,31 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
"%s%s", BATADV_UEV_TYPE_VAR,
batadv_uev_type_str[type]);
if (!uevent_env[0])
- goto out;
+ goto report_error;
uevent_env[1] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_ACTION_VAR,
batadv_uev_action_str[action]);
if (!uevent_env[1])
- goto out;
+ goto free_first_env;
/* If the event is DEL, ignore the data field */
if (action != BATADV_UEV_DEL) {
uevent_env[2] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_DATA_VAR, data);
if (!uevent_env[2])
- goto out;
+ goto free_second_env;
}
ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
-out:
- kfree(uevent_env[0]);
- kfree(uevent_env[1]);
kfree(uevent_env[2]);
+free_second_env:
+ kfree(uevent_env[1]);
+free_first_env:
+ kfree(uevent_env[0]);
if (ret)
+report_error:
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
batadv_uev_type_str[type],
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 870dcd7f1786..8ca854a75a32 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2024.0"
+#define BATADV_SOURCE_VERSION "2024.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 1f7ed9d4f6fd..0954757f0b8b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -15,7 +15,6 @@
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/export.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 4eb1b3ced0d2..27520a8a486f 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -572,7 +572,7 @@ static void netdev_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static struct device_type bt_type = {
+static const struct device_type bt_type = {
.name = "bluetooth",
};
@@ -892,7 +892,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
chan->ops = &bt_6lowpan_chan_ops;
err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
- addr, dst_type);
+ addr, dst_type, L2CAP_CONN_TIMEOUT);
BT_DBG("chan %p err %d", chan, err);
if (err < 0)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index da7cac0a1b71..6b2b65a66700 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -62,14 +62,6 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
-config BT_HS
- bool "Bluetooth High Speed (HS) features"
- depends on BT_BREDR
- help
- Bluetooth High Speed includes support for off-loading
- Bluetooth connections via 802.11 (wifi) physical layer
- available with Bluetooth version 3.0 or later.
-
config BT_LE
bool "Bluetooth Low Energy (LE) features"
depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 141ac1fda0bf..628d448d78be 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -21,7 +21,6 @@ bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_LE) += iso.o
-bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o
bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
deleted file mode 100644
index e7adb8a98cf9..000000000000
--- a/net/bluetooth/a2mp.c
+++ /dev/null
@@ -1,1054 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-#define A2MP_FEAT_EXT 0x8000
-
-/* Global AMP Manager list */
-static LIST_HEAD(amp_mgr_list);
-static DEFINE_MUTEX(amp_mgr_list_lock);
-
-/* A2MP build & send command helper functions */
-static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
-{
- struct a2mp_cmd *cmd;
- int plen;
-
- plen = sizeof(*cmd) + len;
- cmd = kzalloc(plen, GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->code = code;
- cmd->ident = ident;
- cmd->len = cpu_to_le16(len);
-
- memcpy(cmd->data, data, len);
-
- return cmd;
-}
-
-static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
-{
- struct l2cap_chan *chan = mgr->a2mp_chan;
- struct a2mp_cmd *cmd;
- u16 total_len = len + sizeof(*cmd);
- struct kvec iv;
- struct msghdr msg;
-
- cmd = __a2mp_build(code, ident, len, data);
- if (!cmd)
- return;
-
- iv.iov_base = cmd;
- iv.iov_len = total_len;
-
- memset(&msg, 0, sizeof(msg));
-
- iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
-
- l2cap_chan_send(chan, &msg, total_len);
-
- kfree(cmd);
-}
-
-static u8 __next_ident(struct amp_mgr *mgr)
-{
- if (++mgr->ident == 0)
- mgr->ident = 1;
-
- return mgr->ident;
-}
-
-static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
-{
- struct amp_mgr *mgr;
-
- mutex_lock(&amp_mgr_list_lock);
- list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (test_and_clear_bit(state, &mgr->state)) {
- amp_mgr_get(mgr);
- mutex_unlock(&amp_mgr_list_lock);
- return mgr;
- }
- }
- mutex_unlock(&amp_mgr_list_lock);
-
- return NULL;
-}
-
-/* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
-{
- struct hci_dev *hdev;
- int i = 1;
-
- cl[0].id = AMP_ID_BREDR;
- cl[0].type = AMP_TYPE_BREDR;
- cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP) {
- cl[i].id = hdev->id;
- cl[i].type = hdev->amp_type;
- if (test_bit(HCI_UP, &hdev->flags))
- cl[i].status = hdev->amp_status;
- else
- cl[i].status = AMP_STATUS_POWERED_DOWN;
- i++;
- }
- }
-}
-
-/* Processing A2MP messages */
-static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cmd_rej *rej = (void *) skb->data;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rej))
- return -EINVAL;
-
- BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason));
-
- skb_pull(skb, sizeof(*rej));
-
- return 0;
-}
-
-static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_req *req = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_discov_rsp *rsp;
- u16 ext_feat;
- u8 num_ctrl;
- struct hci_dev *hdev;
-
- if (len < sizeof(*req))
- return -EINVAL;
-
- skb_pull(skb, sizeof(*req));
-
- ext_feat = le16_to_cpu(req->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- read_lock(&hci_dev_list_lock);
-
- /* at minimum the BR/EDR needs to be listed */
- num_ctrl = 1;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP)
- num_ctrl++;
- }
-
- len = struct_size(rsp, cl, num_ctrl);
- rsp = kmalloc(len, GFP_ATOMIC);
- if (!rsp) {
- read_unlock(&hci_dev_list_lock);
- return -ENOMEM;
- }
-
- rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- rsp->ext_feat = 0;
-
- __a2mp_add_cl(mgr, rsp->cl);
-
- read_unlock(&hci_dev_list_lock);
-
- a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
-
- kfree(rsp);
- return 0;
-}
-
-static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_cl *cl;
- u16 ext_feat;
- bool found = false;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- len -= sizeof(*rsp);
- skb_pull(skb, sizeof(*rsp));
-
- ext_feat = le16_to_cpu(rsp->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- cl = (void *) skb->data;
- while (len >= sizeof(*cl)) {
- BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type,
- cl->status);
-
- if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
- struct a2mp_info_req req;
-
- found = true;
-
- memset(&req, 0, sizeof(req));
-
- req.id = cl->id;
- a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
- sizeof(req), &req);
- }
-
- len -= sizeof(*cl);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* Fall back to L2CAP init sequence */
- if (!found) {
- struct l2cap_conn *conn = mgr->l2cap_conn;
- struct l2cap_chan *chan;
-
- mutex_lock(&conn->chan_lock);
-
- list_for_each_entry(chan, &conn->chan_l, list) {
-
- BT_DBG("chan %p state %s", chan,
- state_to_string(chan->state));
-
- if (chan->scid == L2CAP_CID_A2MP)
- continue;
-
- l2cap_chan_lock(chan);
-
- if (chan->state == BT_CONNECT)
- l2cap_send_conn_req(chan);
-
- l2cap_chan_unlock(chan);
- }
-
- mutex_unlock(&conn->chan_lock);
- }
-
- return 0;
-}
-
-static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cl *cl = (void *) skb->data;
-
- while (skb->len >= sizeof(*cl)) {
- BT_DBG("Controller id %u type %u status %u", cl->id, cl->type,
- cl->status);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* TODO send A2MP_CHANGE_RSP */
-
- return 0;
-}
-
-static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- a2mp_send_getinfo_rsp(hdev);
-}
-
-static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct hci_request hreq;
- int err = 0;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->dev_type != HCI_AMP) {
- struct a2mp_info_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = req->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- set_bit(READ_LOC_AMP_INFO, &mgr->state);
- hci_req_init(&hreq, hdev);
- hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
- err = hci_req_run(&hreq, read_local_amp_info_complete);
- if (err < 0)
- a2mp_send_getinfo_rsp(hdev);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
- struct a2mp_amp_assoc_req req;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rsp))
- return -EINVAL;
-
- BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status);
-
- if (rsp->status)
- return -EINVAL;
-
- ctrl = amp_ctrl_add(mgr, rsp->id);
- if (!ctrl)
- return -ENOMEM;
-
- memset(&req, 0, sizeof(req));
-
- req.id = rsp->id;
- a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
- &req);
-
- skb_pull(skb, sizeof(*rsp));
- return 0;
-}
-
-static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct amp_mgr *tmp;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- /* Make sure that other request is not processed */
- tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
- struct a2mp_amp_assoc_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.id = req->id;
-
- if (tmp) {
- rsp.status = A2MP_STATUS_COLLISION_OCCURED;
- amp_mgr_put(tmp);
- } else {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- amp_read_loc_assoc(hdev, mgr);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct hci_dev *hdev;
- struct amp_ctrl *ctrl;
- struct hci_conn *hcon;
- size_t assoc_len;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- assoc_len = len - sizeof(*rsp);
-
- BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
- assoc_len);
-
- if (rsp->status)
- return -EINVAL;
-
- /* Save remote ASSOC data */
- ctrl = amp_ctrl_lookup(mgr, rsp->id);
- if (ctrl) {
- u8 *assoc;
-
- assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- /* Create Phys Link */
- hdev = hci_dev_get(rsp->id);
- if (!hdev)
- return -EINVAL;
-
- hcon = phylink_add(hdev, mgr, rsp->id, true);
- if (!hcon)
- goto done;
-
- BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id);
-
- mgr->bredr_chan->remote_amp_id = rsp->id;
-
- amp_create_phylink(hdev, mgr, hcon);
-
-done:
- hci_dev_put(hdev);
- skb_pull(skb, len);
- return 0;
-}
-
-static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
- if (!ctrl) {
- ctrl = amp_ctrl_add(mgr, rsp.remote_id);
- if (ctrl) {
- amp_ctrl_get(ctrl);
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- goto send_rsp;
- }
- }
-
- if (ctrl) {
- size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
- u8 *assoc;
-
- assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- hci_dev_put(hdev);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- hcon = phylink_add(hdev, mgr, req->local_id, false);
- if (hcon) {
- amp_accept_phylink(hdev, mgr, hcon);
- rsp.status = A2MP_STATUS_SUCCESS;
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- }
-
-send_rsp:
- if (hdev)
- hci_dev_put(hdev);
-
- /* Reply error now and success after HCI Write Remote AMP Assoc
- command complete with success status
- */
- if (rsp.status != A2MP_STATUS_SUCCESS) {
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
- sizeof(rsp), &rsp);
- } else {
- set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
- mgr->ident = hdr->ident;
- }
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
- rsp.status = A2MP_STATUS_SUCCESS;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &mgr->l2cap_conn->hcon->dst);
- if (!hcon) {
- bt_dev_err(hdev, "no phys link exist");
- rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
- goto clean;
- }
-
- /* TODO Disconnect Phys Link here */
-
-clean:
- hci_dev_put(hdev);
-
-send_rsp:
- a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code);
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-/* Handle A2MP signalling */
-static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct a2mp_cmd *hdr;
- struct amp_mgr *mgr = chan->data;
- int err = 0;
-
- amp_mgr_get(mgr);
-
- while (skb->len >= sizeof(*hdr)) {
- u16 len;
-
- hdr = (void *) skb->data;
- len = le16_to_cpu(hdr->len);
-
- BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len);
-
- skb_pull(skb, sizeof(*hdr));
-
- if (len > skb->len || !hdr->ident) {
- err = -EINVAL;
- break;
- }
-
- mgr->ident = hdr->ident;
-
- switch (hdr->code) {
- case A2MP_COMMAND_REJ:
- a2mp_command_rej(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_REQ:
- err = a2mp_discover_req(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_NOTIFY:
- err = a2mp_change_notify(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_REQ:
- err = a2mp_getinfo_req(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_REQ:
- err = a2mp_getampassoc_req(mgr, skb, hdr);
- break;
-
- case A2MP_CREATEPHYSLINK_REQ:
- err = a2mp_createphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCONNPHYSLINK_REQ:
- err = a2mp_discphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_RSP:
- err = a2mp_discover_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_RSP:
- err = a2mp_getinfo_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_RSP:
- err = a2mp_getampassoc_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_RSP:
- case A2MP_CREATEPHYSLINK_RSP:
- case A2MP_DISCONNPHYSLINK_RSP:
- err = a2mp_cmd_rsp(mgr, skb, hdr);
- break;
-
- default:
- BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
- err = -EINVAL;
- break;
- }
- }
-
- if (err) {
- struct a2mp_cmd_rej rej;
-
- memset(&rej, 0, sizeof(rej));
-
- rej.reason = cpu_to_le16(0);
- hdr = (void *) skb->data;
-
- BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
-
- a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
- &rej);
- }
-
- /* Always free skb and return success error code to prevent
- from sending L2CAP Disconnect over A2MP channel */
- kfree_skb(skb);
-
- amp_mgr_put(mgr);
-
- return 0;
-}
-
-static void a2mp_chan_close_cb(struct l2cap_chan *chan)
-{
- l2cap_chan_put(chan);
-}
-
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
- int err)
-{
- struct amp_mgr *mgr = chan->data;
-
- if (!mgr)
- return;
-
- BT_DBG("chan %p state %s", chan, state_to_string(state));
-
- chan->state = state;
-
- switch (state) {
- case BT_CLOSED:
- if (mgr)
- amp_mgr_put(mgr);
- break;
- }
-}
-
-static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long hdr_len,
- unsigned long len, int nb)
-{
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- return skb;
-}
-
-static const struct l2cap_ops a2mp_chan_ops = {
- .name = "L2CAP A2MP channel",
- .recv = a2mp_chan_recv_cb,
- .close = a2mp_chan_close_cb,
- .state_change = a2mp_chan_state_change_cb,
- .alloc_skb = a2mp_chan_alloc_skb_cb,
-
- /* Not implemented for A2MP */
- .new_connection = l2cap_chan_no_new_connection,
- .teardown = l2cap_chan_no_teardown,
- .ready = l2cap_chan_no_ready,
- .defer = l2cap_chan_no_defer,
- .resume = l2cap_chan_no_resume,
- .set_shutdown = l2cap_chan_no_set_shutdown,
- .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
-};
-
-static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
-{
- struct l2cap_chan *chan;
- int err;
-
- chan = l2cap_chan_create();
- if (!chan)
- return NULL;
-
- BT_DBG("chan %p", chan);
-
- chan->chan_type = L2CAP_CHAN_FIXED;
- chan->scid = L2CAP_CID_A2MP;
- chan->dcid = L2CAP_CID_A2MP;
- chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-
- chan->ops = &a2mp_chan_ops;
-
- l2cap_chan_set_defaults(chan);
- chan->remote_max_tx = chan->max_tx;
- chan->remote_tx_win = chan->tx_win;
-
- chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
-
- skb_queue_head_init(&chan->tx_q);
-
- chan->mode = L2CAP_MODE_ERTM;
-
- err = l2cap_ertm_init(chan);
- if (err < 0) {
- l2cap_chan_del(chan, 0);
- return NULL;
- }
-
- chan->conf_state = 0;
-
- if (locked)
- __l2cap_chan_add(conn, chan);
- else
- l2cap_chan_add(conn, chan);
-
- chan->remote_mps = chan->omtu;
- chan->mps = chan->omtu;
-
- chan->state = BT_CONNECTED;
-
- return chan;
-}
-
-/* AMP Manager functions */
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- kref_get(&mgr->kref);
-
- return mgr;
-}
-
-static void amp_mgr_destroy(struct kref *kref)
-{
- struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&amp_mgr_list_lock);
- list_del(&mgr->list);
- mutex_unlock(&amp_mgr_list_lock);
-
- amp_ctrl_list_flush(mgr);
- kfree(mgr);
-}
-
-int amp_mgr_put(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- return kref_put(&mgr->kref, &amp_mgr_destroy);
-}
-
-static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
-{
- struct amp_mgr *mgr;
- struct l2cap_chan *chan;
-
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return NULL;
-
- BT_DBG("conn %p mgr %p", conn, mgr);
-
- mgr->l2cap_conn = conn;
-
- chan = a2mp_chan_open(conn, locked);
- if (!chan) {
- kfree(mgr);
- return NULL;
- }
-
- mgr->a2mp_chan = chan;
- chan->data = mgr;
-
- conn->hcon->amp_mgr = mgr;
-
- kref_init(&mgr->kref);
-
- /* Remote AMP ctrl list initialization */
- INIT_LIST_HEAD(&mgr->amp_ctrls);
- mutex_init(&mgr->amp_ctrls_lock);
-
- mutex_lock(&amp_mgr_list_lock);
- list_add(&mgr->list, &amp_mgr_list);
- mutex_unlock(&amp_mgr_list_lock);
-
- return mgr;
-}
-
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- struct amp_mgr *mgr;
-
- if (conn->hcon->type != ACL_LINK)
- return NULL;
-
- mgr = amp_mgr_create(conn, false);
- if (!mgr) {
- BT_ERR("Could not create AMP manager");
- return NULL;
- }
-
- BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
-
- return mgr->a2mp_chan;
-}
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
-{
- struct amp_mgr *mgr;
- struct a2mp_info_rsp rsp;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = hdev->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- if (hdev->amp_type != AMP_TYPE_BREDR) {
- rsp.status = 0;
- rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
- rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
- rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
- rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
- rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
- }
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_amp_assoc_rsp *rsp;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
- rsp = kzalloc(len, GFP_KERNEL);
- if (!rsp) {
- amp_mgr_put(mgr);
- return;
- }
-
- rsp->id = hdev->id;
-
- if (status) {
- rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
- } else {
- rsp->status = A2MP_STATUS_SUCCESS;
- memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
- amp_mgr_put(mgr);
- kfree(rsp);
-}
-
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_physlink_req *req;
- struct l2cap_chan *bredr_chan;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
- if (!mgr)
- return;
-
- len = sizeof(*req) + loc_assoc->len;
-
- BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
-
- req = kzalloc(len, GFP_KERNEL);
- if (!req) {
- amp_mgr_put(mgr);
- return;
- }
-
- bredr_chan = mgr->bredr_chan;
- if (!bredr_chan)
- goto clean;
-
- req->local_id = hdev->id;
- req->remote_id = bredr_chan->remote_amp_id;
- memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
-
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
-
-clean:
- amp_mgr_put(mgr);
- kfree(req);
-}
-
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct a2mp_physlink_rsp rsp;
- struct hci_conn *hs_hcon;
-
- mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
- if (!mgr)
- return;
-
- memset(&rsp, 0, sizeof(rsp));
-
- hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
- if (!hs_hcon) {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- } else {
- rsp.remote_id = hs_hcon->remote_id;
- rsp.status = A2MP_STATUS_SUCCESS;
- }
-
- BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
- status);
-
- rsp.local_id = hdev->id;
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_discover_amp(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct a2mp_discov_req req;
-
- BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
-
- if (!mgr) {
- mgr = amp_mgr_create(conn, true);
- if (!mgr)
- return;
- }
-
- mgr->bredr_chan = chan;
-
- memset(&req, 0, sizeof(req));
-
- req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- req.ext_feat = 0;
- a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
-}
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
deleted file mode 100644
index 2fd253a61a2a..000000000000
--- a/net/bluetooth/a2mp.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __A2MP_H
-#define __A2MP_H
-
-#include <net/bluetooth/l2cap.h>
-
-enum amp_mgr_state {
- READ_LOC_AMP_INFO,
- READ_LOC_AMP_ASSOC,
- READ_LOC_AMP_ASSOC_FINAL,
- WRITE_REMOTE_AMP_ASSOC,
-};
-
-struct amp_mgr {
- struct list_head list;
- struct l2cap_conn *l2cap_conn;
- struct l2cap_chan *a2mp_chan;
- struct l2cap_chan *bredr_chan;
- struct kref kref;
- __u8 ident;
- __u8 handle;
- unsigned long state;
- unsigned long flags;
-
- struct list_head amp_ctrls;
- struct mutex amp_ctrls_lock;
-};
-
-struct a2mp_cmd {
- __u8 code;
- __u8 ident;
- __le16 len;
- __u8 data[];
-} __packed;
-
-/* A2MP command codes */
-#define A2MP_COMMAND_REJ 0x01
-struct a2mp_cmd_rej {
- __le16 reason;
- __u8 data[];
-} __packed;
-
-#define A2MP_DISCOVER_REQ 0x02
-struct a2mp_discov_req {
- __le16 mtu;
- __le16 ext_feat;
-} __packed;
-
-struct a2mp_cl {
- __u8 id;
- __u8 type;
- __u8 status;
-} __packed;
-
-#define A2MP_DISCOVER_RSP 0x03
-struct a2mp_discov_rsp {
- __le16 mtu;
- __le16 ext_feat;
- struct a2mp_cl cl[];
-} __packed;
-
-#define A2MP_CHANGE_NOTIFY 0x04
-#define A2MP_CHANGE_RSP 0x05
-
-#define A2MP_GETINFO_REQ 0x06
-struct a2mp_info_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETINFO_RSP 0x07
-struct a2mp_info_rsp {
- __u8 id;
- __u8 status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le16 pal_cap;
- __le16 assoc_size;
-} __packed;
-
-#define A2MP_GETAMPASSOC_REQ 0x08
-struct a2mp_amp_assoc_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETAMPASSOC_RSP 0x09
-struct a2mp_amp_assoc_rsp {
- __u8 id;
- __u8 status;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_REQ 0x0A
-#define A2MP_DISCONNPHYSLINK_REQ 0x0C
-struct a2mp_physlink_req {
- __u8 local_id;
- __u8 remote_id;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_RSP 0x0B
-#define A2MP_DISCONNPHYSLINK_RSP 0x0D
-struct a2mp_physlink_rsp {
- __u8 local_id;
- __u8 remote_id;
- __u8 status;
-} __packed;
-
-/* A2MP response status */
-#define A2MP_STATUS_SUCCESS 0x00
-#define A2MP_STATUS_INVALID_CTRL_ID 0x01
-#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
-#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
-#define A2MP_STATUS_COLLISION_OCCURED 0x03
-#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
-#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
-#define A2MP_STATUS_SECURITY_VIOLATION 0x06
-
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-int amp_mgr_put(struct amp_mgr *mgr);
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb);
-void a2mp_discover_amp(struct l2cap_chan *chan);
-#else
-static inline int amp_mgr_put(struct amp_mgr *mgr)
-{
- return 0;
-}
-
-static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- return NULL;
-}
-
-static inline void a2mp_discover_amp(struct l2cap_chan *chan)
-{
-}
-#endif
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
-
-#endif /* __A2MP_H */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index b93464ac3517..67604ccec2f4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -309,14 +309,11 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
- release_sock(sk);
return err;
}
@@ -346,8 +343,6 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb);
- release_sock(sk);
-
if (flags & MSG_TRUNC)
copied = skblen;
@@ -570,10 +565,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (sk->sk_state == BT_LISTEN)
return -EINVAL;
- lock_sock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
err = put_user(amount, (int __user *)arg);
break;
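
The SIOCINQ hunk above narrows the critical section: skb_peek() only needs the
receive queue to be stable while the head skb's length is read, so the queue's
own spinlock suffices and the heavyweight lock_sock() can go. A minimal sketch
of the pattern, with a hypothetical helper name (the calls themselves are the
real skb/sock primitives):

static int bt_queued_bytes(struct sock *sk)
{
	struct sk_buff *skb;
	int amount;

	/* The queue spinlock keeps the head skb from being unlinked
	 * while its length is read; no other socket state is touched.
	 */
	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	amount = skb ? skb->len : 0;
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
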
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
deleted file mode 100644
index 5d698f19868c..000000000000
--- a/net/bluetooth/amp.c
+++ /dev/null
@@ -1,590 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci.h>
-#include <net/bluetooth/hci_core.h>
-#include <crypto/hash.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-/* Remote AMP Controllers interface */
-void amp_ctrl_get(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- kref_get(&ctrl->kref);
-}
-
-static void amp_ctrl_destroy(struct kref *kref)
-{
- struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
-
- BT_DBG("ctrl %p", ctrl);
-
- kfree(ctrl->assoc);
- kfree(ctrl);
-}
-
-int amp_ctrl_put(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- return kref_put(&ctrl->kref, &amp_ctrl_destroy);
-}
-
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return NULL;
-
- kref_init(&ctrl->kref);
- ctrl->id = id;
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_add(&ctrl->list, &mgr->amp_ctrls);
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- BT_DBG("mgr %p ctrl %p", mgr, ctrl);
-
- return ctrl;
-}
-
-void amp_ctrl_list_flush(struct amp_mgr *mgr)
-{
- struct amp_ctrl *ctrl, *n;
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
- list_del(&ctrl->list);
- amp_ctrl_put(ctrl);
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-}
-
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- BT_DBG("mgr %p id %u", mgr, id);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
- if (ctrl->id == id) {
- amp_ctrl_get(ctrl);
- mutex_unlock(&mgr->amp_ctrls_lock);
- return ctrl;
- }
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- return NULL;
-}
-
-/* Physical Link interface */
-static u8 __next_handle(struct amp_mgr *mgr)
-{
- if (++mgr->handle == 0)
- mgr->handle = 1;
-
- return mgr->handle;
-}
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out)
-{
- bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
- struct hci_conn *hcon;
- u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
-
- hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
- if (!hcon)
- return NULL;
-
- BT_DBG("hcon %p dst %pMR", hcon, dst);
-
- hcon->state = BT_CONNECT;
- hcon->attempt++;
- hcon->remote_id = remote_id;
- hcon->amp_mgr = amp_mgr_get(mgr);
-
- return hcon;
-}
-
-/* AMP crypto key generation interface */
-static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
-{
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int ret;
-
- if (!ksize)
- return -EINVAL;
-
- tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- if (IS_ERR(tfm)) {
- BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- ret = crypto_shash_setkey(tfm, key, ksize);
- if (ret) {
- BT_DBG("crypto_ahash_setkey failed: err %d", ret);
- goto failed;
- }
-
- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto failed;
- }
-
- shash->tfm = tfm;
-
- ret = crypto_shash_digest(shash, plaintext, psize, output);
-
- kfree(shash);
-
-failed:
- crypto_free_shash(tfm);
- return ret;
-}
-
-int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
-{
- struct hci_dev *hdev = conn->hdev;
- struct link_key *key;
- u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
- u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
- int err;
-
- if (!hci_conn_check_link_mode(conn))
- return -EACCES;
-
- BT_DBG("conn %p key_type %d", conn, conn->key_type);
-
- /* Legacy key */
- if (conn->key_type < 3) {
- bt_dev_err(hdev, "legacy key type %u", conn->key_type);
- return -EACCES;
- }
-
- *type = conn->key_type;
- *len = HCI_AMP_LINK_KEY_SIZE;
-
- key = hci_find_link_key(hdev, &conn->dst);
- if (!key) {
- BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
- return -EACCES;
- }
-
- /* BR/EDR Link Key concatenated together with itself */
- memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
- memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
-
- /* Derive Generic AMP Link Key (gamp) */
- err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
- if (err) {
- bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err);
- return err;
- }
-
- if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
- BT_DBG("Use Generic AMP Key (gamp)");
- memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
- return err;
- }
-
- /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
- return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
-}
-
-static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
- struct amp_assoc *assoc = &hdev->loc_assoc;
- size_t rem_len, frag_len;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- if (rp->status)
- goto send_rsp;
-
- frag_len = skb->len - sizeof(*rp);
- rem_len = __le16_to_cpu(rp->rem_len);
-
- if (rem_len > frag_len) {
- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
- assoc->offset += frag_len;
-
- /* Read other fragments */
- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
- return;
- }
-
- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
- assoc->len = assoc->offset + rem_len;
- assoc->offset = 0;
-
-send_rsp:
- /* Send A2MP Rsp when all fragments are received */
- a2mp_send_getampassoc_rsp(hdev, rp->status);
- a2mp_send_create_phy_link_req(hdev, rp->status);
-}
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct hci_request req;
- int err;
-
- BT_DBG("%s handle %u", hdev->name, phy_handle);
-
- cp.phy_handle = phy_handle;
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
- cp.len_so_far = cpu_to_le16(loc_assoc->offset);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct hci_request req;
- int err;
-
- memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
- memset(&cp, 0, sizeof(cp));
-
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct hci_request req;
- int err;
-
- if (!mgr)
- return;
-
- cp.phy_handle = hcon->handle;
- cp.len_so_far = cpu_to_le16(0);
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
-
- /* Read Local AMP Assoc final link information data */
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
-
- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
- hdev->name, rp->status, rp->phy_handle);
-
- if (rp->status)
- return;
-
- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
-/* Write AMP Assoc data fragments, returns true with last fragment written */
-static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_write_remote_amp_assoc *cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct amp_ctrl *ctrl;
- struct hci_request req;
- u16 frag_len, len;
-
- ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
- if (!ctrl)
- return false;
-
- if (!ctrl->assoc_rem_len) {
- BT_DBG("all fragments are written");
- ctrl->assoc_rem_len = ctrl->assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- return true;
- }
-
- frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
- len = frag_len + sizeof(*cp);
-
- cp = kzalloc(len, GFP_KERNEL);
- if (!cp) {
- amp_ctrl_put(ctrl);
- return false;
- }
-
- BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
- hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
-
- cp->phy_handle = hcon->handle;
- cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
- cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
- memcpy(cp->frag, ctrl->assoc, frag_len);
-
- ctrl->assoc_len_so_far += frag_len;
- ctrl->assoc_rem_len -= frag_len;
-
- amp_ctrl_put(ctrl);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
- hci_req_run_skb(&req, write_remote_amp_assoc_complete);
-
- kfree(cp);
-
- return false;
-}
-
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- /* Send A2MP create phylink rsp when all fragments are written */
- if (amp_write_rem_assoc_frag(hdev, hcon))
- a2mp_send_create_phy_link_rsp(hdev, 0);
-}
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
-
- amp_write_rem_assoc_frag(hdev, hcon);
-}
-
-static void create_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_create_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (status) {
- struct hci_conn *hcon;
-
- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (hcon)
- hci_conn_del(hcon);
- } else {
- amp_write_remote_assoc(hdev, cp->phy_handle);
- }
-
- hci_dev_unlock(hdev);
-}
-
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_create_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, create_phylink_complete);
-}
-
-static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_accept_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
- if (!cp)
- return;
-
- amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_accept_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, accept_phylink_complete);
-}
-
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
-{
- struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
- struct amp_mgr *mgr = hs_hcon->amp_mgr;
- struct l2cap_chan *bredr_chan;
-
- BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
-
- if (!bredr_hdev || !mgr || !mgr->bredr_chan)
- return;
-
- bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
- bredr_chan->remote_amp_id = hs_hcon->remote_id;
- bredr_chan->local_amp_id = hs_hcon->hdev->id;
- bredr_chan->hs_hcon = hs_hcon;
- bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
-
- __l2cap_physical_cfm(bredr_chan, 0);
-
- l2cap_chan_unlock(bredr_chan);
-
- hci_dev_put(bredr_hdev);
-}
-
-void amp_create_logical_link(struct l2cap_chan *chan)
-{
- struct hci_conn *hs_hcon = chan->hs_hcon;
- struct hci_cp_create_accept_logical_link cp;
- struct hci_dev *hdev;
-
- BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
- &chan->conn->hcon->dst);
-
- if (!hs_hcon)
- return;
-
- hdev = hci_dev_hold(chan->hs_hcon->hdev);
- if (!hdev)
- return;
-
- cp.phy_handle = hs_hcon->handle;
-
- cp.tx_flow_spec.id = chan->local_id;
- cp.tx_flow_spec.stype = chan->local_stype;
- cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
- cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
- cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
-
- cp.rx_flow_spec.id = chan->remote_id;
- cp.rx_flow_spec.stype = chan->remote_stype;
- cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
- cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
- cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
- cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
-
- if (hs_hcon->out)
- hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
- &cp);
- else
- hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
- &cp);
-
- hci_dev_put(hdev);
-}
-
-void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
- struct hci_conn *hcon = hchan->conn;
- struct hci_cp_disconn_logical_link cp;
-
- if (hcon->state != BT_CONNECTED) {
- BT_DBG("hchan %p not connected", hchan);
- return;
- }
-
- cp.log_handle = cpu_to_le16(hchan->handle);
- hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
-}
-
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
-{
- BT_DBG("hchan %p", hchan);
-
- hci_chan_del(hchan);
-}
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
deleted file mode 100644
index 97c87abd129f..000000000000
--- a/net/bluetooth/amp.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __AMP_H
-#define __AMP_H
-
-struct amp_ctrl {
- struct list_head list;
- struct kref kref;
- __u8 id;
- __u16 assoc_len_so_far;
- __u16 assoc_rem_len;
- __u16 assoc_len;
- __u8 *assoc;
-};
-
-int amp_ctrl_put(struct amp_ctrl *ctrl);
-void amp_ctrl_get(struct amp_ctrl *ctrl);
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
-void amp_ctrl_list_flush(struct amp_mgr *mgr);
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out);
-
-int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon);
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-#else
-static inline void amp_create_logical_link(struct l2cap_chan *chan)
-{
-}
-
-static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
-}
-#endif
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
-
-#endif /* __AMP_H */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5a6a49885ab6..ec45f77fce21 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
case BNEP_COMPRESSED_DST_ONLY:
__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
- __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
+ __skb_put_data(nskb, s->eh.h_source, ETH_ALEN);
+ put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2));
break;
case BNEP_GENERAL:
@@ -549,7 +550,7 @@ static struct device *bnep_get_device(struct bnep_session *session)
return &conn->hcon->dev;
}
-static struct device_type bnep_type = {
+static const struct device_type bnep_type = {
.name = "bluetooth",
};
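
The BNEP hunk above fixes an over-read: the old code pulled ETH_ALEN + 2 bytes
out of the 6-byte h_source field, silently relying on h_proto sitting directly
behind it in struct ethhdr (a layout assumption that fortified memcpy checks
reject). The corrected pattern copies each member separately and uses an
unaligned store for the protocol field, since skb payloads give no alignment
guarantee. A sketch with a hypothetical helper:

static void bnep_push_eth_hdr(struct sk_buff *nskb,
			      const struct ethhdr *eh, const u8 *dest)
{
	__skb_put_data(nskb, dest, ETH_ALEN);		/* destination MAC */
	__skb_put_data(nskb, eh->h_source, ETH_ALEN);	/* source MAC */
	/* h_proto may land at an odd offset inside the skb, so avoid
	 * a plain __be16 assignment.
	 */
	put_unaligned(eh->h_proto, (__be16 *)__skb_put(nskb, 2));
}
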
diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
index 9214189279e8..1bc51e2b05a3 100644
--- a/net/bluetooth/eir.c
+++ b/net/bluetooth/eir.c
@@ -13,48 +13,33 @@
#define PNP_INFO_SVCLASS_ID 0x1200
-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
-{
- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
-
- /* If data is already NULL terminated just pass it directly */
- if (data[data_len - 1] == '\0')
- return eir_append_data(eir, eir_len, type, data, data_len);
-
- memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
-
- return eir_append_data(eir, eir_len, type, name, sizeof(name));
-}
-
u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
size_t short_len;
size_t complete_len;
- /* no space left for name (+ NULL + type + len) */
- if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+ /* no space left for name (+ type + len) */
+ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
return ad_len;
/* use complete name if present and fits */
complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
- return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len + 1);
+ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, complete_len);
/* use short name if present */
short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
if (short_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->short_name,
- short_len == HCI_MAX_SHORT_NAME_LENGTH ?
- short_len : short_len + 1);
+ short_len);
/* use shortened full name if present, we already know that name
 * is longer than HCI_MAX_SHORT_NAME_LENGTH
*/
if (complete_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->dev_name,
HCI_MAX_SHORT_NAME_LENGTH);
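
The eir.c hunk works because EIR/advertising elements are length-prefixed, not
NUL-terminated: each AD structure is one length octet (covering the type plus
payload), one type octet, then the raw payload, so appending a '\0' both
wastes a byte of scarce advertising space and makes the terminator part of the
visible name. A sketch of the append step under that layout (hypothetical
helper mirroring what eir_append_data() is assumed to do):

static u8 ad_append(u8 *buf, u8 off, u8 type, const u8 *data, u8 data_len)
{
	buf[off++] = data_len + 1;	/* length = type octet + payload */
	buf[off++] = type;		/* e.g. EIR_NAME_COMPLETE (0x09) */
	memcpy(&buf[off], data, data_len);

	return off + data_len;		/* next free offset */
}
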
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a41d2693f4d8..3ad74f76983b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -36,7 +36,6 @@
#include "hci_request.h"
#include "smp.h"
-#include "a2mp.h"
#include "eir.h"
struct sco_param {
@@ -69,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
};
/* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
struct hci_conn_params *params;
struct hci_dev *hdev = conn->hdev;
@@ -179,64 +178,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_dev_put(hdev);
}
-static void hci_acl_create_connection(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct inquiry_entry *ie;
- struct hci_cp_create_conn cp;
-
- BT_DBG("hcon %p", conn);
-
- /* Many controllers disallow HCI Create Connection while it is doing
- * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
- * Connection. This may cause the MGMT discovering state to become false
- * without user space's request but it is okay since the MGMT Discovery
- * APIs do not promise that discovery should be done forever. Instead,
- * the user space monitors the status of MGMT discovering and it may
- * request for discovery again when this flag becomes false.
- */
- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
- /* Put this connection to "pending" state so that it will be
- * executed after the inquiry cancel command complete event.
- */
- conn->state = BT_CONNECT2;
- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
- return;
- }
-
- conn->state = BT_CONNECT;
- conn->out = true;
- conn->role = HCI_ROLE_MASTER;
-
- conn->attempt++;
-
- conn->link_policy = hdev->link_policy;
-
- memset(&cp, 0, sizeof(cp));
- bacpy(&cp.bdaddr, &conn->dst);
- cp.pscan_rep_mode = 0x02;
-
- ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
- if (ie) {
- if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
- cp.pscan_rep_mode = ie->data.pscan_rep_mode;
- cp.pscan_mode = ie->data.pscan_mode;
- cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
- }
-
- memcpy(conn->dev_class, ie->data.dev_class, 3);
- }
-
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
- cp.role_switch = 0x01;
- else
- cp.role_switch = 0x00;
-
- hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
-}
-
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
BT_DBG("hcon %p", conn);
@@ -1175,9 +1116,6 @@ void hci_conn_del(struct hci_conn *conn)
}
}
- if (conn->amp_mgr)
- amp_mgr_put(conn->amp_mgr);
-
skb_queue_purge(&conn->data_q);
/* Remove the connection from the list and cleanup its remaining
@@ -1186,6 +1124,9 @@ void hci_conn_del(struct hci_conn *conn)
* rest of hci_conn_del.
*/
hci_conn_cleanup(conn);
+
+ /* Dequeue callbacks using connection pointer as data */
+ hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -1320,53 +1261,6 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
return 0;
}
-static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return;
-
- bt_dev_dbg(hdev, "err %d", err);
-
- hci_dev_lock(hdev);
-
- if (!err) {
- hci_connect_le_scan_cleanup(conn, 0x00);
- goto done;
- }
-
- /* Check if connection is still pending */
- if (conn != hci_lookup_le_connect(hdev))
- goto done;
-
- /* Flush to make sure we send create conn cancel command if needed */
- flush_delayed_work(&conn->le_conn_timeout);
- hci_conn_failed(conn, bt_status(err));
-
-done:
- hci_dev_unlock(hdev);
-}
-
-static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
-
- bt_dev_dbg(hdev, "conn %p", conn);
-
- clear_bit(HCI_CONN_SCANNING, &conn->flags);
- conn->state = BT_CONNECT;
-
- return hci_le_create_conn_sync(hdev, conn);
-}
-
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role)
@@ -1433,9 +1327,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
- UINT_PTR(conn->handle),
- create_le_conn_complete);
+ err = hci_connect_le_sync(hdev, conn);
if (err) {
hci_conn_del(conn);
return ERR_PTR(err);
@@ -1669,7 +1561,7 @@ done:
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason)
+ enum conn_reasons conn_reason, u16 timeout)
{
struct hci_conn *acl;
@@ -1700,10 +1592,18 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
acl->conn_reason = conn_reason;
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
+ int err;
+
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
- hci_acl_create_connection(acl);
+ acl->conn_timeout = timeout;
+
+ err = hci_connect_acl_sync(hdev, acl);
+ if (err) {
+ hci_conn_del(acl);
+ return ERR_PTR(err);
+ }
}
return acl;
@@ -1738,14 +1638,15 @@ static struct hci_link *hci_conn_link(struct hci_conn *parent,
}
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting, struct bt_codec *codec)
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout)
{
struct hci_conn *acl;
struct hci_conn *sco;
struct hci_link *link;
acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
- CONN_REASON_SCO_CONNECT);
+ CONN_REASON_SCO_CONNECT, timeout);
if (IS_ERR(acl))
return acl;
@@ -2156,18 +2057,31 @@ static int create_pa_sync(struct hci_dev *hdev, void *data)
return hci_update_passive_scan_sync(hdev);
}
-int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
- __u8 sid, struct bt_iso_qos *qos)
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid,
+ struct bt_iso_qos *qos)
{
struct hci_cp_le_pa_create_sync *cp;
+ struct hci_conn *conn;
+ int err;
if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
+
+ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ conn->iso_qos = *qos;
+ conn->state = BT_LISTEN;
+
+ hci_conn_hold(conn);
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp) {
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- return -ENOMEM;
+ hci_conn_drop(conn);
+ return ERR_PTR(-ENOMEM);
}
cp->options = qos->bcast.options;
@@ -2179,7 +2093,14 @@ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
cp->sync_cte_type = qos->bcast.sync_cte_type;
/* Queue start pa_create_sync and scan */
- return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+ err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
+ if (err < 0) {
+ hci_conn_drop(conn);
+ kfree(cp);
+ return ERR_PTR(err);
+ }
+
+ return conn;
}
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
@@ -2647,22 +2568,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
}
}
-/* Check pending connect attempts */
-void hci_conn_check_pending(struct hci_dev *hdev)
-{
- struct hci_conn *conn;
-
- BT_DBG("hdev %s", hdev->name);
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
- if (conn)
- hci_acl_create_connection(conn);
-
- hci_dev_unlock(hdev);
-}
-
static u32 get_link_mode(struct hci_conn *conn)
{
u32 link_mode = 0;
@@ -2978,12 +2883,10 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
- struct hci_conn *conn;
- u16 handle = PTR_UINT(data);
+ struct hci_conn *conn = data;
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}
@@ -3011,14 +2914,17 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
*/
if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
switch (hci_skb_event(hdev->sent_cmd)) {
+ case HCI_EV_CONN_COMPLETE:
case HCI_EV_LE_CONN_COMPLETE:
case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
case HCI_EVT_LE_CIS_ESTABLISHED:
- hci_cmd_sync_cancel(hdev, -ECANCELED);
+ hci_cmd_sync_cancel(hdev, ECANCELED);
break;
}
+ /* Cancel connect attempt if still queued/pending */
+ } else if (!hci_cancel_connect_sync(hdev, conn)) {
+ return 0;
}
- return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
- NULL);
+ return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2821a42cefdc..1690ae57a09d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -908,7 +908,7 @@ int hci_get_dev_info(void __user *arg)
else
flags = hdev->flags;
- strcpy(di.name, hdev->name);
+ strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
di.flags = flags;
@@ -940,20 +940,51 @@ int hci_get_dev_info(void __user *arg)
/* ---- Interface to HCI drivers ---- */
+static int hci_dev_do_poweroff(struct hci_dev *hdev)
+{
+ int err;
+
+ BT_DBG("%s %p", hdev->name, hdev);
+
+ hci_req_sync_lock(hdev);
+
+ err = hci_set_powered_sync(hdev, false);
+
+ hci_req_sync_unlock(hdev);
+
+ return err;
+}
+
static int hci_rfkill_set_block(void *data, bool blocked)
{
struct hci_dev *hdev = data;
+ int err;
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
+ if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
+ return 0;
+
if (blocked) {
hci_dev_set_flag(hdev, HCI_RFKILLED);
+
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG))
- hci_dev_do_close(hdev);
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ err = hci_dev_do_poweroff(hdev);
+ if (err) {
+ bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
+ err);
+
+ /* Make sure the device is still closed even if
+ * anything during the power off sequence (e.g.
+ * disconnecting devices) failed.
+ */
+ hci_dev_do_close(hdev);
+ }
+ }
} else {
hci_dev_clear_flag(hdev, HCI_RFKILLED);
}
@@ -1491,11 +1522,12 @@ static void hci_cmd_timeout(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
cmd_timer.work);
- if (hdev->sent_cmd) {
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(sent->opcode);
+ if (hdev->req_skb) {
+ u16 opcode = hci_skb_opcode(hdev->req_skb);
bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
} else {
bt_dev_err(hdev, "command tx timeout");
}
@@ -2608,10 +2640,11 @@ int hci_register_dev(struct hci_dev *hdev)
*/
switch (hdev->dev_type) {
case HCI_PRIMARY:
- id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
+ id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
break;
case HCI_AMP:
- id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
+ id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
+ GFP_KERNEL);
break;
default:
return -EINVAL;
@@ -2710,7 +2743,7 @@ err_wqueue:
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
err:
- ida_simple_remove(&hci_index_ida, hdev->id);
+ ida_free(&hci_index_ida, hdev->id);
return error;
}
@@ -2793,8 +2826,9 @@ void hci_release_dev(struct hci_dev *hdev)
hci_dev_unlock(hdev);
ida_destroy(&hdev->unset_handle_ida);
- ida_simple_remove(&hci_index_ida, hdev->id);
+ ida_free(&hci_index_ida, hdev->id);
kfree_skb(hdev->sent_cmd);
+ kfree_skb(hdev->req_skb);
kfree_skb(hdev->recv_event);
kfree(hdev);
}
@@ -2826,6 +2860,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
return ret;
}
+/* Cancel ongoing command synchronously:
+ *
+ * - Cancel command timer
+ * - Reset command counter
+ * - Cancel command request
+ */
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ hci_cmd_sync_cancel_sync(hdev, -err);
+}
+
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
@@ -2843,7 +2894,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
return 0;
/* Cancel potentially blocking sync operation before suspend */
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
hci_req_sync_lock(hdev);
ret = hci_suspend_sync(hdev);
@@ -3107,21 +3158,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
EXPORT_SYMBOL(__hci_cmd_send);
/* Get data from the previously sent command */
-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
struct hci_command_hdr *hdr;
- if (!hdev->sent_cmd)
+ if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
return NULL;
- hdr = (void *) hdev->sent_cmd->data;
+ hdr = (void *)skb->data;
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ return skb->data + HCI_COMMAND_HDR_SIZE;
+}
- return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
+/* Get data from the previously sent command */
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+{
+ void *data;
+
+ /* Check if opcode matches last sent command */
+ data = hci_cmd_data(hdev->sent_cmd, opcode);
+ if (!data)
+ /* Check if opcode matches last request */
+ data = hci_cmd_data(hdev->req_skb, opcode);
+
+ return data;
}
/* Get data from last received event */
@@ -4022,17 +4085,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
if (!status && !hci_req_is_complete(hdev))
return;
+ skb = hdev->req_skb;
+
/* If this was the last command in a request the complete
- * callback would be found in hdev->sent_cmd instead of the
+ * callback would be found in hdev->req_skb instead of the
* command queue (hdev->cmd_q).
*/
- if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
return;
}
- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+ if (skb && bt_cb(skb)->hci.req_complete) {
+ *req_complete = bt_cb(skb)->hci.req_complete;
return;
}
@@ -4128,6 +4193,36 @@ static void hci_rx_work(struct work_struct *work)
}
}
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "skb %p", skb);
+
+ kfree_skb(hdev->sent_cmd);
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (!hdev->sent_cmd) {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ return;
+ }
+
+ err = hci_send_frame(hdev, skb);
+ if (err < 0) {
+ hci_cmd_sync_cancel_sync(hdev, err);
+ return;
+ }
+
+ if (hci_req_status_pend(hdev) &&
+ !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ }
+
+ atomic_dec(&hdev->cmd_cnt);
+}
+
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
@@ -4142,30 +4237,15 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- kfree_skb(hdev->sent_cmd);
-
- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
- if (hdev->sent_cmd) {
- int res;
- if (hci_req_status_pend(hdev))
- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
- atomic_dec(&hdev->cmd_cnt);
+ hci_send_cmd_sync(hdev, skb);
- res = hci_send_frame(hdev, skb);
- if (res < 0)
- __hci_cmd_sync_cancel(hdev, -res);
-
- rcu_read_lock();
- if (test_bit(HCI_RESET, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
- cancel_delayed_work(&hdev->cmd_timer);
- else
- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
- HCI_CMD_TIMEOUT);
- rcu_read_unlock();
- } else {
- skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+ rcu_read_lock();
+ if (test_bit(HCI_RESET, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ cancel_delayed_work(&hdev->cmd_timer);
+ else
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
}
}
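
The hci_core.c hunks above also migrate off the deprecated ida_simple_*()
wrappers. The one trap in that conversion is the upper bound: ida_simple_get()
took an exclusive end, while ida_alloc_max() and ida_alloc_range() take an
inclusive maximum, which is why HCI_MAX_ID becomes HCI_MAX_ID - 1. A sketch of
the equivalent allocation, wrapped in a hypothetical helper:

static int hci_alloc_index(struct ida *ida, bool reserve_zero)
{
	/* Inclusive maximum: IDs 0..HCI_MAX_ID-1, or 1..HCI_MAX_ID-1
	 * when index 0 must stay reserved, as for AMP controllers.
	 */
	if (reserve_zero)
		return ida_alloc_range(ida, 1, HCI_MAX_ID - 1, GFP_KERNEL);

	return ida_alloc_max(ida, HCI_MAX_ID - 1, GFP_KERNEL);
}

Allocation and release pair as ida_alloc_*() / ida_free(), matching the err
path and hci_release_dev() changes in the same hunks.
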
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2a5f5a7d2412..4ae224824012 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -36,8 +36,6 @@
#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
-#include "a2mp.h"
-#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"
@@ -95,11 +93,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
/* It is possible that we receive Inquiry Complete event right
* before we receive Inquiry Cancel Command Complete event, in
* which case the latter event should have status of Command
- * Disallowed (0x0c). This should not be treated as error, since
+ * Disallowed. This should not be treated as error, since
* we actually achieve what Inquiry Cancel wants to achieve,
* which is to end the last Inquiry session.
*/
- if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
rp->status = 0x00;
}
@@ -120,8 +118,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -152,8 +148,6 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
- hci_conn_check_pending(hdev);
-
return rp->status;
}
@@ -2314,10 +2308,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
bt_dev_dbg(hdev, "status 0x%2.2x", status);
- if (status) {
- hci_conn_check_pending(hdev);
+ if (status)
return;
- }
if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
set_bit(HCI_INQUIRY, &hdev->flags);
@@ -2342,12 +2334,9 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
- if (status != 0x0c || conn->attempt > 2) {
- conn->state = BT_CLOSED;
- hci_connect_cfm(conn, status);
- hci_conn_del(conn);
- } else
- conn->state = BT_CONNECT2;
+ conn->state = BT_CLOSED;
+ hci_connect_cfm(conn, status);
+ hci_conn_del(conn);
}
} else {
if (!conn) {
@@ -2526,9 +2515,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
* Only those in BT_CONFIG or BT_CONNECTED states can be
* considered connected.
*/
- if (conn &&
- (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
mgmt_device_connected(hdev, conn, name, name_len);
if (discov->state == DISCOVERY_STOPPED)
@@ -3039,8 +3026,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
@@ -3262,8 +3247,6 @@ done:
unlock:
hci_dev_unlock(hdev);
-
- hci_conn_check_pending(hdev);
}
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -3556,8 +3539,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -3660,7 +3641,8 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
* controller really supports it. If it doesn't, assume
* the default size (16).
*/
- if (!(hdev->commands[20] & 0x10)) {
+ if (!(hdev->commands[20] & 0x10) ||
+ test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) {
conn->enc_key_size = HCI_LINK_KEY_SIZE;
goto notify;
}
@@ -3762,8 +3744,9 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -3936,6 +3919,11 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
* last.
*/
hci_connect_cfm(conn, rp->status);
+
+ /* Notify device connected in case it is a BIG Sync */
+ if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
+ mgmt_device_connected(hdev, conn, NULL, 0);
+
break;
}
@@ -4381,7 +4369,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
* (since for this kind of commands there will not be a command
* complete event).
*/
- if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
+ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
req_complete_skb);
if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
@@ -5010,8 +4998,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -5675,150 +5664,6 @@ unlock:
hci_dev_unlock(hdev);
}
-#if IS_ENABLED(CONFIG_BT_HS)
-static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_channel_selected *ev = data;
- struct hci_conn *hcon;
-
- bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
-static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_phy_link_complete *ev = data;
- struct hci_conn *hcon, *bredr_hcon;
-
- bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
- ev->status);
-
- hci_dev_lock(hdev);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- goto unlock;
-
- if (!hcon->amp_mgr)
- goto unlock;
-
- if (ev->status) {
- hci_conn_del(hcon);
- goto unlock;
- }
-
- bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
-
- hcon->state = BT_CONNECTED;
- bacpy(&hcon->dst, &bredr_hcon->dst);
-
- hci_conn_hold(hcon);
- hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_drop(hcon);
-
- hci_debugfs_create_conn(hcon);
- hci_conn_add_sysfs(hcon);
-
- amp_physical_cfm(bredr_hcon, hcon);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_logical_link_complete *ev = data;
- struct hci_conn *hcon;
- struct hci_chan *hchan;
- struct amp_mgr *mgr;
-
- bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
- le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- /* Create AMP hchan */
- hchan = hci_chan_create(hcon);
- if (!hchan)
- return;
-
- hchan->handle = le16_to_cpu(ev->handle);
- hchan->amp = true;
-
- BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
-
- mgr = hcon->amp_mgr;
- if (mgr && mgr->bredr_chan) {
- struct l2cap_chan *bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- bredr_chan->conn->mtu = hdev->block_mtu;
- l2cap_logical_cfm(bredr_chan, hchan, 0);
- hci_conn_hold(hcon);
-
- l2cap_chan_unlock(bredr_chan);
- }
-}
-
-static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_logical_link_complete *ev = data;
- struct hci_chan *hchan;
-
- bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
- le16_to_cpu(ev->handle), ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
- if (!hchan || !hchan->amp)
- goto unlock;
-
- amp_destroy_logical_link(hchan, ev->reason);
-
-unlock:
- hci_dev_unlock(hdev);
-}
-
-static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_disconn_phy_link_complete *ev = data;
- struct hci_conn *hcon;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
-
- if (ev->status)
- return;
-
- hci_dev_lock(hdev);
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (hcon && hcon->type == AMP_LINK) {
- hcon->state = BT_CLOSED;
- hci_disconn_cfm(hcon, ev->reason);
- hci_conn_del(hcon);
- }
-
- hci_dev_unlock(hdev);
-}
-#endif
-
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
u8 bdaddr_type, bdaddr_t *local_rpa)
{
@@ -5984,8 +5829,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
goto unlock;
}
- if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->state = BT_CONFIG;
@@ -6684,7 +6528,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
* transition into connected state and mark it as
* successful.
*/
- if (!conn->out && ev->status == 0x1a &&
+ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
(hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
status = 0x00;
else
@@ -7214,6 +7058,9 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
/* Notify iso layer */
hci_connect_cfm(pa_sync, 0x00);
+ /* Notify MGMT layer */
+ mgmt_device_connected(hdev, pa_sync, NULL, 0);
+
unlock:
hci_dev_unlock(hdev);
}
@@ -7324,10 +7171,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
/* Only match event if command OGF is for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
- hci_skb_event(hdev->sent_cmd) == ev->subevent) {
- *opcode = hci_skb_opcode(hdev->sent_cmd);
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
+ hci_skb_event(hdev->req_skb) == ev->subevent) {
+ *opcode = hci_skb_opcode(hdev->req_skb);
hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
req_complete_skb);
}
@@ -7626,25 +7473,6 @@ static const struct hci_ev {
/* [0x3e = HCI_EV_LE_META] */
HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
-#if IS_ENABLED(CONFIG_BT_HS)
- /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
- HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
- sizeof(struct hci_ev_phy_link_complete)),
- /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
- HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
- sizeof(struct hci_ev_channel_selected)),
- /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
- HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
- hci_disconn_loglink_complete_evt,
- sizeof(struct hci_ev_disconn_logical_link_complete)),
- /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
- HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
- sizeof(struct hci_ev_logical_link_complete)),
- /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
- HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
- hci_disconn_phylink_complete_evt,
- sizeof(struct hci_ev_disconn_phy_link_complete)),
-#endif
/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
sizeof(struct hci_ev_num_comp_blocks)),
@@ -7714,10 +7542,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
}
/* Only match event if command OGF is not for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
- hci_skb_event(hdev->sent_cmd) == event) {
- hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
+ hci_skb_event(hdev->req_skb) == event) {
+ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
status, &req_complete, &req_complete_skb);
req_evt = event;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6e023b0104b0..00e02138003e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -895,7 +895,7 @@ void hci_request_setup(struct hci_dev *hdev)
void hci_request_cancel_all(struct hci_dev *hdev)
{
- __hci_cmd_sync_cancel(hdev, ENODEV);
+ hci_cmd_sync_cancel_sync(hdev, ENODEV);
cancel_interleave_scan(hdev);
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 3e7cd330d731..4ee1b976678b 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -101,7 +101,7 @@ static bool hci_sock_gen_cookie(struct sock *sk)
int id = hci_pi(sk)->cookie;
if (!id) {
- id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
+ id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
if (id < 0)
id = 0xffffffff;
@@ -119,7 +119,7 @@ static void hci_sock_free_cookie(struct sock *sk)
if (id) {
hci_pi(sk)->cookie = 0xffffffff;
- ida_simple_remove(&sock_cookie_ida, id);
+ ida_free(&sock_cookie_ida, id);
}
}
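
The IDA conversion in this file is mechanical: ida_simple_get(ida, start, end, gfp) with end == 0 meant "no upper bound", which ida_alloc_min() expresses directly. A sketch of the equivalence (illustrative identifier names, not from this patch):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	/* Old: id = ida_simple_get(&example_ida, 1, 0, GFP_KERNEL);
	 * New: allocates the lowest free id >= 1, returns -errno on failure.
	 */
	static int example_get_cookie(void)
	{
		return ida_alloc_min(&example_ida, 1, GFP_KERNEL);
	}

Frees pair up the same way: ida_simple_remove() becomes ida_free().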
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 5716345a26df..f6b662369322 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -32,6 +32,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
+ /* Free the request command so it is not reused as the response */
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+
if (skb) {
struct sock *sk = hci_skb_sk(skb);
@@ -39,7 +43,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (sk)
sock_put(sk);
- hdev->req_skb = skb_get(skb);
+ hdev->req_rsp = skb_get(skb);
}
wake_up_interruptible(&hdev->req_wait_q);
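
The rename above is the point of this hunk: the command skb and the response skb now live in separate fields, so freeing the request on completion can no longer clobber the response handed back to the waiter. A sketch of the two roles as used in this file (the authoritative fields live in struct hci_dev in hci_core.h):

	/* struct hci_dev, request-tracking fields (sketch):
	 *
	 *   struct sk_buff *req_skb;   command that was sent; incoming events
	 *                              are matched against its opcode, and it
	 *                              is freed once the request completes
	 *   struct sk_buff *req_rsp;   completion event; a reference is taken
	 *                              and returned to the waiter in
	 *                              __hci_cmd_sync_sk()
	 */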
@@ -187,8 +191,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = 0;
hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
+ skb = hdev->req_rsp;
+ hdev->req_rsp = NULL;
bt_dev_dbg(hdev, "end: err %d", err);
@@ -566,6 +570,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
+static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry,
+ int err)
+{
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
void hci_cmd_sync_clear(struct hci_dev *hdev)
{
struct hci_cmd_sync_work_entry *entry, *tmp;
@@ -574,17 +589,12 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
cancel_work_sync(&hdev->reenable_adv_work);
mutex_lock(&hdev->cmd_sync_work_lock);
- list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
- if (entry->destroy)
- entry->destroy(hdev, entry->data, -ECANCELED);
-
- list_del(&entry->list);
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
mutex_unlock(&hdev->cmd_sync_work_lock);
}
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
@@ -592,15 +602,17 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
- cancel_delayed_work_sync(&hdev->cmd_timer);
- cancel_delayed_work_sync(&hdev->ncmd_timer);
- atomic_set(&hdev->cmd_cnt, 1);
-
- wake_up_interruptible(&hdev->req_wait_q);
+ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
}
}
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+/* Cancel an ongoing command request synchronously:
+ *
+ * - Set result and mark status to HCI_REQ_CANCELED
+ * - Wake up the command sync thread
+ */
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
@@ -608,10 +620,10 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
- queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ wake_up_interruptible(&hdev->req_wait_q);
}
}
-EXPORT_SYMBOL(hci_cmd_sync_cancel);
+EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
/* Submit HCI command to be run as cmd_sync_work:
*
@@ -667,6 +679,115 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
+static struct hci_cmd_sync_work_entry *
+_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (func && entry->func != func)
+ continue;
+
+ if (data && entry->data != data)
+ continue;
+
+ if (destroy && entry->destroy != destroy)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Queue HCI command entry once:
+ *
+ * - Look up whether an entry already exists, and only if it doesn't, create
+ * a new entry and queue it.
+ */
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+
+/* Lookup HCI command entry:
+ *
+ * - Return the first entry that matches the function callback, data, or
+ * destroy callback.
+ */
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return entry;
+}
+EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
+
+/* Cancel HCI command entry */
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry)
+{
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
+
+/* Dequeue one HCI command entry:
+ *
+ * - Look up and cancel the first entry that matches.
+ */
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ if (!entry)
+ return false;
+
+ hci_cmd_sync_cancel_entry(hdev, entry);
+
+ return true;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
+
+/* Dequeue HCI command entry:
+ *
+ * - Look up and cancel every entry that matches the function callback, data,
+ * or destroy callback.
+ */
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+ bool ret = false;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
+ destroy))) {
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ ret = true;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue);
+
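
A minimal usage sketch of the new once/dequeue API (illustrative function names, not part of this patch): queue a deferred action at most once for a given (func, data, destroy) triple, and take it back out if it has not run yet.

	static int example_sync_func(struct hci_dev *hdev, void *data)
	{
		struct hci_conn *conn = data;

		/* Entries may be cancelled before they run */
		if (!hci_conn_valid(hdev, conn))
			return -ECANCELED;

		return 0;
	}

	static void example_start(struct hci_dev *hdev, struct hci_conn *conn)
	{
		/* No-op if an identical entry is already queued */
		hci_cmd_sync_queue_once(hdev, example_sync_func, conn, NULL);
	}

	static bool example_abort(struct hci_dev *hdev, struct hci_conn *conn)
	{
		/* True if the entry was still queued and got cancelled */
		return hci_cmd_sync_dequeue_once(hdev, example_sync_func,
						 conn, NULL);
	}

This is the pattern the connection code below adopts for ACL and LE create-connection work.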
int hci_update_eir_sync(struct hci_dev *hdev)
{
struct hci_cp_write_eir cp;
@@ -2445,6 +2566,16 @@ static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
return p;
}
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x80))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
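
The commands[26] & 0x80 guard tests the Supported Commands bitmap that HCI Read Local Supported Commands fills into hdev->commands[]; reading the Core Spec tables, octet 26 bit 7 should be the LE Clear Filter Accept List command. An illustrative helper (not part of this patch) naming the test:

	static inline bool le_clear_accept_list_supported(struct hci_dev *hdev)
	{
		/* Supported Commands bitmap, octet 26 bit 7 */
		return hdev->commands[26] & 0x80;
	}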
/* Device must not be scanning when updating the accept list.
*
* Update is done using the following sequence:
@@ -2493,6 +2624,31 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
goto done;
}
+ /* Force address filtering if PA Sync is in progress */
+ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+ struct hci_cp_le_pa_create_sync *sent;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
+ if (sent) {
+ struct conn_params pa;
+
+ memset(&pa, 0, sizeof(pa));
+
+ bacpy(&pa.addr, &sent->addr);
+ pa.addr_type = sent->addr_type;
+
+ /* Clear first since there could be addresses left
+ * behind.
+ */
+ hci_le_clear_accept_list_sync(hdev);
+
+ num_entries = 1;
+ err = hci_le_add_accept_list_sync(hdev, &pa,
+ &num_entries);
+ goto done;
+ }
+ }
+
/* Go through the current accept list programmed into the
* controller one by one and check if that address is connected or is
* still in the list of pending connections or list of devices to
@@ -2602,6 +2758,14 @@ done:
return filter_policy;
}
+static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
+ u8 type, u16 interval, u16 window)
+{
+ cp->type = type;
+ cp->interval = cpu_to_le16(interval);
+ cp->window = cpu_to_le16(window);
+}
+
static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
u16 interval, u16 window,
u8 own_addr_type, u8 filter_policy)
@@ -2609,7 +2773,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
struct hci_cp_le_set_ext_scan_params *cp;
struct hci_cp_le_scan_phy_params *phy;
u8 data[sizeof(*cp) + sizeof(*phy) * 2];
- u8 num_phy = 0;
+ u8 num_phy = 0x00;
cp = (void *)data;
phy = (void *)cp->data;
@@ -2619,28 +2783,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
cp->own_addr_type = own_addr_type;
cp->filter_policy = filter_policy;
+ /* If PA Sync is in progress, select the PHY based on the
+ * hci_conn.iso_qos.
+ */
+ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+ struct hci_cp_le_add_to_accept_list *sent;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
+ if (sent) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
+ &sent->bdaddr);
+ if (conn) {
+ struct bt_iso_qos *qos = &conn->iso_qos;
+
+ if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
+ qos->bcast.in.phy & BT_ISO_PHY_2M) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+ hci_le_scan_phy_params(phy, type,
+ interval,
+ window);
+ num_phy++;
+ phy++;
+ }
+
+ if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+ hci_le_scan_phy_params(phy, type,
+ interval,
+ window);
+ num_phy++;
+ phy++;
+ }
+
+ if (num_phy)
+ goto done;
+ }
+ }
+ }
+
if (scan_1m(hdev) || scan_2m(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_1M;
-
- phy->type = type;
- phy->interval = cpu_to_le16(interval);
- phy->window = cpu_to_le16(window);
-
+ hci_le_scan_phy_params(phy, type, interval, window);
num_phy++;
phy++;
}
if (scan_coded(hdev)) {
cp->scanning_phys |= LE_SCAN_PHY_CODED;
-
- phy->type = type;
- phy->interval = cpu_to_le16(interval);
- phy->window = cpu_to_le16(window);
-
+ hci_le_scan_phy_params(phy, type, interval, window);
num_phy++;
phy++;
}
+done:
+ if (!num_phy)
+ return -EINVAL;
+
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
sizeof(*cp) + sizeof(*phy) * num_phy,
data, HCI_CMD_TIMEOUT);
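
The command built above is variable-length: a fixed header followed by one hci_cp_le_scan_phy_params block per bit set in scanning_phys, which is why the stack buffer reserves sizeof(*cp) + sizeof(*phy) * 2 and the final length is computed from num_phy. The layout, per the structs in include/net/bluetooth/hci.h:

	struct hci_cp_le_set_ext_scan_params {
		__u8 own_addr_type;
		__u8 filter_policy;
		__u8 scanning_phys;	/* bit 0: LE_SCAN_PHY_1M,
					 * bit 2: LE_SCAN_PHY_CODED */
		__u8 data[];		/* num_phy blocks follow */
	} __packed;

	struct hci_cp_le_scan_phy_params {
		__u8   type;
		__le16 interval;
		__le16 window;
	} __packed;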
@@ -2879,7 +3079,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return 0;
- return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+ return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+ NULL);
}
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
@@ -4098,16 +4299,6 @@ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
0, NULL, HCI_CMD_TIMEOUT);
}
-/* Clear LE Accept List */
-static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
-{
- if (!(hdev->commands[26] & 0x80))
- return 0;
-
- return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
- HCI_CMD_TIMEOUT);
-}
-
/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
@@ -4834,6 +5025,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -4994,6 +5190,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ /* Drop last request */
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -5403,27 +5605,33 @@ static int hci_power_off_sync(struct hci_dev *hdev)
if (!test_bit(HCI_UP, &hdev->flags))
return 0;
+ hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
+
if (test_bit(HCI_ISCAN, &hdev->flags) ||
test_bit(HCI_PSCAN, &hdev->flags)) {
err = hci_write_scan_enable_sync(hdev, 0x00);
if (err)
- return err;
+ goto out;
}
err = hci_clear_adv_sync(hdev, NULL, false);
if (err)
- return err;
+ goto out;
err = hci_stop_discovery_sync(hdev);
if (err)
- return err;
+ goto out;
/* Terminated due to Power Off */
err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
if (err)
- return err;
+ goto out;
+
+ err = hci_dev_close_sync(hdev);
- return hci_dev_close_sync(hdev);
+out:
+ hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
+ return err;
}
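
The new flag brackets the whole teardown so other paths (see the set_powered check in mgmt.c further down in this diff) can detect an in-flight power-off and report busy instead of racing it. The shape of the guard, as a sketch with a hypothetical step:

	static int example_guarded_power_off(struct hci_dev *hdev)
	{
		int err;

		hci_dev_set_flag(hdev, HCI_POWERING_DOWN);

		err = some_teardown_step(hdev);	/* hypothetical; may fail */

		/* The flag is cleared on every exit path, success or error */
		hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
		return err;
	}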
int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
@@ -6161,12 +6369,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
conn->conn_timeout, NULL);
}
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
struct hci_cp_le_create_conn cp;
struct hci_conn_params *params;
u8 own_addr_type;
int err;
+ struct hci_conn *conn = data;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
+ conn->state = BT_CONNECT;
/* If requested to connect as peripheral use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
@@ -6484,3 +6701,125 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
UINT_PTR(instance), NULL);
}
+
+static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_conn *conn = data;
+ struct inquiry_entry *ie;
+ struct hci_cp_create_conn cp;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ /* Many controllers disallow HCI Create Connection while they are doing
+ * HCI Inquiry, so cancel the Inquiry first before issuing HCI Create
+ * Connection. This may clear the MGMT discovering state without user
+ * space having asked for it, but that is acceptable since the MGMT
+ * Discovery APIs do not promise that discovery runs forever. Instead,
+ * user space monitors the MGMT discovering state and may request
+ * discovery again when the flag becomes false.
+ */
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
+ NULL, HCI_CMD_TIMEOUT);
+ if (err)
+ bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
+ }
+
+ conn->state = BT_CONNECT;
+ conn->out = true;
+ conn->role = HCI_ROLE_MASTER;
+
+ conn->attempt++;
+
+ conn->link_policy = hdev->link_policy;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie) {
+ if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
+ cp.pscan_rep_mode = ie->data.pscan_rep_mode;
+ cp.pscan_mode = ie->data.pscan_mode;
+ cp.clock_offset = ie->data.clock_offset |
+ cpu_to_le16(0x8000);
+ }
+
+ memcpy(conn->dev_class, ie->data.dev_class, 3);
+ }
+
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
+ cp.role_switch = 0x01;
+ else
+ cp.role_switch = 0x00;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
+ sizeof(cp), &cp,
+ HCI_EV_CONN_COMPLETE,
+ conn->conn_timeout, NULL);
+}
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
+ NULL);
+}
+
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct hci_conn *conn = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!hci_conn_valid(hdev, conn))
+ goto done;
+
+ if (!err) {
+ hci_connect_le_scan_cleanup(conn, 0x00);
+ goto done;
+ }
+
+ /* Check if connection is still pending */
+ if (conn != hci_lookup_le_connect(hdev))
+ goto done;
+
+ /* Flush to make sure we send create conn cancel command if needed */
+ flush_delayed_work(&conn->le_conn_timeout);
+ hci_conn_failed(conn, bt_status(err));
+
+done:
+ hci_dev_unlock(hdev);
+}
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
+ create_le_conn_complete);
+}
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ if (conn->state != BT_OPEN)
+ return -EINVAL;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev,
+ hci_acl_create_conn_sync,
+ conn, NULL);
+ case LE_LINK:
+ return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
+ conn, create_le_conn_complete);
+ }
+
+ return -ENOENT;
+}
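
A caller-side sketch (hypothetical wrapper, not from this patch) of the intended split above: a zero return from hci_cancel_connect_sync() means the queued create-connection work was dequeued before it ran; nonzero means it already started, so the connection has to be torn down through the normal abort path instead.

	static void example_abort_connect(struct hci_dev *hdev,
					  struct hci_conn *conn)
	{
		if (hci_cancel_connect_sync(hdev, conn))
			hci_abort_conn(conn, HCI_ERROR_LOCAL_HOST_TERM);
	}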
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 04f6572d35f1..c8793e57f4b5 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -3,7 +3,7 @@
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2022 Intel Corporation
- * Copyright 2023 NXP
+ * Copyright 2023-2024 NXP
*/
#include <linux/module.h>
@@ -690,11 +690,8 @@ static void iso_sock_cleanup_listen(struct sock *parent)
iso_sock_kill(sk);
}
- /* If listening socket stands for a PA sync connection,
- * properly disconnect the hcon and socket.
- */
- if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon &&
- test_bit(HCI_CONN_PA_SYNC, &iso_pi(parent)->conn->hcon->flags)) {
+ /* If listening socket has a hcon, properly disconnect it */
+ if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon) {
iso_sock_disconn(parent);
return;
}
@@ -837,10 +834,10 @@ static struct bt_iso_qos default_qos = {
.bcode = {0x00},
.options = 0x00,
.skip = 0x0000,
- .sync_timeout = 0x4000,
+ .sync_timeout = BT_ISO_SYNC_TIMEOUT,
.sync_cte_type = 0x00,
.mse = 0x00,
- .timeout = 0x4000,
+ .timeout = BT_ISO_SYNC_TIMEOUT,
},
};
@@ -1076,6 +1073,8 @@ static int iso_listen_bis(struct sock *sk)
{
struct hci_dev *hdev;
int err = 0;
+ struct iso_conn *conn;
+ struct hci_conn *hcon;
BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src,
&iso_pi(sk)->dst, iso_pi(sk)->bc_sid);
@@ -1096,18 +1095,40 @@ static int iso_listen_bis(struct sock *sk)
if (!hdev)
return -EHOSTUNREACH;
+ hci_dev_lock(hdev);
+
/* Fail if user set invalid QoS */
if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) {
iso_pi(sk)->qos = default_qos;
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ hcon = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
+ le_addr_type(iso_pi(sk)->dst_type),
+ iso_pi(sk)->bc_sid, &iso_pi(sk)->qos);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto unlock;
}
- err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
- le_addr_type(iso_pi(sk)->dst_type),
- iso_pi(sk)->bc_sid, &iso_pi(sk)->qos);
+ conn = iso_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = iso_chan_add(conn, sk, NULL);
+ if (err) {
+ hci_conn_drop(hcon);
+ goto unlock;
+ }
hci_dev_put(hdev);
+unlock:
+ hci_dev_unlock(hdev);
return err;
}
@@ -1889,7 +1910,6 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
struct hci_evt_le_big_info_adv_report *ev2;
struct hci_ev_le_per_adv_report *ev3;
struct sock *sk;
- int lm = 0;
bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
@@ -1933,7 +1953,7 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (sk && test_bit(BT_SK_PA_SYNC_TERM,
&iso_pi(sk)->flags))
- return lm;
+ return 0;
}
if (sk) {
@@ -1961,16 +1981,58 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT);
if (ev3) {
- size_t base_len = ev3->length;
+ size_t base_len = 0;
u8 *base;
+ struct hci_conn *hcon;
sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
iso_match_sync_handle_pa_report, ev3);
- base = eir_get_service_data(ev3->data, ev3->length,
- EIR_BAA_SERVICE_UUID, &base_len);
- if (base && sk && base_len <= sizeof(iso_pi(sk)->base)) {
+ if (!sk)
+ goto done;
+
+ hcon = iso_pi(sk)->conn->hcon;
+ if (!hcon)
+ goto done;
+
+ if (ev3->data_status == LE_PA_DATA_TRUNCATED) {
+ /* The controller was unable to retrieve PA data. */
+ memset(hcon->le_per_adv_data, 0,
+ HCI_MAX_PER_AD_TOT_LEN);
+ hcon->le_per_adv_data_len = 0;
+ hcon->le_per_adv_data_offset = 0;
+ goto done;
+ }
+
+ if (hcon->le_per_adv_data_offset + ev3->length >
+ HCI_MAX_PER_AD_TOT_LEN)
+ goto done;
+
+ memcpy(hcon->le_per_adv_data + hcon->le_per_adv_data_offset,
+ ev3->data, ev3->length);
+ hcon->le_per_adv_data_offset += ev3->length;
+
+ if (ev3->data_status == LE_PA_DATA_COMPLETE) {
+ /* All PA data has been received. */
+ hcon->le_per_adv_data_len =
+ hcon->le_per_adv_data_offset;
+ hcon->le_per_adv_data_offset = 0;
+
+ /* Extract BASE */
+ base = eir_get_service_data(hcon->le_per_adv_data,
+ hcon->le_per_adv_data_len,
+ EIR_BAA_SERVICE_UUID,
+ &base_len);
+
+ if (!base || base_len > BASE_MAX_LENGTH)
+ goto done;
+
memcpy(iso_pi(sk)->base, base, base_len);
iso_pi(sk)->base_len = base_len;
+ } else {
+ /* This is a PA data fragment. Keep le_per_adv_data_len set to 0
+ * until all data has been reassembled.
+ */
+ hcon->le_per_adv_data_len = 0;
}
} else {
sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL);
@@ -1978,16 +2040,14 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
done:
if (!sk)
- return lm;
-
- lm |= HCI_LM_ACCEPT;
+ return 0;
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
*flags |= HCI_PROTO_DEFER;
sock_put(sk);
- return lm;
+ return HCI_LM_ACCEPT;
}
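
The reassembly added above keys off the data_status field of the LE Periodic Advertising Report event; a recap of the three cases, with status values as defined for that event (hedged against hci.h):

	/* Sketch of the state machine keyed off ev3->data_status:
	 *
	 *   LE_PA_DATA_COMPLETE (0x00)   last fragment: publish
	 *                                le_per_adv_data_len and extract the
	 *                                BASE from the service data
	 *   0x01 (more data to come)     accumulate at le_per_adv_data_offset
	 *                                and keep le_per_adv_data_len at 0
	 *   LE_PA_DATA_TRUNCATED (0x02)  controller gave up: reset the buffer
	 */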
static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 656f49b299d2..467b242d8be0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -39,8 +39,6 @@
#include <net/bluetooth/l2cap.h>
#include "smp.h"
-#include "a2mp.h"
-#include "amp.h"
#define LE_FLOWCTL_MAX_CREDITS 65535
@@ -167,24 +165,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
return NULL;
}
-static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
- u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- if (c) {
- /* Only lock if chan reference is not 0 */
- c = l2cap_chan_hold_unless_zero(c);
- if (c)
- l2cap_chan_lock(c);
- }
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
u8 src_type)
{
@@ -651,7 +631,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->ops->teardown(chan, err);
if (conn) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
/* Delete from channel list */
list_del(&chan->list);
@@ -666,16 +645,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (chan->chan_type != L2CAP_CHAN_FIXED ||
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_drop(conn->hcon);
-
- if (mgr && mgr->bredr_chan == chan)
- mgr->bredr_chan = NULL;
- }
-
- if (chan->hs_hchan) {
- struct hci_chan *hs_hchan = chan->hs_hchan;
-
- BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
- amp_disconnect_logical_link(hs_hchan);
}
if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
@@ -977,12 +946,6 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
hci_send_acl(conn->hchan, skb, flags);
}
-static bool __chan_is_moving(struct l2cap_chan *chan)
-{
- return chan->move_state != L2CAP_MOVE_STABLE &&
- chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
-}
-
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct hci_conn *hcon = chan->conn->hcon;
@@ -991,15 +954,6 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
skb->priority);
- if (chan->hs_hcon && !__chan_is_moving(chan)) {
- if (chan->hs_hchan)
- hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
- else
- kfree_skb(skb);
-
- return;
- }
-
/* Use NO_FLUSH for LE links (where this is the only option) or
* if the BR/EDR link supports it and flushing has not been
* explicitly requested (through FLAG_FLUSHABLE).
@@ -1180,9 +1134,6 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
if (!control->sframe)
return;
- if (__chan_is_moving(chan))
- return;
-
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
!control->poll)
control->final = 1;
@@ -1237,40 +1188,6 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
-static bool __amp_capable(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct hci_dev *hdev;
- bool amp_available = false;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- read_lock(&hci_dev_list_lock);
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->amp_type != AMP_TYPE_BREDR &&
- test_bit(HCI_UP, &hdev->flags)) {
- amp_available = true;
- break;
- }
- }
- read_unlock(&hci_dev_list_lock);
-
- if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
- return amp_available;
-
- return false;
-}
-
-static bool l2cap_check_efs(struct l2cap_chan *chan)
-{
- /* Check EFS parameters */
- return true;
-}
-
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1286,76 +1203,6 @@ void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
-static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
-{
- struct l2cap_create_chan_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
- req.amp_id = amp_id;
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
- sizeof(req), &req);
-}
-
-static void l2cap_move_setup(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
-
- BT_DBG("chan %p", chan);
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
-
- chan->retry_count = 0;
- skb_queue_walk(&chan->tx_q, skb) {
- if (bt_cb(skb)->l2cap.retries)
- bt_cb(skb)->l2cap.retries = 1;
- else
- break;
- }
-
- chan->expected_tx_seq = chan->buffer_seq;
-
- clear_bit(CONN_REJ_ACT, &chan->conn_state);
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- l2cap_seq_list_clear(&chan->retrans_list);
- l2cap_seq_list_clear(&chan->srej_list);
- skb_queue_purge(&chan->srej_q);
-
- chan->tx_state = L2CAP_TX_STATE_XMIT;
- chan->rx_state = L2CAP_RX_STATE_MOVE;
-
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-}
-
-static void l2cap_move_done(struct l2cap_chan *chan)
-{
- u8 move_role = chan->move_role;
- BT_DBG("chan %p", chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- switch (move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
- chan->rx_state = L2CAP_RX_STATE_WAIT_F;
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- chan->rx_state = L2CAP_RX_STATE_WAIT_P;
- break;
- }
-}
-
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
/* The channel may have already been flagged as connected in
@@ -1505,10 +1352,7 @@ static void l2cap_le_start(struct l2cap_chan *chan)
static void l2cap_start_connection(struct l2cap_chan *chan)
{
- if (__amp_capable(chan)) {
- BT_DBG("chan %p AMP capable: discover AMPs", chan);
- a2mp_discover_amp(chan);
- } else if (chan->conn->hcon->type == LE_LINK) {
+ if (chan->conn->hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else {
l2cap_send_conn_req(chan);
@@ -1611,11 +1455,6 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
__clear_ack_timer(chan);
}
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_state_change(chan, BT_DISCONN);
- return;
- }
-
req.dcid = cpu_to_le16(chan->dcid);
req.scid = cpu_to_le16(chan->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
@@ -1754,11 +1593,6 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_lock(chan);
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -2067,9 +1901,6 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
BT_DBG("chan %p, skbs %p", chan, skbs);
- if (__chan_is_moving(chan))
- return;
-
skb_queue_splice_tail_init(skbs, &chan->tx_q);
while (!skb_queue_empty(&chan->tx_q)) {
@@ -2112,9 +1943,6 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return 0;
- if (__chan_is_moving(chan))
- return 0;
-
while (chan->tx_send_head &&
chan->unacked_frames < chan->remote_tx_win &&
chan->tx_state == L2CAP_TX_STATE_XMIT) {
@@ -2180,9 +2008,6 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- if (__chan_is_moving(chan))
- return;
-
while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
seq = l2cap_seq_list_pop(&chan->retrans_list);
@@ -2522,8 +2347,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
pdu_len = chan->conn->mtu;
/* Constrain PDU size for BR/EDR connections */
- if (!chan->hs_hcon)
- pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
if (chan->fcs)
@@ -3287,11 +3111,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->tx_q);
- chan->local_amp_id = AMP_ID_BREDR;
- chan->move_id = AMP_ID_BREDR;
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
if (chan->mode != L2CAP_MODE_ERTM)
return 0;
@@ -3326,52 +3145,19 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
struct l2cap_conf_rfc *rfc)
{
- if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
- u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
-
- * Class 1 devices must have ERTM timeouts
- * exceeding the Link Supervision Timeout. The
- * default Link Supervision Timeout for AMP
- * controllers is 10 seconds.
- *
- * Class 1 devices use 0xffffffff for their
- * best-effort flush timeout, so the clamping logic
- * will result in a timeout that meets the above
- * requirement. ERTM timeouts are 16-bit values, so
- * the maximum timeout is 65.535 seconds.
- */
-
- /* Convert timeout to milliseconds and round */
- ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
-
- /* This is the recommended formula for class 2 devices
- * that start ERTM timers when packets are sent to the
- * controller.
- */
- ertm_to = 3 * ertm_to + 500;
-
- if (ertm_to > 0xffff)
- ertm_to = 0xffff;
-
- rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
- rfc->monitor_timeout = rfc->retrans_timeout;
- } else {
- rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- }
+ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
@@ -3623,13 +3409,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
case L2CAP_CONF_EWS:
if (olen != 2)
break;
- if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -ECONNREFUSED;
- set_bit(FLAG_EXT_CTRL, &chan->flags);
- set_bit(CONF_EWS_RECV, &chan->conf_state);
- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
- chan->remote_tx_win = val;
- break;
+ return -ECONNREFUSED;
default:
if (hint)
@@ -4027,11 +3807,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- if (chan->hs_hcon)
- rsp_code = L2CAP_CREATE_CHAN_RSP;
- else
- rsp_code = L2CAP_CONN_RSP;
+ rsp_code = L2CAP_CONN_RSP;
BT_DBG("chan %p rsp_code %u", chan, rsp_code);
@@ -4190,7 +3966,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
- chan->local_amp_id = amp_id;
__l2cap_chan_add(conn, chan);
@@ -4516,10 +4291,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
/* check compatibility */
/* Send rsp for BR/EDR channel */
- if (!chan->hs_hcon)
- l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
- else
- chan->ident = cmd->ident;
+ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
}
unlock:
@@ -4571,15 +4343,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done;
}
- if (!chan->hs_hcon) {
- l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
- 0);
- } else {
- if (l2cap_check_efs(chan)) {
- amp_create_logical_link(chan);
- chan->ident = cmd->ident;
- }
- }
+ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
}
goto done;
@@ -4750,9 +4514,6 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
- if (conn->local_fixed_chan & L2CAP_FC_A2MP)
- feat_mask |= L2CAP_FEAT_EXT_FLOW
- | L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
@@ -4841,751 +4602,6 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
return 0;
}
-static int l2cap_create_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_create_chan_req *req = data;
- struct l2cap_create_chan_rsp rsp;
- struct l2cap_chan *chan;
- struct hci_dev *hdev;
- u16 psm, scid;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- psm = le16_to_cpu(req->psm);
- scid = le16_to_cpu(req->scid);
-
- BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
-
- /* For controller id 0 make BR/EDR connection */
- if (req->amp_id == AMP_ID_BREDR) {
- l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- return 0;
- }
-
- /* Validate AMP controller id */
- hdev = hci_dev_get(req->amp_id);
- if (!hdev)
- goto error;
-
- if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
- hci_dev_put(hdev);
- goto error;
- }
-
- chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- if (chan) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct hci_conn *hs_hcon;
-
- hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &conn->hcon->dst);
- if (!hs_hcon) {
- hci_dev_put(hdev);
- cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
- chan->dcid);
- return 0;
- }
-
- BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
-
- mgr->bredr_chan = chan;
- chan->hs_hcon = hs_hcon;
- chan->fcs = L2CAP_FCS_NONE;
- conn->mtu = hdev->block_mtu;
- }
-
- hci_dev_put(hdev);
-
- return 0;
-
-error:
- rsp.dcid = 0;
- rsp.scid = cpu_to_le16(scid);
- rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- return 0;
-}
-
-static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
-{
- struct l2cap_move_chan_req req;
- u8 ident;
-
- BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
-
- ident = l2cap_get_ident(chan->conn);
- chan->ident = ident;
-
- req.icid = cpu_to_le16(chan->scid);
- req.dest_amp_id = dest_amp_id;
-
- l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
- &req);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_rsp rsp;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- rsp.icid = cpu_to_le16(chan->dcid);
- rsp.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
-}
-
-static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- cfm.icid = cpu_to_le16(chan->scid);
- cfm.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
-
- cfm.icid = cpu_to_le16(icid);
- cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-}
-
-static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
-{
- struct l2cap_move_chan_cfm_rsp rsp;
-
- BT_DBG("icid 0x%4.4x", icid);
-
- rsp.icid = cpu_to_le16(icid);
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
-}
-
-static void __release_logical_link(struct l2cap_chan *chan)
-{
- chan->hs_hchan = NULL;
- chan->hs_hcon = NULL;
-
- /* Placeholder - release the logical link */
-}
-
-static void l2cap_logical_fail(struct l2cap_chan *chan)
-{
- /* Logical link setup failed */
- if (chan->state != BT_CONNECTED) {
- /* Create channel failure, disconnect */
- l2cap_send_disconn_req(chan, ECONNRESET);
- return;
- }
-
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_move_done(chan);
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
- break;
- case L2CAP_MOVE_ROLE_INITIATOR:
- if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
- chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
- /* Remote has only sent pending or
- * success responses, clean up
- */
- l2cap_move_done(chan);
- }
-
- /* Other amp move states imply that the move
- * has already aborted
- */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-}
-
-static void l2cap_logical_finish_create(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- struct l2cap_conf_rsp rsp;
-
- chan->hs_hchan = hchan;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
-
- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
- int err;
-
- set_default_fcs(chan);
-
- err = l2cap_ertm_init(chan);
- if (err < 0)
- l2cap_send_disconn_req(chan, -err);
- else
- l2cap_chan_ready(chan);
- }
-}
-
-static void l2cap_logical_finish_move(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- BT_DBG("move_state %d", chan->move_state);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent after a success
- * response is received
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- break;
- case L2CAP_MOVE_WAIT_LOGICAL_CFM:
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
- }
- break;
- default:
- /* Move was not in expected state, free the channel */
- __release_logical_link(chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- }
-}
-
-/* Call with chan locked */
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status)
-{
- BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
-
- if (status) {
- l2cap_logical_fail(chan);
- __release_logical_link(chan);
- return;
- }
-
- if (chan->state != BT_CONNECTED) {
- /* Ignore logical link if channel is on BR/EDR */
- if (chan->local_amp_id != AMP_ID_BREDR)
- l2cap_logical_finish_create(chan, hchan);
- } else {
- l2cap_logical_finish_move(chan, hchan);
- }
-}
-
-void l2cap_move_start(struct l2cap_chan *chan)
-{
- BT_DBG("chan %p", chan);
-
- if (chan->local_amp_id == AMP_ID_BREDR) {
- if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
- return;
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - start physical link setup */
- } else {
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- chan->move_id = 0;
- l2cap_move_setup(chan);
- l2cap_send_move_chan_req(chan, 0);
- }
-}
-
-static void l2cap_do_create(struct l2cap_chan *chan, int result,
- u8 local_amp_id, u8 remote_amp_id)
-{
- BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
- local_amp_id, remote_amp_id);
-
- chan->fcs = L2CAP_FCS_NONE;
-
- /* Outgoing channel on AMP */
- if (chan->state == BT_CONNECT) {
- if (result == L2CAP_CR_SUCCESS) {
- chan->local_amp_id = local_amp_id;
- l2cap_send_create_chan_req(chan, remote_amp_id);
- } else {
- /* Revert to BR/EDR connect */
- l2cap_send_conn_req(chan);
- }
-
- return;
- }
-
- /* Incoming channel on AMP */
- if (__l2cap_no_conn_pending(chan)) {
- struct l2cap_conn_rsp rsp;
- char buf[128];
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
-
- if (result == L2CAP_CR_SUCCESS) {
- /* Send successful response */
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- } else {
- /* Send negative response */
- rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- }
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- if (result == L2CAP_CR_SUCCESS) {
- l2cap_state_change(chan, BT_CONFIG);
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
- }
-}
-
-static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
- u8 remote_amp_id)
-{
- l2cap_move_setup(chan);
- chan->move_id = local_amp_id;
- chan->move_state = L2CAP_MOVE_WAIT_RSP;
-
- l2cap_send_move_chan_req(chan, remote_amp_id);
-}
-
-static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
-{
- struct hci_chan *hchan = NULL;
-
- /* Placeholder - get hci_chan for logical link */
-
- if (hchan) {
- if (hchan->state == BT_CONNECTED) {
- /* Logical link is ready to go */
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- } else {
- /* Wait for logical link to be ready */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- }
- } else {
- /* Logical link not available */
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
- }
-}
-
-static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
-{
- if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- u8 rsp_result;
- if (result == -EINVAL)
- rsp_result = L2CAP_MR_BAD_ID;
- else
- rsp_result = L2CAP_MR_NOT_ALLOWED;
-
- l2cap_send_move_chan_rsp(chan, rsp_result);
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
- chan->move_state = L2CAP_MOVE_STABLE;
-
- /* Restart data transmission */
- l2cap_ertm_send(chan);
-}
-
-/* Invoke with locked chan */
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
-{
- u8 local_amp_id = chan->local_amp_id;
- u8 remote_amp_id = chan->remote_amp_id;
-
- BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
- chan, result, local_amp_id, remote_amp_id);
-
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
- return;
-
- if (chan->state != BT_CONNECTED) {
- l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
- } else if (result != L2CAP_MR_SUCCESS) {
- l2cap_do_move_cancel(chan, result);
- } else {
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_do_move_initiate(chan, local_amp_id,
- remote_amp_id);
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_do_move_respond(chan, result);
- break;
- default:
- l2cap_do_move_cancel(chan, result);
- break;
- }
- }
-}
-
-static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_req *req = data;
- struct l2cap_move_chan_rsp rsp;
- struct l2cap_chan *chan;
- u16 icid = 0;
- u16 result = L2CAP_MR_NOT_ALLOWED;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- icid = le16_to_cpu(req->icid);
-
- BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- rsp.icid = cpu_to_le16(icid);
- rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
- return 0;
- }
-
- chan->ident = cmd->ident;
-
- if (chan->scid < L2CAP_CID_DYN_START ||
- chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
- (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING)) {
- result = L2CAP_MR_NOT_ALLOWED;
- goto send_move_response;
- }
-
- if (chan->local_amp_id == req->dest_amp_id) {
- result = L2CAP_MR_SAME_ID;
- goto send_move_response;
- }
-
- if (req->dest_amp_id != AMP_ID_BREDR) {
- struct hci_dev *hdev;
- hdev = hci_dev_get(req->dest_amp_id);
- if (!hdev || hdev->dev_type != HCI_AMP ||
- !test_bit(HCI_UP, &hdev->flags)) {
- if (hdev)
- hci_dev_put(hdev);
-
- result = L2CAP_MR_BAD_ID;
- goto send_move_response;
- }
- hci_dev_put(hdev);
- }
-
- /* Detect a move collision. Only send a collision response
- * if this side has "lost", otherwise proceed with the move.
- * The winner has the larger bd_addr.
- */
- if ((__chan_is_moving(chan) ||
- chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
- bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
- result = L2CAP_MR_COLLISION;
- goto send_move_response;
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- l2cap_move_setup(chan);
- chan->move_id = req->dest_amp_id;
-
- if (req->dest_amp_id == AMP_ID_BREDR) {
- /* Moving to BR/EDR */
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- result = L2CAP_MR_PEND;
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- result = L2CAP_MR_SUCCESS;
- }
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - uncomment when amp functions are available */
- /*amp_accept_physical(chan, req->dest_amp_id);*/
- result = L2CAP_MR_PEND;
- }
-
-send_move_response:
- l2cap_send_move_chan_rsp(chan, result);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
-{
- struct l2cap_chan *chan;
- struct hci_chan *hchan = NULL;
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan) {
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
- if (result == L2CAP_MR_PEND)
- __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent when logical link
- * is complete.
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- break;
- case L2CAP_MOVE_WAIT_RSP_SUCCESS:
- if (result == L2CAP_MR_PEND) {
- break;
- } else if (test_bit(CONN_LOCAL_BUSY,
- &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else {
- /* Logical link is up or moving to BR/EDR,
- * proceed with move
- */
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- }
- break;
- case L2CAP_MOVE_WAIT_RSP:
- /* Moving to AMP */
- if (result == L2CAP_MR_SUCCESS) {
- /* Remote is ready, send confirm immediately
- * after logical link is ready
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- } else {
- /* Both logical link and move success
- * are required to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
- }
-
- /* Placeholder - get hci_chan for logical link */
- if (!hchan) {
- /* Logical link not available */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-
- /* If the logical link is not yet connected, do not
- * send confirmation.
- */
- if (hchan->state != BT_CONNECTED)
- break;
-
- /* Logical link is already ready to go */
-
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- if (result == L2CAP_MR_SUCCESS) {
- /* Can confirm now */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else {
- /* Now only need move success
- * to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- }
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- break;
- default:
- /* Any other amp move state means the move failed. */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
- u16 result)
-{
- struct l2cap_chan *chan;
-
- chan = l2cap_get_chan_by_ident(conn, ident);
- if (!chan) {
- /* Could not locate channel, icid is best guess */
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
-
- if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- if (result == L2CAP_MR_COLLISION) {
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- } else {
- /* Cleanup - cancel move */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- }
- }
-
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_rsp *rsp = data;
- u16 icid, result;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
- result = le16_to_cpu(rsp->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
- l2cap_move_continue(conn, icid, result);
- else
- l2cap_move_fail(conn, cmd->ident, icid, result);
-
- return 0;
-}
-
-static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm *cfm = data;
- struct l2cap_chan *chan;
- u16 icid, result;
-
- if (cmd_len != sizeof(*cfm))
- return -EPROTO;
-
- icid = le16_to_cpu(cfm->icid);
- result = le16_to_cpu(cfm->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- /* Spec requires a response even if the icid was not found */
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
- return 0;
- }
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
- if (result == L2CAP_MC_CONFIRMED) {
- chan->local_amp_id = chan->move_id;
- if (chan->local_amp_id == AMP_ID_BREDR)
- __release_logical_link(chan);
- } else {
- chan->move_id = chan->local_amp_id;
- }
-
- l2cap_move_done(chan);
- }
-
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm_rsp *rsp = data;
- struct l2cap_chan *chan;
- u16 icid;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
-
- BT_DBG("icid 0x%4.4x", icid);
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan)
- return 0;
-
- __clear_chan_timer(chan);
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
- chan->local_amp_id = chan->move_id;
-
- if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
- __release_logical_link(chan);
-
- l2cap_move_done(chan);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd,
u16 cmd_len, u8 *data)
@@ -5745,7 +4761,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_CONN_RSP:
- case L2CAP_CREATE_CHAN_RSP:
l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
break;
@@ -5780,26 +4795,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
l2cap_information_rsp(conn, cmd, cmd_len, data);
break;
- case L2CAP_CREATE_CHAN_REQ:
- err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_REQ:
- err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_RSP:
- l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM:
- err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM_RSP:
- l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
- break;
-
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -7051,8 +6046,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
if (control->final) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
- !__chan_is_moving(chan)) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
control->final = 0;
l2cap_retransmit_all(chan, control);
}
@@ -7245,11 +6240,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
chan->rx_state = L2CAP_RX_STATE_RECV;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
return l2cap_resegment(chan);
}
@@ -7316,11 +6307,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
*/
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
err = l2cap_resegment(chan);
@@ -7672,21 +6659,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
- if (cid == L2CAP_CID_A2MP) {
- chan = a2mp_channel_create(conn, skb);
- if (!chan) {
- kfree_skb(skb);
- return;
- }
-
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
- } else {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
- kfree_skb(skb);
- return;
- }
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
}
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -7887,10 +6863,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
- if (hcon->type == ACL_LINK &&
- hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
- conn->local_fixed_chan |= L2CAP_FC_A2MP;
-
if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
(bredr_sc_enabled(hcon->hdev) ||
hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
@@ -7953,7 +6925,7 @@ static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
}
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type)
+ bdaddr_t *dst, u8 dst_type, u16 timeout)
{
struct l2cap_conn *conn;
struct hci_conn *hcon;
@@ -8046,19 +7018,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
hcon = hci_connect_le(hdev, dst, dst_type, false,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
+ chan->sec_level, timeout,
HCI_ROLE_SLAVE);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
- chan->sec_level,
- HCI_LE_CONN_TIMEOUT,
+ chan->sec_level, timeout,
CONN_REASON_L2CAP_CHAN);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
- CONN_REASON_L2CAP_CHAN);
+ CONN_REASON_L2CAP_CHAN, timeout);
}
if (IS_ERR(hcon)) {
@@ -8355,11 +7325,6 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (!status && encrypt)
chan->sec_level = hcon->sec_level;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e50d3d102078..4287aa6cc988 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -254,7 +254,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
chan->mode = L2CAP_MODE_LE_FLOWCTL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr, la.l2_bdaddr_type);
+ &la.l2_bdaddr, la.l2_bdaddr_type,
+ sk->sk_sndtimeo);
if (err)
return err;
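
With the extra parameter, the L2CAP connect timeout now follows the socket's send timeout (sk_sndtimeo) rather than the fixed HCI_LE_CONN_TIMEOUT, so it becomes tunable per socket. A userspace-side sketch (illustrative values):

	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	/* Set before connect(); the kernel passes sk->sk_sndtimeo down as
	 * the HCI connection timeout for this L2CAP channel.
	 */
	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));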
@@ -1027,23 +1028,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
- err = -EINVAL;
- break;
- }
-
- if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
- err = -EOPNOTSUPP;
- break;
- }
-
- chan->chan_policy = (u8) opt;
-
- if (sk->sk_state == BT_CONNECTED &&
- chan->move_role == L2CAP_MOVE_ROLE_NONE)
- l2cap_move_start(chan);
-
+ err = -EOPNOTSUPP;
break;
case BT_SNDMTU:
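
Note: since l2cap_sock_connect() now forwards sk->sk_sndtimeo as the connection timeout, the socket send timeout bounds the HCI connection attempt itself. A userspace sketch (assuming the usual L2CAP socket setup, which is not part of this patch):

	struct timeval tv = { .tv_sec = 5 };	/* 5 s connection timeout */

	setsockopt(s, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
	connect(s, (struct sockaddr *)&addr, sizeof(addr));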
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ee3b4aad8bd8..32ed6e9245a3 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -835,8 +835,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
settings |= MGMT_SETTING_SSP;
- if (IS_ENABLED(CONFIG_BT_HS))
- settings |= MGMT_SETTING_HS;
}
if (lmp_sc_capable(hdev))
@@ -901,9 +899,6 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
settings |= MGMT_SETTING_SSP;
- if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
- settings |= MGMT_SETTING_HS;
-
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
settings |= MGMT_SETTING_ADVERTISING;
@@ -1390,6 +1385,14 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ if (!cp->val) {
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+ }
+
if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
MGMT_STATUS_BUSY);
@@ -1409,7 +1412,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
/* Cancel potentially blocking sync operation before power off */
if (cp->val == 0x00) {
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
mgmt_set_powered_complete);
} else {
@@ -1704,8 +1707,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- if (cmd)
- mgmt_pending_remove(cmd);
+ mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
}
@@ -1930,7 +1932,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
if (enable && hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED)) {
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
new_settings(hdev, NULL);
}
@@ -1943,12 +1944,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
} else {
changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
-
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -2012,11 +2007,6 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
} else {
changed = hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED);
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2062,63 +2052,10 @@ failed:
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
- struct mgmt_mode *cp = data;
- bool changed;
- u8 status;
- int err;
-
bt_dev_dbg(hdev, "sock %p", sk);
- if (!IS_ENABLED(CONFIG_BT_HS))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
MGMT_STATUS_NOT_SUPPORTED);
-
- status = mgmt_bredr_support(hdev);
- if (status)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
-
- if (!lmp_ssp_capable(hdev))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_NOT_SUPPORTED);
-
- if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
-
- if (cp->val != 0x00 && cp->val != 0x01)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_INVALID_PARAMS);
-
- hci_dev_lock(hdev);
-
- if (pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- if (cp->val) {
- changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
- } else {
- if (hdev_is_powered(hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
- goto unlock;
- }
-
- changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
- if (err < 0)
- goto unlock;
-
- if (changed)
- err = new_settings(hdev, sk);
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
@@ -3188,6 +3125,7 @@ failed:
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
+ case ISO_LINK:
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
@@ -3505,7 +3443,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->addr.type == BDADDR_BREDR) {
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
- auth_type, CONN_REASON_PAIR_DEVICE);
+ auth_type, CONN_REASON_PAIR_DEVICE,
+ HCI_ACL_CONN_TIMEOUT);
} else {
u8 addr_type = le_addr_type(cp->addr.type);
struct hci_conn_params *p;
@@ -6766,7 +6705,6 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
@@ -8470,7 +8408,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
static u8 calculate_name_len(struct hci_dev *hdev)
{
- u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
return eir_append_local_name(hdev, buf, 0);
}
@@ -8829,8 +8767,7 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
}
unlock:
- if (cmd)
- mgmt_pending_free(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
@@ -9681,6 +9618,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u16 eir_len = 0;
u32 flags = 0;
+ if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ return;
+
/* allocate buff for LE or BR/EDR adv */
if (conn->le_adv_data_len > 0)
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
@@ -9748,6 +9688,9 @@ bool mgmt_powering_down(struct hci_dev *hdev)
struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
+ return true;
+
cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
if (!cmd)
return false;
@@ -9766,14 +9709,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct mgmt_ev_device_disconnected ev;
struct sock *sk = NULL;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
if (!mgmt_connected)
return;
@@ -9830,14 +9765,6 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
{
struct mgmt_ev_connect_failed ev;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
@@ -10071,6 +9998,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
/* If this is a HCI command related to powering on the
* HCI dev don't send any mgmt signals.
*/
+ if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
+ return;
+
if (pending_find(MGMT_OP_SET_POWERED, hdev))
return;
}
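
Note: with high speed support removed, the whole MGMT_OP_SET_HS handler collapses to a stub. The resulting function, reconstructed from the set_hs() hunk above, is:

	static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
	{
		bt_dev_dbg(hdev, "sock %p", sk);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
	}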
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 630e3023273b..9612c5d1b13f 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -875,6 +875,7 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
remove = true;
goto done;
}
+
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
cp->rssi_high = address_filter->rssi_high;
cp->rssi_low = address_filter->rssi_low;
@@ -887,6 +888,8 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
HCI_CMD_TIMEOUT);
+ kfree(cp);
+
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to enable address %pMR filter",
&address_filter->bdaddr);
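
Note: the msft.c hunk fixes a leak — the command buffer was never freed once __hci_cmd_sync() had been issued. The pattern the fix enforces, as a minimal sketch (allocation details omitted):

	cp = kmalloc(size, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;
	/* ... fill *cp ... */
	skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
			     HCI_CMD_TIMEOUT);
	kfree(cp);		/* freed on every path, success or not */
	if (IS_ERR(skb))
		return PTR_ERR(skb);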
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index c736186aba26..43daf965a01e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -264,7 +264,8 @@ static int sco_connect(struct sock *sk)
}
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
- sco_pi(sk)->setting, &sco_pi(sk)->codec);
+ sco_pi(sk)->setting, &sco_pi(sk)->codec,
+ sk->sk_sndtimeo);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 8906f7bdf4a9..de33dc1b0daa 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -7,7 +7,7 @@
#include <linux/bpf.h>
#include <linux/btf.h>
-extern struct bpf_struct_ops bpf_bpf_dummy_ops;
+static struct bpf_struct_ops bpf_bpf_dummy_ops;
/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
@@ -22,6 +22,8 @@ struct bpf_dummy_ops_test_args {
struct bpf_dummy_ops_state state;
};
+static struct btf *bpf_dummy_ops_btf;
+
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
@@ -89,10 +91,17 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
struct bpf_tramp_link *link = NULL;
void *image = NULL;
unsigned int op_idx;
+ u32 image_off = 0;
int prog_ret;
+ s32 type_id;
int err;
- if (prog->aux->attach_btf_id != st_ops->type_id)
+ type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
+ bpf_bpf_dummy_ops.name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ if (prog->aux->attach_btf_id != type_id)
return -EOPNOTSUPP;
func_proto = prog->aux->attach_func_proto;
@@ -106,12 +115,6 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
goto out;
}
- image = arch_alloc_bpf_trampoline(PAGE_SIZE);
- if (!image) {
- err = -ENOMEM;
- goto out;
- }
-
link = kzalloc(sizeof(*link), GFP_USER);
if (!link) {
err = -ENOMEM;
@@ -125,7 +128,8 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
&st_ops->func_models[op_idx],
&dummy_ops_test_ret_function,
- image, image + PAGE_SIZE);
+ &image, &image_off,
+ true);
if (err < 0)
goto out;
@@ -139,7 +143,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
err = -EFAULT;
out:
kfree(args);
- arch_free_bpf_trampoline(image, PAGE_SIZE);
+ bpf_struct_ops_image_free(image);
if (link)
bpf_link_put(&link->link);
kfree(tlinks);
@@ -148,6 +152,7 @@ out:
static int bpf_dummy_init(struct btf *btf)
{
+ bpf_dummy_ops_btf = btf;
return 0;
}
@@ -169,7 +174,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
case offsetof(struct bpf_dummy_ops, test_sleepable):
break;
default:
- if (prog->aux->sleepable)
+ if (prog->sleepable)
return -EINVAL;
}
@@ -247,7 +252,7 @@ static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
.test_sleepable = bpf_dummy_test_sleepable,
};
-struct bpf_struct_ops bpf_bpf_dummy_ops = {
+static struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
.check_member = bpf_dummy_ops_check_member,
@@ -256,4 +261,11 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = {
.unreg = bpf_dummy_unreg,
.name = "bpf_dummy_ops",
.cfi_stubs = &__bpf_bpf_dummy_ops,
+ .owner = THIS_MODULE,
};
+
+static int __init bpf_dummy_struct_ops_init(void)
+{
+ return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
+}
+late_initcall(bpf_dummy_struct_ops_init);
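
Note: the dummy ops now register like any other struct_ops type instead of being listed in a build-time table. A hypothetical subsystem would follow the same shape (all "my_*" names are made up for illustration):

	static struct bpf_struct_ops bpf_my_ops = {
		.verifier_ops	= &my_verifier_ops,
		.init		= my_btf_init,
		.name		= "my_ops",
		.owner		= THIS_MODULE,
	};

	static int __init my_ops_init(void)
	{
		return register_bpf_struct_ops(&bpf_my_ops, my_ops);
	}
	late_initcall(my_ops_init);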
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index dfd919374017..61efeadaff8d 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -12,6 +12,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
+#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
@@ -254,7 +255,8 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
int i, n;
LIST_HEAD(list);
- n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
+ n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
+ (void **)skbs);
if (unlikely(n == 0)) {
for (i = 0; i < nframes; i++)
xdp_return_frame(frames[i]);
@@ -617,21 +619,21 @@ CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_test_modify_return_ids)
+BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
-BTF_SET8_END(bpf_test_modify_return_ids)
+BTF_KFUNCS_END(bpf_test_modify_return_ids)
static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
.owner = THIS_MODULE,
.set = &bpf_test_modify_return_ids,
};
-BTF_SET8_START(test_sk_check_kfunc_ids)
+BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
-BTF_SET8_END(test_sk_check_kfunc_ids)
+BTF_KFUNCS_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
u32 size, u32 headroom, u32 tailroom)
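
Note: BTF_KFUNCS_START/END supersede BTF_SET8_START/END for kfunc ID sets while the registration shape stays unchanged. A sketch with a hypothetical kfunc:

	BTF_KFUNCS_START(my_kfunc_ids)
	BTF_ID_FLAGS(func, bpf_my_kfunc, KF_SLEEPABLE)
	BTF_KFUNCS_END(my_kfunc_ids)

	static const struct btf_kfunc_id_set my_kfunc_set = {
		.owner	= THIS_MODULE,
		.set	= &my_kfunc_ids,
	};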
diff --git a/net/bridge/br.c b/net/bridge/br.c
index ac19b797dbec..2cab878e0a39 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -356,26 +356,21 @@ void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
clear_bit(opt, &br->options);
}
-static void __net_exit br_net_exit_batch(struct list_head *net_list)
+static void __net_exit br_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net_device *dev;
struct net *net;
- LIST_HEAD(list);
-
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
- br_dev_delete(dev, &list);
-
- unregister_netdevice_many(&list);
-
- rtnl_unlock();
+ br_dev_delete(dev, dev_to_kill);
}
static struct pernet_operations br_net_ops = {
- .exit_batch = br_net_exit_batch,
+ .exit_batch_rtnl = br_net_exit_batch_rtnl,
};
static const struct stp_proto br_stp_proto = {
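
Note: the bridge is converted to the new ->exit_batch_rtnl() pernet hook — the core takes rtnl_lock once for the whole batch and unregisters everything queued on dev_to_kill itself, so the per-subsystem lock/unregister/unlock dance removed above disappears. A skeleton for a subsystem doing the same conversion (names hypothetical):

	static void __net_exit my_exit_batch_rtnl(struct list_head *net_list,
						  struct list_head *dev_to_kill)
	{
		struct net *net;

		ASSERT_RTNL();
		/* queue this subsystem's netdevs on dev_to_kill; the core
		 * calls unregister_netdevice_many() for the whole batch
		 */
		list_for_each_entry(net, net_list, exit_list)
			my_collect_devices(net, dev_to_kill);
	}

	static struct pernet_operations my_net_ops = {
		.exit_batch_rtnl = my_exit_batch_rtnl,
	};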
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 65cee0ad3c1b..c366ccc8b3db 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -108,38 +108,23 @@ out:
return NETDEV_TX_OK;
}
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
-static void br_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = br_fdb_hash_init(br);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = br_mdb_hash_init(br);
if (err) {
- free_percpu(dev->tstats);
br_fdb_hash_fini(br);
return err;
}
err = br_vlan_init(br);
if (err) {
- free_percpu(dev->tstats);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
@@ -147,14 +132,14 @@ static int br_dev_init(struct net_device *dev)
err = br_multicast_init_stats(br);
if (err) {
- free_percpu(dev->tstats);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
+ return err;
}
- br_set_lockdep_class(dev);
- return err;
+ netdev_lockdep_set_classes(dev);
+ return 0;
}
static void br_dev_uninit(struct net_device *dev)
@@ -166,7 +151,6 @@ static void br_dev_uninit(struct net_device *dev)
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
- free_percpu(dev->tstats);
}
static int br_dev_open(struct net_device *dev)
@@ -481,7 +465,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fill_forward_path = br_fill_forward_path,
};
-static struct device_type br_type = {
+static const struct device_type br_type = {
.name = "bridge",
};
@@ -503,6 +487,7 @@ void br_dev_setup(struct net_device *dev)
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
br->dev = dev;
spin_lock_init(&br->lock);
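
Note: br_dev_init() no longer open-codes per-CPU tstats management. Declaring the stat type is enough; dev_get_stats() falls back to dev_get_tstats64() for such devices (see the net/core/dev.c hunk further down). Sketch:

	static void my_dev_setup(struct net_device *dev)
	{
		/* core allocates, frees and reads the per-CPU tstats */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	}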
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c622de5eccd0..c77591e63841 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -35,10 +35,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly;
int __init br_fdb_init(void)
{
- br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
- sizeof(struct net_bridge_fdb_entry),
- 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
if (!br_fdb_cache)
return -ENOMEM;
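
Note: KMEM_CACHE() derives the cache name, size and alignment from the struct itself, so the conversion above is equivalent to the following (note the cache name changes from "bridge_fdb_cache" to "net_bridge_fdb_entry"):

	br_fdb_cache = kmem_cache_create("net_bridge_fdb_entry",
					 sizeof(struct net_bridge_fdb_entry),
					 __alignof__(struct net_bridge_fdb_entry),
					 SLAB_HWCACHE_ALIGN, NULL);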
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5ad4abfcb7ba..2cf4fc756263 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
u32 filter_mask, const struct net_device *dev,
bool getlink)
{
- u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+ u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
+ IF_OPER_DOWN;
struct nlattr *af = NULL;
struct net_bridge *br;
struct ifinfomsg *hdr;
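
Note: operstate is now read locklessly here, so the READ_ONCE() must pair with WRITE_ONCE() on every writer; presumably the writer side (changed elsewhere in this series, not shown in this hunk) follows the usual pattern:

	WRITE_ONCE(dev->operstate, newstate);	/* writer, under RTNL */
	...
	op = netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN;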
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 15f44d026e75..9c2fffb827ab 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -841,7 +841,7 @@ void br_vlan_flush(struct net_bridge *br)
vg = br_vlan_group(br);
__vlan_flush(br, NULL, vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
@@ -1372,7 +1372,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
vg = nbp_vlan_group(port);
__vlan_flush(port->br, port, vg);
RCU_INIT_POINTER(port->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7f304a19ac1b..104c0125e32e 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -39,6 +39,10 @@ config NF_CONNTRACK_BRIDGE
To compile it as a module, choose M here. If unsure, say N.
+# old sockopt interface and eval loop
+config BRIDGE_NF_EBTABLES_LEGACY
+ tristate
+
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
@@ -55,6 +59,7 @@ if BRIDGE_NF_EBTABLES
#
config BRIDGE_EBT_BROUTE
tristate "ebt: broute table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables broute table is used to define rules that decide between
bridging and routing frames, giving Linux the functionality of a
@@ -65,6 +70,7 @@ config BRIDGE_EBT_BROUTE
config BRIDGE_EBT_T_FILTER
tristate "ebt: filter table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables filter table is used to define frame filtering rules at
local input, forwarding and local output. See the man page for
@@ -74,6 +80,7 @@ config BRIDGE_EBT_T_FILTER
config BRIDGE_EBT_T_NAT
tristate "ebt: nat table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables nat table is used to define rules that alter the MAC
source address (MAC SNAT) or the MAC destination address (MAC DNAT).
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 1c9ce49ab651..b9a1303da977 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK_BRIDGE) += nf_conntrack_bridge.o
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
+obj-$(CONFIG_BRIDGE_NF_EBTABLES_LEGACY) += ebtables.o
# tables
obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7343fd487dbe..707576eeeb58 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -865,6 +865,8 @@ static __init int can_init(void)
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, len) !=
+ offsetof(struct canxl_frame, flags) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9168114fc87f..27d5fcf0eac9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -72,9 +72,11 @@
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
/* use of last_frames[index].flags */
+#define RX_LOCAL 0x10 /* frame was created on the local host */
+#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
-#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
+#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
@@ -138,6 +140,16 @@ static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
+/* Return pointer to store the extra msg flags for bcm_recvmsg().
+ * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+ * in skb->cb.
+ */
+static inline unsigned int *bcm_flags(struct sk_buff *skb)
+{
+ /* return pointer after struct sockaddr_can */
+ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
+}
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -325,6 +337,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct sock *sk = op->sk;
unsigned int datalen = head->nframes * op->cfsiz;
int err;
+ unsigned int *pflags;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -332,6 +345,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
skb_put_data(skb, head, sizeof(*head));
+ /* ensure space for sockaddr_can and msg flags */
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+ sizeof(unsigned int));
+
+ /* initialize msg flags */
+ pflags = bcm_flags(skb);
+ *pflags = 0;
+
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
@@ -344,8 +365,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* relevant for updates that are generated by the
* BCM, where nframes is 1
*/
- if (head->nframes == 1)
+ if (head->nframes == 1) {
+ if (firstframe->flags & RX_LOCAL)
+ *pflags |= MSG_DONTROUTE;
+ if (firstframe->flags & RX_OWN)
+ *pflags |= MSG_CONFIRM;
+
firstframe->flags &= BCM_CAN_FLAGS_MASK;
+ }
}
if (has_timestamp) {
@@ -360,7 +387,6 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* containing the interface index.
*/
- sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
@@ -444,7 +470,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
op->frames_filtered = op->frames_abs = 0;
/* this element is not throttled anymore */
- data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ data->flags &= ~RX_THR;
memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
@@ -465,13 +491,17 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct canfd_frame *lastdata,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
memcpy(lastdata, rxdata, op->cfsiz);
/* mark as used and throttled by default */
lastdata->flags |= (RX_RECV|RX_THR);
+ /* add own/local/remote traffic flags */
+ lastdata->flags |= traffic_flags;
+
/* throttling mode inactive ? */
if (!op->kt_ival2) {
/* send RX_CHANGED to the user immediately */
@@ -508,7 +538,8 @@ rx_changed_settime:
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
struct canfd_frame *cf = op->frames + op->cfsiz * index;
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
@@ -521,7 +552,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (!(lcf->flags & RX_RECV)) {
/* received data for the first time => send update to user */
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
@@ -529,7 +560,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
for (i = 0; i < rxdata->len; i += 8) {
if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
(get_u64(cf, i) & get_u64(lcf, i))) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -537,7 +568,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (op->flags & RX_CHECK_DLC) {
/* do a real check in CAN frame length */
if (rxdata->len != lcf->len) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -644,6 +675,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
struct bcm_op *op = (struct bcm_op *)data;
const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
unsigned int i;
+ unsigned char traffic_flags;
if (op->can_id != rxframe->can_id)
return;
@@ -673,15 +705,24 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
return;
}
+ /* compute flags to distinguish between own/local/remote CAN traffic */
+ traffic_flags = 0;
+ if (skb->sk) {
+ traffic_flags |= RX_LOCAL;
+ if (skb->sk == op->sk)
+ traffic_flags |= RX_OWN;
+ }
+
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, op->last_frames, rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe,
+ traffic_flags);
goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, rxframe);
+ bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
goto rx_starttimer;
}
@@ -698,7 +739,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
(get_u64(op->frames, 0) &
get_u64(op->frames + op->cfsiz * i, 0))) {
- bcm_rx_cmp_to_index(op, i, rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe,
+ traffic_flags);
break;
}
}
@@ -1675,6 +1717,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
+ /* assign the flags that have been recorded in bcm_send_to_user() */
+ msg->msg_flags |= *(bcm_flags(skb));
+
skb_free_datagram(sk, skb);
return size;
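
Note: from userspace, the new RX_LOCAL/RX_OWN bookkeeping surfaces as recvmsg() flags on BCM sockets — MSG_DONTROUTE marks frames that originated on the local host, MSG_CONFIRM marks frames sent on the very socket that receives them. A sketch:

	ssize_t n = recvmsg(s, &msg, 0);

	if (n > 0 && (msg.msg_flags & MSG_CONFIRM))
		;	/* echo of a frame this socket sent itself */
	else if (n > 0 && (msg.msg_flags & MSG_DONTROUTE))
		;	/* frame from another sender on this host */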
diff --git a/net/can/isotp.c b/net/can/isotp.c
index d1c6f206f429..25bac0fafc83 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -381,8 +381,9 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
return 1;
}
- /* get communication parameters only from the first FC frame */
- if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+ /* get static/dynamic communication params from first/every FC frame */
+ if (so->tx.state == ISOTP_WAIT_FIRST_FC ||
+ so->opt.flags & CAN_ISOTP_DYN_FC_PARMS) {
so->txfc.bs = cf->data[ae + 1];
so->txfc.stmin = cf->data[ae + 2];
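
Note: a receiver opts in to the new behaviour with the CAN_ISOTP_DYN_FC_PARMS flag, so BS/STmin from every flow-control frame take effect rather than only the first. Assuming the usual isotp sockopt plumbing from <linux/can/isotp.h> (not part of this hunk):

	struct can_isotp_options opts = {
		.flags = CAN_ISOTP_DYN_FC_PARMS,
	};

	setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));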
diff --git a/net/can/raw.c b/net/can/raw.c
index e6b822624ba2..00533f64d69d 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -91,6 +91,10 @@ struct raw_sock {
int recv_own_msgs;
int fd_frames;
int xl_frames;
+ struct can_raw_vcid_options raw_vcid_opts;
+ canid_t tx_vcid_shifted;
+ canid_t rx_vcid_shifted;
+ canid_t rx_vcid_mask_shifted;
int join_filters;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
@@ -134,10 +138,29 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
return;
/* make sure to not pass oversized frames to the socket */
- if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
- (!ro->xl_frames && can_is_canxl_skb(oskb)))
+ if (!ro->fd_frames && can_is_canfd_skb(oskb))
return;
+ if (can_is_canxl_skb(oskb)) {
+ struct canxl_frame *cxl = (struct canxl_frame *)oskb->data;
+
+ /* make sure to not pass oversized frames to the socket */
+ if (!ro->xl_frames)
+ return;
+
+ /* filter CAN XL VCID content */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
+ /* apply VCID filter if user enabled the filter */
+ if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
+ (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
+ return;
+ } else {
+ /* no filter => do not forward VCID tagged frames */
+ if (cxl->prio & CANXL_VCID_MASK)
+ return;
+ }
+ }
+
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
@@ -698,6 +721,19 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
ro->fd_frames = ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS:
+ if (optlen != sizeof(ro->raw_vcid_opts))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
+ return -EFAULT;
+
+ /* prepare 32 bit values for handling in hot path */
+ ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
+ break;
+
case CAN_RAW_JOIN_FILTERS:
if (optlen != sizeof(ro->join_filters))
return -EINVAL;
@@ -720,7 +756,6 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
struct raw_sock *ro = raw_sk(sk);
int len;
void *val;
- int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
@@ -730,7 +765,9 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
switch (optname) {
- case CAN_RAW_FILTER:
+ case CAN_RAW_FILTER: {
+ int err = 0;
+
lock_sock(sk);
if (ro->count > 0) {
int fsize = ro->count * sizeof(struct can_filter);
@@ -755,7 +792,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
if (!err)
err = put_user(len, optlen);
return err;
-
+ }
case CAN_RAW_ERR_FILTER:
if (len > sizeof(can_err_mask_t))
len = sizeof(can_err_mask_t);
@@ -786,6 +823,25 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS: {
+ int err = 0;
+
+ /* user space buffer too small for VCID opts? */
+ if (len < sizeof(ro->raw_vcid_opts)) {
+ /* return -ERANGE and needed space in optlen */
+ err = -ERANGE;
+ if (put_user(sizeof(ro->raw_vcid_opts), optlen))
+ err = -EFAULT;
+ } else {
+ if (len > sizeof(ro->raw_vcid_opts))
+ len = sizeof(ro->raw_vcid_opts);
+ if (copy_to_user(optval, &ro->raw_vcid_opts, len))
+ err = -EFAULT;
+ }
+ if (!err)
+ err = put_user(len, optlen);
+ return err;
+ }
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
@@ -803,23 +859,41 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
+static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
+{
+ struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ /* sanitize non CAN XL bits */
+ cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK);
+
+ /* clear VCID in CAN XL frame if pass through is disabled */
+ if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
+ cxl->prio &= CANXL_PRIO_MASK;
+
+ /* set VCID in CAN XL frame if enabled */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
+ cxl->prio &= CANXL_PRIO_MASK;
+ cxl->prio |= ro->tx_vcid_shifted;
+ }
+}
+
+static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
/* Classical CAN -> no checks for flags and device capabilities */
if (can_is_can_skb(skb))
- return false;
+ return CAN_MTU;
/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
(mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
- return false;
+ return CANFD_MTU;
/* CAN XL -> needs to be enabled and a CAN XL device */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
can_is_canxl_dev_mtu(mtu))
- return false;
+ return CANXL_MTU;
- return true;
+ return 0;
}
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -829,6 +903,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int txmtu;
int ifindex;
int err = -EINVAL;
@@ -869,9 +944,16 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
goto free_skb;
err = -EINVAL;
- if (raw_bad_txframe(ro, skb, dev->mtu))
+
+ /* check for valid CAN (CC/FD/XL) frame content */
+ txmtu = raw_check_txframe(ro, skb, dev->mtu);
+ if (!txmtu)
goto free_skb;
+ /* only CANXL: clear/forward/set VCID value */
+ if (txmtu == CANXL_MTU)
+ raw_put_canxl_vcid(ro, skb);
+
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
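
Note: putting the raw_setsockopt() hunk to use, a CAN_RAW socket can now tag outgoing CAN XL frames with a virtual CAN network ID and filter incoming ones by it. Field and flag names are as in the hunk above; the concrete VCID values are illustrative:

	struct can_raw_vcid_options vcid = {
		.flags        = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER,
		.tx_vcid      = 0x23,
		.rx_vcid      = 0x23,
		.rx_vcid_mask = 0xff,
	};

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS, &vcid, sizeof(vcid));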
diff --git a/net/core/Makefile b/net/core/Makefile
index 821aec06abf1..6e6548011fae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -18,6 +18,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
+obj-y += hotdata.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
diff --git a/net/core/dev.c b/net/core/dev.c
index a892f7265189..0766a245816b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,41 +153,21 @@
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
+#include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
-struct list_head ptype_all __read_mostly; /* Taps */
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
-/*
- * The @dev_base_head list is protected by @dev_base_lock and the rtnl
- * semaphore.
- *
- * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
- *
- * Writers must hold the rtnl semaphore while they loop through the
- * dev_base_head list, and hold dev_base_lock for writing when they do the
- * actual updates. This allows pure readers to access the list even
- * while a writer is preparing to update it.
- *
- * To put it another way, dev_base_lock is held for writing only to
- * protect against pure readers; the rtnl semaphore provides the
- * protection against other writers.
- *
- * See, for example usages, register_netdevice() and
- * unregister_netdevice(), which must be called with the rtnl
- * semaphore held.
- */
-DEFINE_RWLOCK(dev_base_lock);
-EXPORT_SYMBOL(dev_base_lock);
-
static DEFINE_MUTEX(ifalias_mutex);
/* protects napi_hash addition/deletion and napi_gen_id */
@@ -200,8 +180,9 @@ static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
- while (++net->dev_base_seq == 0)
- ;
+ unsigned int val = net->dev_base_seq + 1;
+
+ WRITE_ONCE(net->dev_base_seq, val ?: 1);
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
@@ -341,13 +322,22 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
return 0;
}
-static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+static void netdev_name_node_alt_free(struct rcu_head *head)
{
- list_del(&name_node->list);
+ struct netdev_name_node *name_node =
+ container_of(head, struct netdev_name_node, rcu);
+
kfree(name_node->name);
netdev_name_node_free(name_node);
}
+static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+{
+ netdev_name_node_del(name_node);
+ list_del(&name_node->list);
+ call_rcu(&name_node->rcu, netdev_name_node_alt_free);
+}
+
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@ -362,10 +352,7 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
- netdev_name_node_del(name_node);
- synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
-
return 0;
}
@@ -373,8 +360,10 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
{
struct netdev_name_node *name_node, *tmp;
- list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
- __netdev_name_node_alt_destroy(name_node);
+ list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
+ list_del(&name_node->list);
+ netdev_name_node_alt_free(&name_node->rcu);
+ }
}
/* Device list insertion */
@@ -385,12 +374,10 @@ static void list_netdevice(struct net_device *dev)
ASSERT_RTNL();
- write_lock(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
- write_unlock(&dev_base_lock);
netdev_for_each_altname(dev, name_node)
netdev_name_node_add(net, name_node);
@@ -404,7 +391,7 @@ static void list_netdevice(struct net_device *dev)
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
-static void unlist_netdevice(struct net_device *dev, bool lock)
+static void unlist_netdevice(struct net_device *dev)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
@@ -417,13 +404,9 @@ static void unlist_netdevice(struct net_device *dev, bool lock)
netdev_name_node_del(name_node);
/* Unlink dev from the device chain */
- if (lock)
- write_lock(&dev_base_lock);
list_del_rcu(&dev->dev_list);
netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
- if (lock)
- write_unlock(&dev_base_lock);
dev_base_seq_inc(dev_net(dev));
}
@@ -442,6 +425,12 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
+/* Page_pool has a lockless array/stack to alloc/recycle pages.
+ * PP consumers must pay attention to run APIs in the appropriate context
+ * (e.g. NAPI context).
+ */
+static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
+
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
@@ -551,7 +540,7 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
- return pt->dev ? &pt->dev->ptype_all : &ptype_all;
+ return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
else
return pt->dev ? &pt->dev->ptype_specific :
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
@@ -653,7 +642,7 @@ int dev_get_iflink(const struct net_device *dev)
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);
@@ -738,9 +727,9 @@ EXPORT_SYMBOL_GPL(dev_fill_forward_path);
* @net: the applicable net namespace
* @name: name to find
*
- * Find an interface by name. Must be called under RTNL semaphore
- * or @dev_base_lock. If the name is found a pointer to the device
- * is returned. If the name is not found then %NULL is returned. The
+ * Find an interface by name. Must be called under RTNL semaphore.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
@@ -821,8 +810,7 @@ EXPORT_SYMBOL(netdev_get_by_name);
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
- * about locking. The caller must hold either the RTNL semaphore
- * or @dev_base_lock.
+ * about locking. The caller must hold the RTNL semaphore.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
@@ -1212,13 +1200,13 @@ int dev_change_name(struct net_device *dev, const char *newname)
dev->flags & IFF_UP ? " (while UP)" : "");
old_assign_type = dev->name_assign_type;
- dev->name_assign_type = NET_NAME_RENAMED;
+ WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
up_write(&devnet_rename_sem);
return ret;
}
@@ -1227,15 +1215,11 @@ rollback:
netdev_adjacent_rename_links(dev, oldname);
- write_lock(&dev_base_lock);
netdev_name_node_del(dev->name_node);
- write_unlock(&dev_base_lock);
- synchronize_rcu();
+ synchronize_net();
- write_lock(&dev_base_lock);
netdev_name_node_add(net, dev->name_node);
- write_unlock(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
@@ -1247,7 +1231,7 @@ rollback:
down_write(&devnet_rename_sem);
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
@@ -2242,7 +2226,8 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
*/
bool dev_nit_active(struct net_device *dev)
{
- return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
+ return !list_empty(&net_hotdata.ptype_all) ||
+ !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
@@ -2253,10 +2238,9 @@ EXPORT_SYMBOL_GPL(dev_nit_active);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
- struct packet_type *ptype;
+ struct list_head *ptype_list = &net_hotdata.ptype_all;
+ struct packet_type *ptype, *pt_prev = NULL;
struct sk_buff *skb2 = NULL;
- struct packet_type *pt_prev = NULL;
- struct list_head *ptype_list = &ptype_all;
rcu_read_lock();
again:
@@ -2302,7 +2286,7 @@ again:
pt_prev = ptype;
}
- if (ptype_list == &ptype_all) {
+ if (ptype_list == &net_hotdata.ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
@@ -4421,19 +4405,10 @@ EXPORT_SYMBOL(__dev_direct_xmit);
* Receiver routines
*************************************************************************/
-int netdev_max_backlog __read_mostly = 1000;
-EXPORT_SYMBOL(netdev_max_backlog);
-
-int netdev_tstamp_prequeue __read_mostly = 1;
unsigned int sysctl_skb_defer_max __read_mostly = 64;
-int netdev_budget __read_mostly = 300;
-/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
-unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
-int dev_rx_weight __read_mostly = 64;
-int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@@ -4475,12 +4450,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
#ifdef CONFIG_RPS
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-u32 rps_cpu_mask __read_mostly;
-EXPORT_SYMBOL(rps_cpu_mask);
-
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
@@ -4572,7 +4541,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!hash)
goto done;
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
@@ -4582,10 +4551,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
- if ((ident ^ hash) & ~rps_cpu_mask)
+ if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
- next_cpu = ident & rps_cpu_mask;
+ next_cpu = ident & net_hotdata.rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
@@ -4734,7 +4703,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
struct softnet_data *sd;
unsigned int old_flow, new_flow;
- if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
+ if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
@@ -4782,7 +4751,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
+ if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
+ !skb_flow_limit(skb, qlen)) {
if (qlen) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4858,6 +4828,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
orig_data_end = xdp->data_end;
orig_data = xdp->data;
@@ -4887,6 +4863,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
skb->len += off; /* positive on grow, negative on shrink */
}
+ /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
/* check if XDP changed eth hdr such SKB needs update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
@@ -4920,11 +4904,35 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+static int
+netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
+{
+ struct sk_buff *skb = *pskb;
+ int err, hroom, troom;
+
+ if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
+ return 0;
+
+ /* In case we have to go down the path and also linearize,
+ * then lets do the pskb_expand_head() work just once here.
+ */
+ hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+ troom = skb->tail + skb->data_len - skb->end;
+ err = pskb_expand_head(skb,
+ hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+ troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+ if (err)
+ return err;
+
+ return skb_linearize(skb);
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- u32 act = XDP_DROP;
+ struct sk_buff *skb = *pskb;
+ u32 mac_len, act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
@@ -4932,41 +4940,36 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
if (skb_is_redirected(skb))
return XDP_PASS;
- /* XDP packets must be linear and must have sufficient headroom
- * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
- * native XDP provides, thus we need to do it here as well.
+ /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+ * bytes. This is the guarantee that also native XDP provides,
+ * thus we need to do it here as well.
*/
+ mac_len = skb->data - skb_mac_header(skb);
+ __skb_push(skb, mac_len);
+
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
- int troom = skb->tail + skb->data_len - skb->end;
-
- /* In case we have to go down the path and also linearize,
- * then lets do the pskb_expand_head() work just once here.
- */
- if (pskb_expand_head(skb,
- hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
- troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
- goto do_drop;
- if (skb_linearize(skb))
+ if (netif_skb_check_for_xdp(pskb, xdp_prog))
goto do_drop;
}
- act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+ __skb_pull(*pskb, mac_len);
+
+ act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+ bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(skb->dev, xdp_prog, act);
+ trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
- kfree_skb(skb);
+ kfree_skb(*pskb);
break;
}
@@ -5004,24 +5007,24 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
- act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
- err = xdp_do_generic_redirect(skb->dev, skb,
+ err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
- generic_xdp_tx(skb, xdp_prog);
+ generic_xdp_tx(*pskb, xdp_prog);
break;
}
return XDP_DROP;
@@ -5029,7 +5032,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
return XDP_PASS;
out_redir:
- kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
@@ -5038,7 +5041,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_rx(skb);
@@ -5330,7 +5333,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
- net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@@ -5352,7 +5355,8 @@ another_round:
int ret2;
migrate_disable();
- ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
+ &skb);
migrate_enable();
if (ret2 != XDP_PASS) {
@@ -5373,7 +5377,7 @@ another_round:
if (pfmemalloc)
goto skip_taps;
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -5713,7 +5717,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@@ -5743,7 +5747,8 @@ void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
+ skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@@ -5967,7 +5972,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
- napi->weight = READ_ONCE(dev_rx_weight);
+ napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
struct sk_buff *skb;
@@ -6156,6 +6161,27 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
return NULL;
}
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+ struct sk_buff *skb, *next;
+
+ /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+ if (!READ_ONCE(sd->defer_list))
+ return;
+
+ spin_lock(&sd->defer_lock);
+ skb = sd->defer_list;
+ sd->defer_list = NULL;
+ sd->defer_count = 0;
+ spin_unlock(&sd->defer_lock);
+
+ while (skb != NULL) {
+ next = skb->next;
+ napi_consume_skb(skb, 1);
+ skb = next;
+ }
+}
+
#if defined(CONFIG_NET_RX_BUSY_POLL)
static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
@@ -6280,6 +6306,7 @@ count:
if (work > 0)
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ skb_defer_free_flush(this_cpu_ptr(&softnet_data));
local_bh_enable();
if (!loop_end || loop_end(loop_end_arg, start_time))
@@ -6709,27 +6736,6 @@ static int napi_thread_wait(struct napi_struct *napi)
return -1;
}
-static void skb_defer_free_flush(struct softnet_data *sd)
-{
- struct sk_buff *skb, *next;
-
- /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
- if (!READ_ONCE(sd->defer_list))
- return;
-
- spin_lock(&sd->defer_lock);
- skb = sd->defer_list;
- sd->defer_list = NULL;
- sd->defer_count = 0;
- spin_unlock(&sd->defer_lock);
-
- while (skb != NULL) {
- next = skb->next;
- napi_consume_skb(skb, 1);
- skb = next;
- }
-}
-
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
@@ -6771,8 +6777,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
- usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
- int budget = READ_ONCE(netdev_budget);
+ usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
+ int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
@@ -8615,12 +8621,12 @@ unsigned int dev_get_flags(const struct net_device *dev)
{
unsigned int flags;
- flags = (dev->flags & ~(IFF_PROMISC |
+ flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
- (dev->gflags & (IFF_PROMISC |
+ (READ_ONCE(dev->gflags) & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
@@ -8943,7 +8949,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
}
EXPORT_SYMBOL(dev_set_mac_address);
-static DECLARE_RWSEM(dev_addr_sem);
+DECLARE_RWSEM(dev_addr_sem);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack)
@@ -9697,11 +9703,11 @@ static void dev_index_release(struct net *net, int ifindex)
/* Delayed registration/unregisteration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
- atomic_inc(&dev_net(dev)->dev_unreg_count);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
@@ -10266,9 +10272,9 @@ int register_netdevice(struct net_device *dev)
goto err_ifindex_release;
ret = netdev_register_kobject(dev);
- write_lock(&dev_base_lock);
- dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
- write_unlock(&dev_base_lock);
+
+ WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
+
if (ret)
goto err_uninit_notify;
@@ -10344,7 +10350,7 @@ EXPORT_SYMBOL(register_netdevice);
* that need to tie several hardware interfaces to a single NAPI
* poll scheduler due to HW limitations.
*/
-int init_dummy_netdev(struct net_device *dev)
+void init_dummy_netdev(struct net_device *dev)
{
/* Clear everything. Note we don't initialize spinlocks
* are they aren't supposed to be taken by any of the
@@ -10372,8 +10378,6 @@ int init_dummy_netdev(struct net_device *dev)
* because users of this 'device' dont need to change
* its refcount.
*/
-
- return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
@@ -10528,6 +10532,7 @@ void netdev_run_todo(void)
{
struct net_device *dev, *tmp;
struct list_head list;
+ int cnt;
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
@@ -10558,12 +10563,11 @@ void netdev_run_todo(void)
continue;
}
- write_lock(&dev_base_lock);
- dev->reg_state = NETREG_UNREGISTERED;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
linkwatch_sync_dev(dev);
}
+ cnt = 0;
while (!list_empty(&list)) {
dev = netdev_wait_allrefs_any(&list);
list_del(&dev->todo_list);
@@ -10581,12 +10585,13 @@ void netdev_run_todo(void)
if (dev->needs_free_netdev)
free_netdev(dev);
- if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
- wake_up(&netdev_unregistering_wq);
+ cnt++;
/* Free network device */
kobject_put(&dev->dev.kobj);
}
+ if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
+ wake_up(&netdev_unregistering_wq);
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
@@ -10663,6 +10668,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
+ dev_get_tstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -10977,7 +10984,7 @@ void free_netdev(struct net_device *dev)
}
BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
- dev->reg_state = NETREG_RELEASED;
+ WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
/* will free via device release */
put_device(&dev->dev);
@@ -11033,6 +11040,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
+ int cnt = 0;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
@@ -11064,10 +11072,8 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
- write_lock(&dev_base_lock);
- unlist_netdevice(dev, false);
- dev->reg_state = NETREG_UNREGISTERING;
- write_unlock(&dev_base_lock);
+ unlist_netdevice(dev);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
}
flush_all_backlogs();
@@ -11129,7 +11135,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
net_set_todo(dev);
+ cnt++;
}
+ atomic_add(cnt, &dev_unreg_count);
list_del(head);
}
@@ -11247,7 +11255,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
dev_close(dev);
/* And unlink it from device chain */
- unlist_netdevice(dev, true);
+ unlist_netdevice(dev);
synchronize_net();
@@ -11583,11 +11591,8 @@ static void __net_exit default_device_exit_net(struct net *net)
snprintf(fb_name, IFNAMSIZ, "dev%%d");
netdev_for_each_altname_safe(dev, name_node, tmp)
- if (netdev_name_in_use(&init_net, name_node->name)) {
- netdev_name_node_del(name_node);
- synchronize_rcu();
+ if (netdev_name_in_use(&init_net, name_node->name))
__netdev_name_node_alt_destroy(name_node);
- }
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
@@ -11694,6 +11699,28 @@ static void __init net_dev_struct_check(void)
*
*/
+/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
+#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
+
+static int net_page_pool_create(int cpuid)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ struct page_pool_params page_pool_params = {
+ .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
+ .flags = PP_FLAG_SYSTEM_POOL,
+ .nid = NUMA_NO_NODE,
+ };
+ struct page_pool *pp_ptr;
+
+ pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
+ if (IS_ERR(pp_ptr))
+ return -ENOMEM;
+
+ per_cpu(system_page_pool, cpuid) = pp_ptr;
+#endif
+ return 0;
+}
+
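
A consumer-side sketch for the per-CPU pool created above, assuming softirq context so the current CPU's pool cannot change under us (the foo_* helpers are hypothetical; the alloc/free calls are the stock page pool API):

	static struct page *foo_get_scratch_page(void)
	{
		struct page_pool *pp = this_cpu_read(system_page_pool);

		return page_pool_dev_alloc_pages(pp);
	}

	static void foo_put_scratch_page(struct page *page)
	{
		struct page_pool *pp = this_cpu_read(system_page_pool);

		/* allow_direct recycling is only safe from the owning CPU */
		page_pool_put_full_page(pp, page, true);
	}
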
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11712,7 +11739,6 @@ static int __init net_dev_init(void)
if (netdev_kobject_init())
goto out;
- INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
@@ -11746,6 +11772,9 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+
+ if (net_page_pool_create(i))
+ goto out;
}
dev_boot_phase = 0;
@@ -11773,6 +11802,19 @@ static int __init net_dev_init(void)
WARN_ON(rc < 0);
rc = 0;
out:
+ if (rc < 0) {
+ for_each_possible_cpu(i) {
+ struct page_pool *pp_ptr;
+
+ pp_ptr = per_cpu(system_page_pool, i);
+ if (!pp_ptr)
+ continue;
+
+ page_pool_destroy(pp_ptr);
+ per_cpu(system_page_pool, i) = NULL;
+ }
+ }
+
return rc;
}
diff --git a/net/core/dev.h b/net/core/dev.h
index 7480b4c84298..2bcaf8eee50c 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -3,6 +3,7 @@
#define _NET_CORE_DEV_H
#include <linux/types.h>
+#include <linux/rwsem.h>
struct net;
struct net_device;
@@ -37,15 +38,14 @@ int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
/* sysctls not referred to from outside net/core/ */
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
extern unsigned int sysctl_skb_defer_max;
-extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
+extern struct rw_semaphore dev_addr_sem;
+
/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
@@ -56,6 +56,7 @@ struct netdev_name_node {
struct list_head list;
struct net_device *dev;
const char *name;
+ struct rcu_head rcu;
};
int netdev_get_name(struct net *net, char *name, int ifindex);
diff --git a/net/core/dst.c b/net/core/dst.c
index 6838d3212c37..95f533844f17 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -96,7 +96,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
}
EXPORT_SYMBOL(dst_alloc);
-struct dst_entry *dst_destroy(struct dst_entry * dst)
+static void dst_destroy(struct dst_entry *dst)
{
struct dst_entry *child = NULL;
@@ -126,15 +126,13 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
dst = child;
if (dst)
dst_release_immediate(dst);
- return NULL;
}
-EXPORT_SYMBOL(dst_destroy);
static void dst_destroy_rcu(struct rcu_head *head)
{
struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
- dst = dst_destroy(dst);
+ dst_destroy(dst);
}
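
dst_destroy() had exactly one caller left, the RCU callback above, so the "return the chained child" contract was dead code. The freeing side stays the usual deferred pattern, sketched here assuming a plain call_rcu() (dst.c's release path may use a variant such as call_rcu_hurry()):

	/* defer teardown past a grace period so RCU readers
	 * traversing dst pointers never see freed memory */
	call_rcu(&dst->rcu_head, dst_destroy_rcu);
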
/* Operations to mark dst as DEAD and clean up the net device referenced
diff --git a/net/core/filter.c b/net/core/filter.c
index ef3e78b6a39c..8adf95765cdd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -88,7 +88,7 @@
#include "dev.h"
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id);
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
@@ -778,7 +778,7 @@ jmp_rest:
BPF_EMIT_JMP;
break;
- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B: {
struct sock_filter tmp = {
.code = BPF_LD | BPF_ABS | BPF_B,
@@ -804,7 +804,7 @@ jmp_rest:
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
}
- /* RET_K is remaped into 2 insns. RET_A case doesn't need an
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
* extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
*/
case BPF_RET | BPF_A:
@@ -2968,7 +2968,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
*
* Then if B is non-zero AND there is no space allocate space and
* compact A, B regions into page. If there is space shift ring to
- * the rigth free'ing the next element in ring to place B, leaving
+ * the right free'ing the next element in ring to place B, leaving
* A untouched except to reduce length.
*/
if (start != offset) {
@@ -5988,7 +5988,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return -ENODEV;
idev = __in6_dev_get_safely(dev);
- if (unlikely(!idev || !idev->cnf.forwarding))
+ if (unlikely(!idev || !READ_ONCE(idev->cnf.forwarding)))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
@@ -7894,7 +7894,7 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -7987,7 +7987,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8006,7 +8006,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8193,7 +8193,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8252,13 +8252,13 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
/* The nf_conn___init type is used in the NF_CONNTRACK kfuncs. The
* kfuncs are defined in two different modules, and we want to be able
- * to use them interchangably with the same BTF type ID. Because modules
+ * to use them interchangeably with the same BTF type ID. Because modules
* can't de-duplicate BTF IDs between each other, we need the type to be
* referenced in the vmlinux BTF or the verifier will get confused about
* the different types. So we add this dummy type reference which will
@@ -8313,7 +8313,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8355,7 +8355,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_cgroup_classid_curr_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8399,7 +8399,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_lookup_tcp_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8410,7 +8410,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes:
return &bpf_flow_dissector_load_bytes_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8437,7 +8437,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8612,7 +8612,7 @@ static bool cg_skb_is_valid_access(int off, int size,
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
}
@@ -8624,7 +8624,7 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
break;
case bpf_ctx_range(struct __sk_buff, tstamp):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
default:
@@ -11268,7 +11268,7 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -11450,7 +11450,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -11784,7 +11784,7 @@ const struct bpf_func_proto bpf_sock_from_file_proto = {
};
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id)
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
const struct bpf_func_proto *func;
@@ -11813,10 +11813,10 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
- if (!perfmon_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
return NULL;
return func;
@@ -11869,6 +11869,103 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
return 0;
}
+
+__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz)
+{
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ const struct request_sock_ops *ops;
+ struct inet_request_sock *ireq;
+ struct tcp_request_sock *treq;
+ struct request_sock *req;
+ struct net *net;
+ __u16 min_mss;
+ u32 tsoff = 0;
+
+ if (attrs__sz != sizeof(*attrs) ||
+ attrs->reserved[0] || attrs->reserved[1] || attrs->reserved[2])
+ return -EINVAL;
+
+ if (!skb_at_tc_ingress(skb))
+ return -EINVAL;
+
+ net = dev_net(skb->dev);
+ if (net != sock_net(sk))
+ return -ENETUNREACH;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ops = &tcp_request_sock_ops;
+ min_mss = 536;
+ break;
+#if IS_BUILTIN(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ops = &tcp6_request_sock_ops;
+ min_mss = IPV6_MIN_MTU - 60;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_LISTEN ||
+ sk_is_mptcp(sk))
+ return -EINVAL;
+
+ if (attrs->mss < min_mss)
+ return -EINVAL;
+
+ if (attrs->wscale_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_window_scaling))
+ return -EINVAL;
+
+ if (attrs->snd_wscale > TCP_MAX_WSCALE ||
+ attrs->rcv_wscale > TCP_MAX_WSCALE)
+ return -EINVAL;
+ }
+
+ if (attrs->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
+ return -EINVAL;
+
+ if (attrs->tstamp_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
+ return -EINVAL;
+
+ tsoff = attrs->rcv_tsecr - tcp_ns_to_ts(attrs->usec_ts_ok, tcp_clock_ns());
+ }
+
+ req = inet_reqsk_alloc(ops, sk, false);
+ if (!req)
+ return -ENOMEM;
+
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
+
+ req->rsk_listener = sk;
+ req->syncookie = 1;
+ req->mss = attrs->mss;
+ req->ts_recent = attrs->rcv_tsval;
+
+ ireq->snd_wscale = attrs->snd_wscale;
+ ireq->rcv_wscale = attrs->rcv_wscale;
+ ireq->tstamp_ok = !!attrs->tstamp_ok;
+ ireq->sack_ok = !!attrs->sack_ok;
+ ireq->wscale_ok = !!attrs->wscale_ok;
+ ireq->ecn_ok = !!attrs->ecn_ok;
+
+ treq->req_usec_ts = !!attrs->usec_ts_ok;
+ treq->ts_off = tsoff;
+
+ skb_orphan(skb);
+ skb->sk = req_to_sk(req);
+ skb->destructor = sock_pfree;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
__bpf_kfunc_end_defs();
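
A BPF-side sketch of driving the new kfunc from a tc program, following the extern-declaration convention the BPF selftests use; tuple extraction, cookie validation and the listener-state check are elided, and the attrs values are placeholders:

	extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
					   struct bpf_tcp_req_attrs *attrs,
					   int attrs__sz) __ksym;

	SEC("tc")
	int accept_custom_syncookie(struct __sk_buff *skb)
	{
		struct bpf_tcp_req_attrs attrs = {
			.mss = 1460, .wscale_ok = 1,
			.snd_wscale = 7, .rcv_wscale = 7,
		};
		struct bpf_sock_tuple tuple = {};	/* fill from the ACK's 4-tuple */
		struct sock *sk;

		sk = (struct sock *)bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
						       BPF_F_CURRENT_NETNS, 0);
		if (!sk)
			return TC_ACT_SHOT;
		/* sk must be a TCP_LISTEN, non-MPTCP socket or the kfunc fails */
		bpf_sk_assign_tcp_reqsk(skb, sk, &attrs, sizeof(attrs));
		bpf_sk_release(sk);
		return TC_ACT_OK;
	}
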
int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
@@ -11885,17 +11982,21 @@ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
return 0;
}
-BTF_SET8_START(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
-BTF_SET8_END(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
-BTF_SET8_START(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
-BTF_SET8_END(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_END(bpf_kfunc_check_set_xdp)
-BTF_SET8_START(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path)
-BTF_SET8_END(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_END(bpf_kfunc_check_set_sock_addr)
+
+BTF_KFUNCS_START(bpf_kfunc_check_set_tcp_reqsk)
+BTF_ID_FLAGS(func, bpf_sk_assign_tcp_reqsk, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_kfunc_check_set_tcp_reqsk)
static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
.owner = THIS_MODULE,
@@ -11912,6 +12013,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_sock_addr = {
.set = &bpf_kfunc_check_set_sock_addr,
};
+static const struct btf_kfunc_id_set bpf_kfunc_set_tcp_reqsk = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_tcp_reqsk,
+};
+
static int __init bpf_kfunc_init(void)
{
int ret;
@@ -11927,8 +12033,9 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
- return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- &bpf_kfunc_set_sock_addr);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ &bpf_kfunc_set_sock_addr);
+ return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk);
}
late_initcall(bpf_kfunc_init);
@@ -11968,9 +12075,9 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
-BTF_SET8_END(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_END(bpf_sk_iter_kfunc_ids)
static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
diff --git a/net/core/gro.c b/net/core/gro.c
index 0759277dc14e..ee30d4f0c038 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -10,9 +10,6 @@
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(offload_lock);
-struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
/**
* dev_add_offload - register offload handlers
@@ -31,7 +28,7 @@ void dev_add_offload(struct packet_offload *po)
struct packet_offload *elem;
spin_lock(&offload_lock);
- list_for_each_entry(elem, &offload_base, list) {
+ list_for_each_entry(elem, &net_hotdata.offload_base, list) {
if (po->priority < elem->priority)
break;
}
@@ -55,7 +52,7 @@ EXPORT_SYMBOL(dev_add_offload);
*/
static void __dev_remove_offload(struct packet_offload *po)
{
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *po1;
spin_lock(&offload_lock);
@@ -235,9 +232,9 @@ done:
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
- struct list_head *head = &offload_base;
int err = -ENOENT;
BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@@ -369,15 +366,21 @@ static void gro_list_prepare(const struct list_head *head,
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
+ const struct skb_shared_info *pinfo;
+ const skb_frag_t *frag0;
+ unsigned int headlen;
NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ headlen = skb_headlen(skb);
+ NAPI_GRO_CB(skb)->frag0 = skb->data;
+ NAPI_GRO_CB(skb)->frag0_len = headlen;
+ if (headlen)
+ return;
+
+ pinfo = skb_shinfo(skb);
+ frag0 = &pinfo->frags[0];
- if (!skb_headlen(skb) && pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0)) &&
+ if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
(!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
@@ -438,7 +441,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
{
u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
struct gro_list *gro_list = &napi->gro_hash[bucket];
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
struct sk_buff *pp = NULL;
@@ -544,7 +547,7 @@ normal:
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -558,7 +561,7 @@ EXPORT_SYMBOL(gro_find_receive_by_type);
struct packet_offload *gro_find_complete_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -700,7 +703,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb, hlen);
- if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ if (unlikely(!skb_gro_may_pull(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
net_warn_ratelimited("%s: dropping impossible skb from %s\n",
@@ -710,7 +713,10 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
}
} else {
eth = (const struct ethhdr *)skb->data;
- gro_pull_from_frag0(skb, hlen);
+
+ if (NAPI_GRO_CB(skb)->frag0 != skb->data)
+ gro_pull_from_frag0(skb, hlen);
+
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
}
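
For context, the accessor pair this feeds, roughly as in include/net/gro.h: with frag0 now seeded from skb->data whenever there is linear data, the fast path also covers linear packets and the copying pull becomes the rare case:

	static inline void *skb_gro_header(struct sk_buff *skb,
					   unsigned int hlen, unsigned int offset)
	{
		void *ptr;

		ptr = skb_gro_header_fast(skb, offset);		/* frag0 + offset */
		if (!skb_gro_may_pull(skb, hlen))
			ptr = skb_gro_header_slow(skb, hlen, offset);
		return ptr;
	}
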
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ed5ec5de47f6..ff8e5b64bf6b 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
+#include <net/hotdata.h>
struct gro_cell {
struct sk_buff_head napi_skbs;
@@ -26,7 +27,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
cell = this_cpu_ptr(gcells->cells);
- if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
+ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
diff --git a/net/core/gso.c b/net/core/gso.c
index 9e1803bfc9c6..bcd156372f4d 100644
--- a/net/core/gso.c
+++ b/net/core/gso.c
@@ -17,7 +17,7 @@ struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
struct packet_offload *ptype;
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
@@ -48,7 +48,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
__skb_pull(skb, vlan_depth);
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
new file mode 100644
index 000000000000..c8a7a451c18a
--- /dev/null
+++ b/net/core/hotdata.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/hotdata.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+
+struct net_hotdata net_hotdata __cacheline_aligned = {
+ .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
+ .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
+ .gro_normal_batch = 8,
+
+ .netdev_budget = 300,
+ /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+ .netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
+
+ .tstamp_prequeue = 1,
+ .max_backlog = 1000,
+ .dev_tx_weight = 64,
+ .dev_rx_weight = 64,
+};
+EXPORT_SYMBOL(net_hotdata);
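
A sketch of the structure behind this initializer, with the field list inferred from it; the real include/net/hotdata.h carries additional members (e.g. RPS state) and cacheline-placement annotations:

	struct net_hotdata {
		struct list_head	offload_base;	/* GRO/GSO protocol offloads */
		struct list_head	ptype_all;	/* ETH_P_ALL taps */
		int			gro_normal_batch;
		int			netdev_budget;
		unsigned int		netdev_budget_usecs;
		int			tstamp_prequeue;
		int			max_backlog;
		int			dev_tx_weight;
		int			dev_rx_weight;
	};
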
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 429571c258da..8ec35194bfcb 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -33,7 +33,7 @@ static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
-static unsigned char default_operstate(const struct net_device *dev)
+static unsigned int default_operstate(const struct net_device *dev)
{
if (netif_testing(dev))
return IF_OPER_TESTING;
@@ -62,16 +62,13 @@ static unsigned char default_operstate(const struct net_device *dev)
return IF_OPER_UP;
}
-
static void rfc2863_policy(struct net_device *dev)
{
- unsigned char operstate = default_operstate(dev);
+ unsigned int operstate = default_operstate(dev);
- if (operstate == dev->operstate)
+ if (operstate == READ_ONCE(dev->operstate))
return;
- write_lock(&dev_base_lock);
-
switch(dev->link_mode) {
case IF_LINK_MODE_TESTING:
if (operstate == IF_OPER_UP)
@@ -87,9 +84,7 @@ static void rfc2863_policy(struct net_device *dev)
break;
}
- dev->operstate = operstate;
-
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->operstate, operstate);
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 09f7ed1a04e8..a97eceb84e61 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -3,52 +3,22 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
+#include <net/hotdata.h>
#include "dev.h"
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
-
-#define get_bucket(x) ((x) >> BUCKET_SPACE)
-#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
-#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
-
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
+static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
- struct net *net = seq_file_net(seq);
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct hlist_head *h;
- unsigned int count = 0, offset = get_offset(*pos);
- h = &net->dev_index_head[get_bucket(*pos)];
- hlist_for_each_entry_rcu(dev, h, index_hlist) {
- if (++count == offset)
- return dev;
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
+ *pos = dev->ifindex;
+ return dev;
}
-
- return NULL;
-}
-
-static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
-{
- struct net_device *dev;
- unsigned int bucket;
-
- do {
- dev = dev_from_same_bucket(seq, pos);
- if (dev)
- return dev;
-
- bucket = get_bucket(*pos) + 1;
- *pos = set_bucket_offset(bucket, 1);
- } while (bucket < NETDEV_HASHENTRIES);
-
return NULL;
}
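
This is the resumable-dump idiom that replaces the hash-bucket walks here and in rtnl_dump_ifinfo() below: the xarray index doubles as the resume cookie. Schematically, for the netlink dumpers (fill_one() is a stand-in):

	struct { unsigned long ifindex; } *ctx = (void *)cb->ctx;
	struct net_device *dev;
	int err = 0;

	for_each_netdev_dump(net, dev, ctx->ifindex) {
		err = fill_one(skb, dev);
		if (err < 0)
			break;		/* ctx->ifindex still names this dev */
	}
	return err;			/* -EMSGSIZE => netlink calls back in */
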
-/*
- * This is invoked by the /proc filesystem handler to display a device
- * in detail.
- */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
@@ -56,16 +26,13 @@ static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
if (!*pos)
return SEQ_START_TOKEN;
- if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
- return NULL;
-
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void dev_seq_stop(struct seq_file *seq, void *v)
@@ -217,7 +184,7 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
}
}
- list_for_each_entry_rcu(pt, &ptype_all, list) {
+ list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
if (i == pos)
return pt;
++i;
@@ -265,13 +232,13 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
}
- nxt = ptype_all.next;
+ nxt = net_hotdata.ptype_all.next;
goto ptype_all;
}
if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
- if (nxt != &ptype_all)
+ if (nxt != &net_hotdata.ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a09d507c5b03..e3d7a8cfa20b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
@@ -34,10 +35,10 @@ static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
-/* Caller holds RTNL or dev_base_lock */
+/* Caller holds RTNL or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
- return dev->reg_state <= NETREG_REGISTERED;
+ return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
@@ -48,10 +49,10 @@ static ssize_t netdev_show(const struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = (*format)(ndev, buf);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -60,7 +61,7 @@ static ssize_t netdev_show(const struct device *dev,
#define NETDEVICE_SHOW(field, format_string) \
static ssize_t format_##field(const struct net_device *dev, char *buf) \
{ \
- return sysfs_emit(buf, format_string, dev->field); \
+ return sysfs_emit(buf, format_string, READ_ONCE(dev->field)); \
} \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -125,7 +126,7 @@ static DEVICE_ATTR_RO(iflink);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
- return sysfs_emit(buf, fmt_dec, dev->name_assign_type);
+ return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}
static ssize_t name_assign_type_show(struct device *dev,
@@ -135,24 +136,28 @@ static ssize_t name_assign_type_show(struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- if (ndev->name_assign_type != NET_NAME_UNKNOWN)
+ if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
ret = netdev_show(dev, attr, buf, format_name_assign_type);
return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
-/* use same locking rules as GIFHWADDR ioctl's */
+/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ down_read(&dev_addr_sem);
+
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
+
+ up_read(&dev_addr_sem);
return ret;
}
static DEVICE_ATTR_RO(address);
@@ -161,10 +166,13 @@ static ssize_t broadcast_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *ndev = to_net_dev(dev);
+ int ret = -EINVAL;
+ rcu_read_lock();
if (dev_isalive(ndev))
- return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
- return -EINVAL;
+ ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
+ rcu_read_unlock();
+ return ret;
}
static DEVICE_ATTR_RO(broadcast);
@@ -318,11 +326,9 @@ static ssize_t operstate_show(struct device *dev,
const struct net_device *netdev = to_net_dev(dev);
unsigned char operstate;
- read_lock(&dev_base_lock);
- operstate = netdev->operstate;
+ operstate = READ_ONCE(netdev->operstate);
if (!netif_running(netdev))
operstate = IF_OPER_DOWN;
- read_unlock(&dev_base_lock);
if (operstate >= ARRAY_SIZE(operstates))
return -EINVAL; /* should not happen */
@@ -680,14 +686,14 @@ static ssize_t netstat_show(const struct device *d,
WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
offset % sizeof(u64) != 0);
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(dev)) {
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1409,6 +1415,65 @@ static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
= __ATTR(hold_time, 0644,
bql_show_hold_time, bql_set_hold_time);
+static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
+}
+
+static ssize_t bql_set_stall_thrs(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ struct dql *dql = &queue->dql;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ value = msecs_to_jiffies(value);
+ if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
+ return -ERANGE;
+
+ if (!dql->stall_thrs && value)
+ dql->last_reap = jiffies;
+ /* Force last_reap to be live */
+ smp_wmb();
+ dql->stall_thrs = value;
+
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
+ __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);
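
Note the precedence in the bounds check above: 4 / 2 * BITS_PER_LONG groups as (4 / 2) * BITS_PER_LONG, i.e. 128 jiffies on a 64-bit build, so with HZ=1000 the accepted threshold window is roughly 4..128 ms, and writing 0 keeps stall detection disabled.
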
+
+static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf)
+{
+ return sprintf(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
+}
+
+static ssize_t bql_set_stall_max(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ WRITE_ONCE(queue->dql.stall_max, 0);
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
+ __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);
+
+static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%lu\n", dql->stall_cnt);
+}
+
+static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
+ __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);
+
static ssize_t bql_show_inflight(struct netdev_queue *queue,
char *buf)
{
@@ -1447,6 +1512,9 @@ static struct attribute *dql_attrs[] __ro_after_init = {
&bql_limit_min_attribute.attr,
&bql_hold_time_attribute.attr,
&bql_inflight_attribute.attr,
+ &bql_stall_thrs_attribute.attr,
+ &bql_stall_cnt_attribute.attr,
+ &bql_stall_max_attribute.attr,
NULL
};
@@ -1454,6 +1522,9 @@ static const struct attribute_group dql_group = {
.name = "byte_queue_limits",
.attrs = dql_attrs,
};
+#else
+/* Fake declaration, all the code using it should be dead */
+extern const struct attribute_group dql_group;
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
@@ -1691,6 +1762,15 @@ static const struct kobj_type netdev_queue_ktype = {
.get_ownership = netdev_queue_get_ownership,
};
+static bool netdev_uses_bql(const struct net_device *dev)
+{
+ if (dev->features & NETIF_F_LLTX ||
+ dev->priv_flags & IFF_NO_QUEUE)
+ return false;
+
+ return IS_ENABLED(CONFIG_BQL);
+}
+
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
struct netdev_queue *queue = dev->_tx + index;
@@ -1708,11 +1788,11 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
if (error)
goto err;
-#ifdef CONFIG_BQL
- error = sysfs_create_group(kobj, &dql_group);
- if (error)
- goto err;
-#endif
+ if (netdev_uses_bql(dev)) {
+ error = sysfs_create_group(kobj, &dql_group);
+ if (error)
+ goto err;
+ }
kobject_uevent(kobj, KOBJ_ADD);
return 0;
@@ -1733,9 +1813,9 @@ static int tx_queue_change_owner(struct net_device *ndev, int index,
if (error)
return error;
-#ifdef CONFIG_BQL
- error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
-#endif
+ if (netdev_uses_bql(ndev))
+ error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
+
return error;
}
#endif /* CONFIG_SYSFS */
@@ -1767,9 +1847,10 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
if (!refcount_read(&dev_net(dev)->ns.count))
queue->kobj.uevent_suppress = 1;
-#ifdef CONFIG_BQL
- sysfs_remove_group(&queue->kobj, &dql_group);
-#endif
+
+ if (netdev_uses_bql(dev))
+ sysfs_remove_group(&queue->kobj, &dql_group);
+
kobject_put(&queue->kobj);
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 72799533426b..f0540c557515 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -318,8 +318,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
- int error = 0;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
+ int error = 0;
refcount_set(&net->ns.count, 1);
ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
@@ -358,6 +359,15 @@ out_undo:
synchronize_rcu();
ops = saved_ops;
+ rtnl_lock();
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+
+ ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
@@ -573,6 +583,7 @@ static void cleanup_net(struct work_struct *work)
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
/* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list);
@@ -611,7 +622,15 @@ static void cleanup_net(struct work_struct *work)
* the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
+
+ rtnl_lock();
+ list_for_each_entry_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
@@ -1193,7 +1212,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_
{
ops_pre_exit_list(ops, net_exit_list);
synchronize_rcu();
+
+ if (ops->exit_batch_rtnl) {
+ LIST_HEAD(dev_kill_list);
+
+ rtnl_lock();
+ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+ }
ops_exit_list(ops, net_exit_list);
+
ops_free_list(ops, net_exit_list);
}
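
A minimal sketch of a pernet_operations user of the new hook, assuming a per-netns device such as a tunnel's fallback dev (the foo_* names are hypothetical). The callback runs under the rtnl_lock taken once by the core in the three call sites above, and only queues devices for the single batched unregister:

	static void __net_exit foo_exit_batch_rtnl(struct list_head *net_list,
						   struct list_head *dev_to_kill)
	{
		struct net *net;

		list_for_each_entry(net, net_list, exit_list) {
			struct foo_net *fn = net_generic(net, foo_net_id);

			if (fn->fb_dev)
				unregister_netdevice_queue(fn->fb_dev, dev_to_kill);
		}
	}

	static struct pernet_operations foo_net_ops = {
		.id		 = &foo_net_id,
		.size		 = sizeof(struct foo_net),
		.exit_batch_rtnl = foo_exit_batch_rtnl,
	};
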
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index be7f2ebd61b2..8d8ace9ef87f 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -68,6 +68,11 @@ static const struct nla_policy netdev_napi_get_dump_nl_policy[NETDEV_A_NAPI_IFIN
[NETDEV_A_NAPI_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
};
+/* NETDEV_CMD_QSTATS_GET - dump */
+static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE + 1] = {
+ [NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -138,6 +143,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_NAPI_IFINDEX,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NETDEV_CMD_QSTATS_GET,
+ .dumpit = netdev_nl_qstats_get_dumpit,
+ .policy = netdev_qstats_get_nl_policy,
+ .maxattr = NETDEV_A_QSTATS_SCOPE,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index a47f2bcbe4fa..4db40fd5b4a9 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -28,6 +28,8 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
enum {
NETDEV_NLGRP_MGMT,
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd98936da3ae..7004b3399c2b 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -8,6 +8,7 @@
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
#include <net/busy_poll.h>
#include "netdev-genl-gen.h"
@@ -152,10 +153,7 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
@@ -287,10 +285,7 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
@@ -463,10 +458,220 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
+ return err;
+}
+
+#define NETDEV_STAT_NOT_SET (~0ULL)
+
+static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
+{
+ const u64 *add = _add;
+ u64 *sum = _sum;
+
+ while (size) {
+ if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
+ *sum += *add;
+ sum++;
+ add++;
+ size -= 8;
+ }
+}
+
+static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
+{
+ if (value == NETDEV_STAT_NOT_SET)
+ return 0;
+ return nla_put_uint(rsp, attr_id, value);
+}
+
+static int
+netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
+ u32 q_type, int i, const struct genl_info *info)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ struct netdev_queue_stats_rx rx;
+ struct netdev_queue_stats_tx tx;
+ void *hdr;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
+ goto nla_put_failure;
+
+ switch (q_type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ memset(&rx, 0xff, sizeof(rx));
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ if (!memchr_inv(&rx, 0xff, sizeof(rx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_rx(rsp, &rx))
+ goto nla_put_failure;
+ break;
+ case NETDEV_QUEUE_TYPE_TX:
+ memset(&tx, 0xff, sizeof(tx));
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ if (!memchr_inv(&tx, 0xff, sizeof(tx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_tx(rsp, &tx))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_cancel:
+ genlmsg_cancel(rsp, hdr);
+ return 0;
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ int i, err;
+
+ if (!(netdev->flags & IFF_UP))
+ return 0;
+
+ i = ctx->rxq_idx;
+ while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
+ i, info);
+ if (err)
+ return err;
+ ctx->rxq_idx = i++;
+ }
+ i = ctx->txq_idx;
+ while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
+ i, info);
+ if (err)
+ return err;
+ ctx->txq_idx = i++;
+ }
+
+ ctx->rxq_idx = 0;
+ ctx->txq_idx = 0;
+ return 0;
+}
+
+static int
+netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info)
+{
+ struct netdev_queue_stats_rx rx_sum, rx;
+ struct netdev_queue_stats_tx tx_sum, tx;
+ const struct netdev_stat_ops *ops;
+ void *hdr;
+ int i;
+
+ ops = netdev->stat_ops;
+ /* Netdev can't guarantee any complete counters */
+ if (!ops->get_base_stats)
+ return 0;
+
+ memset(&rx_sum, 0xff, sizeof(rx_sum));
+ memset(&tx_sum, 0xff, sizeof(tx_sum));
+
+ ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+
+ /* The op was there, but nothing reported, don't bother */
+ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
+ !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
+ return 0;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ memset(&rx, 0xff, sizeof(rx));
+ if (ops->get_queue_stats_rx)
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
+ }
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ memset(&tx, 0xff, sizeof(tx));
+ if (ops->get_queue_stats_tx)
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
+ }
+
+ if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
+ netdev_nl_stats_write_tx(rsp, &tx_sum))
+ goto nla_put_failure;
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ unsigned int scope;
+ int err = 0;
+
+ scope = 0;
+ if (info->attrs[NETDEV_A_QSTATS_SCOPE])
+ scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
+
+ rtnl_lock();
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ if (!netdev->stat_ops)
+ continue;
+
+ switch (scope) {
+ case 0:
+ err = netdev_nl_stats_by_netdev(netdev, skb, info);
+ break;
+ case NETDEV_QSTATS_SCOPE_QUEUE:
+ err = netdev_nl_stats_by_queue(netdev, skb, info, ctx);
+ break;
+ }
+ if (err < 0)
+ break;
+ }
+ rtnl_unlock();
- return skb->len;
+ return err;
}
static int netdev_genl_netdevice_event(struct notifier_block *nb,
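
The dump above memsets each stats struct to 0xff and skips attributes still carrying that pattern, so drivers report only what they track. A producer-side sketch (foo_* names hypothetical, tx/base handlers analogous and elided; the ops live in include/net/netdev_queues.h):

	static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
					   struct netdev_queue_stats_rx *stats)
	{
		struct foo_priv *fp = netdev_priv(dev);	/* hypothetical driver priv */

		stats->packets = fp->rx[idx].packets;
		stats->bytes = fp->rx[idx].bytes;
		/* ->alloc_fail left untouched: stays 0xff, dumped as "not set" */
	}

	static const struct netdev_stat_ops foo_stat_ops = {
		.get_queue_stats_rx	= foo_get_queue_stats_rx,
		.get_queue_stats_tx	= foo_get_queue_stats_tx,
		.get_base_stats		= foo_get_base_stats,
	};

	/* at probe time: netdev->stat_ops = &foo_stat_ops; */
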
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4933762e5a6b..dd364d738c00 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -31,6 +31,8 @@
#define BIAS_MAX (LONG_MAX >> 1)
#ifdef CONFIG_PAGE_POOL_STATS
+static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
+
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
@@ -171,13 +173,16 @@ static void page_pool_producer_unlock(struct page_pool *pool,
}
static int page_pool_init(struct page_pool *pool,
- const struct page_pool_params *params)
+ const struct page_pool_params *params,
+ int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
memcpy(&pool->p, &params->fast, sizeof(pool->p));
memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
+ pool->cpuid = cpuid;
+
/* Validate only known flags were used */
if (pool->p.flags & ~(PP_FLAG_ALL))
return -EINVAL;
@@ -217,14 +222,23 @@ static int page_pool_init(struct page_pool *pool,
pool->has_init_callback = !!pool->slow.init_callback;
#ifdef CONFIG_PAGE_POOL_STATS
- pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
- if (!pool->recycle_stats)
- return -ENOMEM;
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
+ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+ if (!pool->recycle_stats)
+ return -ENOMEM;
+ } else {
+ /* For system page pool instance we use a singular stats object
+ * instead of allocating a separate percpu variable for each
+ * (also percpu) page pool instance.
+ */
+ pool->recycle_stats = &pp_system_recycle_stats;
+ }
#endif
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+ free_percpu(pool->recycle_stats);
#endif
return -ENOMEM;
}
@@ -248,15 +262,18 @@ static void page_pool_uninit(struct page_pool *pool)
put_device(pool->p.dev);
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+ free_percpu(pool->recycle_stats);
#endif
}
/**
- * page_pool_create() - create a page pool.
+ * page_pool_create_percpu() - create a page pool for a given cpu.
* @params: parameters, see struct page_pool_params
+ * @cpuid: cpu identifier
*/
-struct page_pool *page_pool_create(const struct page_pool_params *params)
+struct page_pool *
+page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
struct page_pool *pool;
int err;
@@ -265,7 +282,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
if (!pool)
return ERR_PTR(-ENOMEM);
- err = page_pool_init(pool, params);
+ err = page_pool_init(pool, params, cpuid);
if (err < 0)
goto err_free;
@@ -282,6 +299,16 @@ err_free:
kfree(pool);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(page_pool_create_percpu);
+
+/**
+ * page_pool_create() - create a page pool
+ * @params: parameters, see struct page_pool_params
+ */
+struct page_pool *page_pool_create(const struct page_pool_params *params)
+{
+ return page_pool_create_percpu(params, -1);
+}
EXPORT_SYMBOL(page_pool_create);
static void page_pool_return_page(struct page_pool *pool, struct page *page);
@@ -630,6 +657,11 @@ static bool page_pool_recycle_in_cache(struct page *page,
return true;
}
+static bool __page_pool_page_can_be_recycled(const struct page *page)
+{
+ return page_ref_count(page) == 1 && !page_is_pfmemalloc(page);
+}
+
/* If the page refcnt == 1, this will try to recycle the page.
* if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
* the configured size min(dma_sync_size, pool->max_len).
@@ -651,7 +683,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
* page is NOT reusable when allocated when system is under
* some pressure. (page_is_pfmemalloc)
*/
- if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
+ if (likely(__page_pool_page_can_be_recycled(page))) {
/* Read barrier done in page_ref_count / READ_ONCE */
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
@@ -766,7 +798,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
if (likely(page_pool_unref_page(page, drain_count)))
return NULL;
- if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
+ if (__page_pool_page_can_be_recycled(page)) {
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
page_pool_dma_sync_for_device(pool, page, -1);
@@ -934,8 +966,13 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
-void page_pool_unlink_napi(struct page_pool *pool)
+static void page_pool_disable_direct_recycling(struct page_pool *pool)
{
+ /* Disable direct recycling based on pool->cpuid.
+ * Paired with READ_ONCE() in napi_pp_put_page().
+ */
+ WRITE_ONCE(pool->cpuid, -1);
+
if (!pool->p.napi)
return;
@@ -947,7 +984,6 @@ void page_pool_unlink_napi(struct page_pool *pool)
WRITE_ONCE(pool->p.napi, NULL);
}
-EXPORT_SYMBOL(page_pool_unlink_napi);
void page_pool_destroy(struct page_pool *pool)
{
@@ -957,7 +993,7 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_put(pool))
return;
- page_pool_unlink_napi(pool);
+ page_pool_disable_direct_recycling(pool);
page_pool_free_frag(pool);
if (!page_pool_release(pool))
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 278294aca66a..3a3277ba167b 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -103,8 +103,6 @@ out:
mutex_unlock(&page_pools_lock);
rtnl_unlock();
- if (skb->len && err == -EMSGSIZE)
- return skb->len;
return err;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bd50e9fe3234..a3d7847ce69d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -483,24 +483,15 @@ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
*/
static void rtnl_lock_unregistering_all(void)
{
- struct net *net;
- bool unregistering;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&netdev_unregistering_wq, &wait);
for (;;) {
- unregistering = false;
rtnl_lock();
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
- for_each_net(net) {
- if (atomic_read(&net->dev_unreg_count) > 0) {
- unregistering = true;
- break;
- }
- }
- if (!unregistering)
+ if (!atomic_read(&dev_unreg_count))
break;
__rtnl_unlock();
@@ -851,9 +842,22 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
+void netdev_set_operstate(struct net_device *dev, int newstate)
+{
+ unsigned int old = READ_ONCE(dev->operstate);
+
+ do {
+ if (old == newstate)
+ return;
+ } while (!try_cmpxchg(&dev->operstate, &old, newstate));
+
+ netdev_state_change(dev);
+}
+EXPORT_SYMBOL(netdev_set_operstate);
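
The try_cmpxchg() loop above replaces the dev_base_lock write side: concurrent writers race harmlessly to publish the newest state, the early return keeps netdev_state_change() from firing on no-op transitions, and readers pair with it via READ_ONCE(), as in operstate_show().
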
+
static void set_operstate(struct net_device *dev, unsigned char transition)
{
- unsigned char operstate = dev->operstate;
+ unsigned char operstate = READ_ONCE(dev->operstate);
switch (transition) {
case IF_OPER_UP:
@@ -875,12 +879,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
break;
}
- if (dev->operstate != operstate) {
- write_lock(&dev_base_lock);
- dev->operstate = operstate;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- }
+ netdev_set_operstate(dev, operstate);
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
@@ -1456,17 +1455,18 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
return 0;
}
-static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_fill_link_ifmap(struct sk_buff *skb,
+ const struct net_device *dev)
{
struct rtnl_link_ifmap map;
memset(&map, 0, sizeof(map));
- map.mem_start = dev->mem_start;
- map.mem_end = dev->mem_end;
- map.base_addr = dev->base_addr;
- map.irq = dev->irq;
- map.dma = dev->dma;
- map.port = dev->if_port;
+ map.mem_start = READ_ONCE(dev->mem_start);
+ map.mem_end = READ_ONCE(dev->mem_end);
+ map.base_addr = READ_ONCE(dev->base_addr);
+ map.irq = READ_ONCE(dev->irq);
+ map.dma = READ_ONCE(dev->dma);
+ map.port = READ_ONCE(dev->if_port);
if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
return -EMSGSIZE;
@@ -1612,10 +1612,10 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
bool force)
{
- int ifindex = dev_get_iflink(dev);
+ int iflink = dev_get_iflink(dev);
- if (force || dev->ifindex != ifindex)
- return nla_put_u32(skb, IFLA_LINK, ifindex);
+ if (force || READ_ONCE(dev->ifindex) != iflink)
+ return nla_put_u32(skb, IFLA_LINK, iflink);
return 0;
}
@@ -1699,7 +1699,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
struct netdev_name_node *name_node;
int count = 0;
- list_for_each_entry(name_node, &dev->name_node->list, list) {
+ list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
return -EMSGSIZE;
count++;
@@ -1707,6 +1707,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
return count;
}
+/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
const struct net_device *dev)
{
@@ -1876,9 +1877,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
}
- if (rtnl_fill_link_ifmap(skb, dev))
- goto nla_put_failure;
-
if (dev->addr_len) {
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
@@ -1928,10 +1926,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
rcu_read_lock();
if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
goto nla_put_failure_rcu;
- rcu_read_unlock();
-
+ if (rtnl_fill_link_ifmap(skb, dev))
+ goto nla_put_failure_rcu;
if (rtnl_fill_prop_list(skb, dev))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
+ rcu_read_unlock();
if (dev->dev.parent &&
nla_put_string(skb, IFLA_PARENT_DEV_NAME,
@@ -2200,25 +2199,22 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct rtnl_link_ops *kind_ops = NULL;
struct netlink_ext_ack *extack = cb->extack;
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- struct net *tgt_net = net;
- int h, s_h;
- int idx = 0, s_idx;
- struct net_device *dev;
- struct hlist_head *head;
+ unsigned int flags = NLM_F_MULTI;
struct nlattr *tb[IFLA_MAX+1];
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
+ struct net *tgt_net = net;
u32 ext_filter_mask = 0;
- const struct rtnl_link_ops *kind_ops = NULL;
- unsigned int flags = NLM_F_MULTI;
+ struct net_device *dev;
int master_idx = 0;
int netnsid = -1;
int err, i;
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
if (err < 0) {
if (cb->strict_check)
@@ -2262,36 +2258,18 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
flags |= NLM_F_DUMP_FILTERED;
walk_entries:
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry(dev, head, index_hlist) {
- if (link_dump_filtered(dev, master_idx, kind_ops))
- goto cont;
- if (idx < s_idx)
- goto cont;
- err = rtnl_fill_ifinfo(skb, dev, net,
- RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq, 0, flags,
- ext_filter_mask, 0, NULL, 0,
- netnsid, GFP_KERNEL);
-
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
-cont:
- idx++;
- }
+ err = 0;
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ if (link_dump_filtered(dev, master_idx, kind_ops))
+ continue;
+ err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq, 0, flags,
+ ext_filter_mask, 0, NULL, 0,
+ netnsid, GFP_KERNEL);
+ if (err < 0)
+ break;
}
-out:
- err = skb->len;
-out_err:
- cb->args[1] = idx;
- cb->args[0] = h;
cb->seq = tgt_net->dev_base_seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
if (netnsid >= 0)
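
The rewrite above also drops the old (hash bucket, offset) resume state in cb->args for a single ifindex cursor in cb->ctx, which for_each_netdev_dump() uses to pick the walk back up when the dump spans several messages. A rough userspace sketch of that cursor pattern, with a sorted array standing in for the per-netns xarray:

#include <stdio.h>

struct dev_entry { unsigned long ifindex; const char *name; };

/* Sorted stand-in for the per-netns xarray of devices. */
static const struct dev_entry devs[] = {
	{ 1, "lo" }, { 2, "eth0" }, { 7, "wg0" }, { 9, "veth1" },
};

/* Emit up to 'budget' records, resuming from *cursor; returns the
 * number emitted. The caller re-invokes with the same cursor until
 * the walk completes, like a multi-part netlink dump.
 */
static int dump(unsigned long *cursor, int budget)
{
	int n = 0;

	for (size_t i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (devs[i].ifindex < *cursor)
			continue;		/* already dumped */
		if (n == budget)
			return n;		/* buffer full, resume later */
		printf("%lu: %s\n", devs[i].ifindex, devs[i].name);
		*cursor = devs[i].ifindex + 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned long cursor = 0;

	while (dump(&cursor, 2) == 2)
		puts("-- next message --");
	return 0;
}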
@@ -2983,11 +2961,9 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_LINKMODE]) {
unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
- write_lock(&dev_base_lock);
if (dev->link_mode ^ value)
status |= DO_SETLINK_NOTIFY;
- dev->link_mode = value;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->link_mode, value);
}
if (tb[IFLA_VFINFO_LIST]) {
@@ -6552,6 +6528,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
owner = link->owner;
dumpit = link->dumpit;
+ flags = link->flags;
if (type == RTM_GETLINK - RTM_BASE)
min_dump_alloc = rtnl_calcit(skb, nlh);
@@ -6569,6 +6546,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
.module = owner,
+ .flags = flags,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
diff --git a/net/core/scm.c b/net/core/scm.c
index d0e0852a24d5..9cd4b0a01cd6 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -36,6 +36,7 @@
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
+#include <net/af_unix.h>
/*
@@ -85,6 +86,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
+ fpl->count_unix = 0;
fpl->max = SCM_MAX_FD;
fpl->user = NULL;
}
@@ -109,6 +111,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
fput(file);
return -EINVAL;
}
+ if (unix_get_socket(file))
+ fpl->count_unix++;
+
*fpp++ = file;
fpl->count++;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edbbef563d4d..b99127712e67 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -69,6 +69,7 @@
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
+#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
@@ -88,15 +89,10 @@
#include "dev.h"
#include "sock_destructor.h"
-struct kmem_cache *skbuff_cache __ro_after_init;
-static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
-
-static struct kmem_cache *skb_small_head_cache __ro_after_init;
-
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
@@ -115,6 +111,24 @@ static struct kmem_cache *skb_small_head_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
+/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
+ * iov_iter_bvec(). These static asserts ensure the cast is valid as long as the
+ * netmem is a page.
+ */
+static_assert(offsetof(struct bio_vec, bv_page) ==
+ offsetof(skb_frag_t, netmem));
+static_assert(sizeof_field(struct bio_vec, bv_page) ==
+ sizeof_field(skb_frag_t, netmem));
+
+static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
+static_assert(sizeof_field(struct bio_vec, bv_len) ==
+ sizeof_field(skb_frag_t, len));
+
+static_assert(offsetof(struct bio_vec, bv_offset) ==
+ offsetof(skb_frag_t, offset));
+static_assert(sizeof_field(struct bio_vec, bv_offset) ==
+ sizeof_field(skb_frag_t, offset));
+
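
The same layout-compatibility check can be written in standard C11: if two structs agree on the offset and size of every member in play, reinterpreting one as the other is sound. A self-contained, compile-time sketch with invented stand-in types (not the real bio_vec or skb_frag_t):

#include <assert.h>
#include <stddef.h>

#define sizeof_field(T, m) sizeof(((T *)0)->m)

struct bio_vec_like { void *page;   unsigned int len; unsigned int offset; };
struct frag_like    { void *netmem; unsigned int len; unsigned int offset; };

static_assert(offsetof(struct bio_vec_like, page) ==
	      offsetof(struct frag_like, netmem), "page/netmem offset");
static_assert(sizeof_field(struct bio_vec_like, page) ==
	      sizeof_field(struct frag_like, netmem), "page/netmem size");
static_assert(offsetof(struct bio_vec_like, len) ==
	      offsetof(struct frag_like, len), "len offset");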
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
@@ -297,7 +311,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
fragsz = SKB_DATA_ALIGN(fragsz);
- return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
@@ -309,13 +324,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
- data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+ data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+ align_mask);
} else {
struct napi_alloc_cache *nc;
local_bh_disable();
nc = this_cpu_ptr(&napi_alloc_cache);
- data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
local_bh_enable();
}
return data;
@@ -328,7 +345,7 @@ static struct sk_buff *napi_skb_cache_get(void)
struct sk_buff *skb;
if (unlikely(!nc->skb_count)) {
- nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
@@ -337,7 +354,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
}
@@ -395,7 +412,7 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -446,7 +463,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -557,7 +574,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
- obj = kmem_cache_alloc_node(skb_small_head_cache,
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
@@ -565,7 +582,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
goto out;
}
@@ -628,7 +645,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
- ? skbuff_fclone_cache : skbuff_cache;
+ ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
@@ -845,17 +862,17 @@ skb_fail:
}
EXPORT_SYMBOL(__napi_alloc_skb);
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize)
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize)
{
DEBUG_NET_WARN_ON_ONCE(size > truesize);
- skb_fill_page_desc(skb, i, page, off, size);
+ skb_fill_netmem_desc(skb, i, netmem, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
-EXPORT_SYMBOL(skb_add_rx_frag);
+EXPORT_SYMBOL(skb_add_rx_frag_netmem);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
@@ -895,6 +912,98 @@ static bool is_pp_page(struct page *page)
return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ u32 size, truesize, len, max_head_size, off;
+ struct sk_buff *skb = *pskb, *nskb;
+ int err, i, head_off;
+ void *data;
+
+ /* XDP does not support fraglist, so we need to linearize
+ * the skb.
+ */
+ if (skb_has_frag_list(skb))
+ return -EOPNOTSUPP;
+
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
+ if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+ return -ENOMEM;
+
+ size = min_t(u32, skb->len, max_head_size);
+ truesize = SKB_HEAD_ALIGN(size) + headroom;
+ data = page_pool_dev_alloc_va(pool, &truesize);
+ if (!data)
+ return -ENOMEM;
+
+ nskb = napi_build_skb(data, truesize);
+ if (!nskb) {
+ page_pool_free_va(pool, data, true);
+ return -ENOMEM;
+ }
+
+ skb_reserve(nskb, headroom);
+ skb_copy_header(nskb, skb);
+ skb_mark_for_recycle(nskb);
+
+ err = skb_copy_bits(skb, 0, nskb->data, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+ skb_put(nskb, size);
+
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+
+ off = size;
+ len = skb->len - off;
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ struct page *page;
+ u32 page_off;
+
+ size = min_t(u32, len, PAGE_SIZE);
+ truesize = size;
+
+ page = page_pool_dev_alloc(pool, &page_off, &truesize);
+ if (!page) {
+ consume_skb(nskb);
+ return -ENOMEM;
+ }
+
+ skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
+ err = skb_copy_bits(skb, off, page_address(page) + page_off,
+ size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ len -= size;
+ off += size;
+ }
+
+ consume_skb(skb);
+ *pskb = nskb;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+EXPORT_SYMBOL(skb_pp_cow_data);
+
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog)
+{
+ if (!prog->aux->xdp_has_frags)
+ return -EINVAL;
+
+ return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
+}
+EXPORT_SYMBOL(skb_cow_data_for_xdp);
+
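The up-front bounds check in skb_pp_cow_data() caps a packet at one page of head, minus headroom and struct overhead, plus MAX_SKB_FRAGS full pages. A worked example of that split with illustrative constants (the SKB_WITH_OVERHEAD() accounting is folded into a stand-in HEAD_OVERHEAD):

#include <stdio.h>

#define PAGE_SIZE     4096u
#define MAX_SKB_FRAGS 17u
#define HEAD_OVERHEAD 320u	/* stand-in for SKB_WITH_OVERHEAD() slack */

int main(void)
{
	unsigned int headroom = 256;	/* XDP_PACKET_HEADROOM */
	unsigned int max_head = PAGE_SIZE - headroom - HEAD_OVERHEAD;
	unsigned int limit = max_head + MAX_SKB_FRAGS * PAGE_SIZE;
	unsigned int len = 10000;

	if (len > limit) {
		puts("-ENOMEM: cannot linearize into head + frags");
		return 1;
	}
	unsigned int head = len < max_head ? len : max_head;
	printf("head copies %u bytes, frags copy %u bytes\n",
	       head, len - head);
	return 0;
}
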
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page, bool napi_safe)
{
@@ -923,9 +1032,10 @@ bool napi_pp_put_page(struct page *page, bool napi_safe)
*/
if (napi_safe || in_softirq()) {
const struct napi_struct *napi = READ_ONCE(pp->p.napi);
+ unsigned int cpuid = smp_processor_id();
- allow_direct = napi &&
- READ_ONCE(napi->list_owner) == smp_processor_id();
+ allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid;
+ allow_direct |= READ_ONCE(pp->cpuid) == cpuid;
}
/* Driver set this to memory recycling info. Reset it on recycle.
@@ -981,7 +1091,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
- kmem_cache_free(skb_small_head_cache, head);
+ kmem_cache_free(net_hotdata.skb_small_head_cache, head);
else
kfree(head);
}
@@ -1005,9 +1115,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
- if (skb->cloned &&
- atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
- &shinfo->dataref))
+ if (!skb_data_unref(skb, shinfo))
goto exit;
if (skb_zcopy(skb)) {
@@ -1048,7 +1156,7 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
@@ -1069,7 +1177,7 @@ static void kfree_skbmem(struct sk_buff *skb)
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
- kmem_cache_free(skbuff_fclone_cache, fclones);
+ kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
@@ -1166,7 +1274,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
- kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
sa->skb_array);
sa->skb_count = 0;
}
@@ -1191,7 +1299,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
}
if (sa.skb_count)
- kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
@@ -1353,9 +1461,9 @@ static void napi_skb_cache_put(struct sk_buff *skb)
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
- kmem_cache_size(skbuff_cache));
+ kmem_cache_size(net_hotdata.skbuff_cache));
- kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
@@ -1906,10 +2014,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags point to kernel buffers */
for (i = 0; i < new_frags - 1; i++) {
- __skb_fill_page_desc(skb, i, head, 0, psize);
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
head = (struct page *)page_private(head);
}
- __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
+ __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
+ d_off);
skb_shinfo(skb)->nr_frags = new_frags;
release:
@@ -1951,7 +2060,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- n = kmem_cache_alloc(skbuff_cache, gfp_mask);
+ n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
if (!n)
return NULL;
@@ -3647,7 +3756,8 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
- __skb_fill_page_desc(to, 0, page, offset, plen);
+ __skb_fill_netmem_desc(to, 0, page_to_netmem(page),
+ offset, plen);
get_page(page);
j = 1;
len -= plen;
@@ -4889,7 +4999,7 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
- skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+ net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|
@@ -4897,7 +5007,7 @@ void __init skb_init(void)
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
- skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+ net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -4906,7 +5016,7 @@ void __init skb_init(void)
* struct skb_shared_info is located at the end of skb->head,
* and should not be copied to/from user.
*/
- skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
+ net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
SKB_SMALL_HEAD_CACHE_SIZE,
0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC,
@@ -5779,7 +5889,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
} else {
__kfree_skb(skb);
}
@@ -6737,6 +6847,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
xfrm_state_hold(sp->xvec[i]);
}
#endif
+#ifdef CONFIG_MCTP_FLOWS
+ if (old_active & (1 << SKB_EXT_MCTP)) {
+ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
+
+ if (flow->key)
+ refcount_inc(&flow->key->refs);
+ }
+#endif
__skb_ext_put(old);
return new;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 5e78798456fd..43bf3818c19e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -283,6 +283,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
int sysctl_tstamp_allow_data __read_mostly = 1;
@@ -2052,8 +2053,9 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
- prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+ prot->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see sk_prot_alloc() */);
#ifdef CONFIG_SECURITY_NETWORK
nsk->sk_security = sptr;
@@ -2582,8 +2584,18 @@ EXPORT_SYMBOL(sock_efree);
#ifdef CONFIG_INET
void sock_pfree(struct sk_buff *skb)
{
- if (sk_is_refcounted(skb->sk))
- sock_gen_put(skb->sk);
+ struct sock *sk = skb->sk;
+
+ if (!sk_is_refcounted(sk))
+ return;
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ inet_reqsk(sk)->rsk_listener = NULL;
+ reqsk_free(inet_reqsk(sk));
+ return;
+ }
+
+ sock_gen_put(sk);
}
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
@@ -4223,3 +4235,65 @@ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
+
+static int __init sock_struct_check(void)
+{
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
+ return 0;
+}
+
+core_initcall(sock_struct_check);
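
These asserts pin each field of struct sock to a named cacheline group, so an accidental reorder that splits a hot group across cachelines fails the build instead of quietly regressing. A small C11 analogue, assuming 64-byte cachelines and invented field names:

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>

struct sock_like {
	alignas(64) struct {		/* read-mostly on the RX path */
		void *rx_dst;
		int rx_dst_ifindex;
		int rcvbuf;
	} read_rx;
	alignas(64) struct {		/* written on the TX path */
		long wmem_alloc;
		long sndbuf;
	} write_tx;
};

static_assert(sizeof(((struct sock_like *)0)->read_rx) <= 64,
	      "RX group must fit one cacheline");
static_assert(offsetof(struct sock_like, write_tx) % 64 == 0,
	      "TX group must start on a cacheline boundary");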
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b1e29e18d1d6..654122838025 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -16,9 +16,10 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
-static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
-static DEFINE_MUTEX(sock_diag_table_mutex);
+static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX];
+
+static struct sock_diag_inet_compat __rcu *inet_rcv_compat;
+
static struct workqueue_struct *broadcast_wq;
DEFINE_COOKIE(sock_cookie);
@@ -122,6 +123,24 @@ static size_t sock_diag_nlmsg_size(void)
+ nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}
+static const struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+ const struct sock_diag_handler *handler;
+
+ rcu_read_lock();
+ handler = rcu_dereference(sock_diag_handlers[family]);
+ if (handler && !try_module_get(handler->owner))
+ handler = NULL;
+ rcu_read_unlock();
+
+ return handler;
+}
+
+static void sock_diag_unlock_handler(const struct sock_diag_handler *handler)
+{
+ module_put(handler->owner);
+}
+
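The pair above replaces the handler mutex with RCU plus a module reference: a caller may invoke the handler only while try_module_get() succeeds. A userspace analogue with C11 atomics; the RCU grace period that makes the initial pointer load safe is assumed here, not shown:

#include <stdatomic.h>
#include <stddef.h>

struct handler {
	atomic_int refs;		/* 0 means being torn down */
	int (*get_info)(void *sk);
};

static _Atomic(struct handler *) handlers[16];

/* Like sock_diag_lock_handler(): return the handler only if its
 * refcount could be raised from a non-zero value.
 */
static struct handler *handler_get(int family)
{
	struct handler *h = atomic_load(&handlers[family]);
	int old;

	if (!h)
		return NULL;
	old = atomic_load(&h->refs);
	do {
		if (old == 0)		/* lost the race with unregister */
			return NULL;
	} while (!atomic_compare_exchange_weak(&h->refs, &old, old + 1));
	return h;
}

static void handler_put(struct handler *h)
{
	atomic_fetch_sub(&h->refs, 1);
}

int main(void)
{
	static struct handler tcp_diag = { .refs = 1 };
	struct handler *h;

	atomic_store(&handlers[2], &tcp_diag);	/* register */
	h = handler_get(2);
	if (h)
		handler_put(h);
	return 0;
}
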
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
struct broadcast_sk *bsk =
@@ -138,12 +157,12 @@ static void sock_diag_broadcast_destroy_work(struct work_struct *work)
if (!skb)
goto out;
- mutex_lock(&sock_diag_table_mutex);
- hndl = sock_diag_handlers[sk->sk_family];
- if (hndl && hndl->get_info)
- err = hndl->get_info(skb, sk);
- mutex_unlock(&sock_diag_table_mutex);
-
+ hndl = sock_diag_lock_handler(sk->sk_family);
+ if (hndl) {
+ if (hndl->get_info)
+ err = hndl->get_info(skb, sk);
+ sock_diag_unlock_handler(hndl);
+ }
if (!err)
nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
GFP_KERNEL);
@@ -166,51 +185,45 @@ void sock_diag_broadcast_destroy(struct sock *sk)
queue_work(broadcast_wq, &bsk->work);
}
-void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- mutex_lock(&sock_diag_table_mutex);
- inet_rcv_compat = fn;
- mutex_unlock(&sock_diag_table_mutex);
+ xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
+ ptr);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
-void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr)
{
- mutex_lock(&sock_diag_table_mutex);
- inet_rcv_compat = NULL;
- mutex_unlock(&sock_diag_table_mutex);
+ const struct sock_diag_inet_compat *old;
+
+ old = xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat,
+ NULL);
+ WARN_ON_ONCE(old != ptr);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
int sock_diag_register(const struct sock_diag_handler *hndl)
{
- int err = 0;
+ int family = hndl->family;
- if (hndl->family >= AF_MAX)
+ if (family >= AF_MAX)
return -EINVAL;
- mutex_lock(&sock_diag_table_mutex);
- if (sock_diag_handlers[hndl->family])
- err = -EBUSY;
- else
- sock_diag_handlers[hndl->family] = hndl;
- mutex_unlock(&sock_diag_table_mutex);
-
- return err;
+ return !cmpxchg((const struct sock_diag_handler **)
+ &sock_diag_handlers[family],
+ NULL, hndl) ? 0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(sock_diag_register);
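
Registration itself becomes one compare-and-swap: claiming a NULL slot either succeeds atomically or reports the family as taken, with no mutex and no window where two registrants both think they won. A compact userspace sketch (slot count and names are illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define FAMILY_MAX 46			/* stand-in for AF_MAX */

static _Atomic(const void *) diag_slots[FAMILY_MAX];

static int diag_register(int family, const void *hndl)
{
	const void *expected = NULL;

	if (family < 0 || family >= FAMILY_MAX)
		return -EINVAL;
	return atomic_compare_exchange_strong(&diag_slots[family],
					      &expected, hndl) ? 0 : -EBUSY;
}

static void diag_unregister(int family)
{
	atomic_store(&diag_slots[family], NULL);
}

int main(void)
{
	static const int tcp_handler = 42;

	printf("first:  %d\n", diag_register(2, &tcp_handler));
	printf("second: %d\n", diag_register(2, &tcp_handler)); /* -EBUSY */
	diag_unregister(2);
	return 0;
}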
-void sock_diag_unregister(const struct sock_diag_handler *hnld)
+void sock_diag_unregister(const struct sock_diag_handler *hndl)
{
- int family = hnld->family;
+ int family = hndl->family;
if (family >= AF_MAX)
return;
- mutex_lock(&sock_diag_table_mutex);
- BUG_ON(sock_diag_handlers[family] != hnld);
- sock_diag_handlers[family] = NULL;
- mutex_unlock(&sock_diag_table_mutex);
+ xchg((const struct sock_diag_handler **)&sock_diag_handlers[family],
+ NULL);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
@@ -227,20 +240,20 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
- if (sock_diag_handlers[req->sdiag_family] == NULL)
+ if (!rcu_access_pointer(sock_diag_handlers[req->sdiag_family]))
sock_load_diag_module(req->sdiag_family, 0);
- mutex_lock(&sock_diag_table_mutex);
- hndl = sock_diag_handlers[req->sdiag_family];
+ hndl = sock_diag_lock_handler(req->sdiag_family);
if (hndl == NULL)
- err = -ENOENT;
- else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
+ return -ENOENT;
+
+ if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
err = hndl->dump(skb, nlh);
else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
err = hndl->destroy(skb, nlh);
else
err = -EOPNOTSUPP;
- mutex_unlock(&sock_diag_table_mutex);
+ sock_diag_unlock_handler(hndl);
return err;
}
@@ -248,20 +261,27 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ const struct sock_diag_inet_compat *ptr;
int ret;
switch (nlh->nlmsg_type) {
case TCPDIAG_GETSOCK:
case DCCPDIAG_GETSOCK:
- if (inet_rcv_compat == NULL)
+
+ if (!rcu_access_pointer(inet_rcv_compat))
sock_load_diag_module(AF_INET, 0);
- mutex_lock(&sock_diag_table_mutex);
- if (inet_rcv_compat != NULL)
- ret = inet_rcv_compat(skb, nlh);
- else
- ret = -EOPNOTSUPP;
- mutex_unlock(&sock_diag_table_mutex);
+ rcu_read_lock();
+ ptr = rcu_dereference(inet_rcv_compat);
+ if (ptr && !try_module_get(ptr->owner))
+ ptr = NULL;
+ rcu_read_unlock();
+
+ ret = -EOPNOTSUPP;
+ if (ptr) {
+ ret = ptr->fn(skb, nlh);
+ module_put(ptr->owner);
+ }
return ret;
case SOCK_DIAG_BY_FAMILY:
@@ -272,13 +292,9 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
-static DEFINE_MUTEX(sock_diag_mutex);
-
static void sock_diag_rcv(struct sk_buff *skb)
{
- mutex_lock(&sock_diag_mutex);
netlink_rcv_skb(skb, &sock_diag_rcv_msg);
- mutex_unlock(&sock_diag_mutex);
}
static int sock_diag_bind(struct net *net, int group)
@@ -286,12 +302,12 @@ static int sock_diag_bind(struct net *net, int group)
switch (group) {
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET])
+ if (!rcu_access_pointer(sock_diag_handlers[AF_INET]))
sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET6])
+ if (!rcu_access_pointer(sock_diag_handlers[AF_INET6]))
sock_load_diag_module(AF_INET6, 0);
break;
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0f0cb1465e08..6973dda3abda 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -23,6 +23,8 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
+#include <net/hotdata.h>
+#include <net/rps.h>
#include "dev.h"
@@ -30,6 +32,7 @@ static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -137,7 +140,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ orig_sock_table = rcu_dereference_protected(
+ net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
@@ -158,7 +162,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -ENOMEM;
}
- rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
+ net_hotdata.rps_cpu_mask =
+ roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
@@ -169,7 +174,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
sock_table = NULL;
if (sock_table != orig_sock_table) {
- rcu_assign_pointer(rps_sock_flow_table, sock_table);
+ rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
+ sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
@@ -299,8 +305,8 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
weight = READ_ONCE(weight_p);
- WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
- WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+ WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
+ WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
}
mutex_unlock(&dev_weight_mutex);
@@ -408,6 +414,14 @@ static struct ctl_table net_core_table[] = {
.extra1 = &min_rcvbuf,
},
{
+ .procname = "mem_pcpu_rsv",
+ .data = &sysctl_mem_pcpu_rsv,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_mem_pcpu_rsv,
+ },
+ {
.procname = "dev_weight",
.data = &weight_p,
.maxlen = sizeof(int),
@@ -430,7 +444,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_max_backlog",
- .data = &netdev_max_backlog,
+ .data = &net_hotdata.max_backlog,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -489,7 +503,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_tstamp_prequeue",
- .data = &netdev_tstamp_prequeue,
+ .data = &net_hotdata.tstamp_prequeue,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -567,7 +581,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_budget",
- .data = &netdev_budget,
+ .data = &net_hotdata.netdev_budget,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -590,7 +604,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_budget_usecs",
- .data = &netdev_budget_usecs,
+ .data = &net_hotdata.netdev_budget_usecs,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -623,7 +637,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "gro_normal_batch",
- .data = &gro_normal_batch,
+ .data = &net_hotdata.gro_normal_batch,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 4869c1c2d8f3..41693154e426 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -16,6 +16,7 @@
#include <linux/bug.h>
#include <net/page_pool/helpers.h>
+#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
@@ -75,7 +76,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
xa = container_of(rcu, struct xdp_mem_allocator, rcu);
/* Allow this ID to be reused */
- ida_simple_remove(&mem_id_pool, xa->mem.id);
+ ida_free(&mem_id_pool, xa->mem.id);
kfree(xa);
}
@@ -242,7 +243,7 @@ static int __mem_id_cyclic_get(gfp_t gfp)
int id;
again:
- id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
+ id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
if (id < 0) {
if (id == -ENOSPC) {
/* Cyclic allocator, reset next id */
@@ -317,7 +318,7 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
/* Insert allocator into ID lookup table */
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
if (IS_ERR(ptr)) {
- ida_simple_remove(&mem_id_pool, mem->id);
+ ida_free(&mem_id_pool, mem->id);
mem->id = 0;
errno = PTR_ERR(ptr);
goto err;
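
One subtlety in the ida conversion: ida_simple_get() treated its end argument as exclusive, while ida_alloc_range() treats max as inclusive, which is why the upper bound becomes MEM_ID_MAX - 1. Assuming the documented semantics of both helpers, the two calls cover the same id range:

/* old: allocates an id in [mem_id_next, MEM_ID_MAX) */
id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);

/* new: allocates an id in [mem_id_next, MEM_ID_MAX - 1], same range */
id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);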
@@ -589,7 +590,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
- n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
+ n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
if (unlikely(!n_skb))
return -ENOMEM;
@@ -658,7 +659,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -771,11 +772,11 @@ __bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
-BTF_SET8_END(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_END(xdp_metadata_kfunc_ids)
static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index c4bbac99740d..1cba001bb4c8 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -376,15 +376,11 @@ EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
int __init dccp_ackvec_init(void)
{
- dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
- sizeof(struct dccp_ackvec), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_slab = KMEM_CACHE(dccp_ackvec, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_slab == NULL)
goto out_err;
- dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
- sizeof(struct dccp_ackvec_record),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_record_slab = KMEM_CACHE(dccp_ackvec_record, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_record_slab == NULL)
goto out_destroy_slab;
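
KMEM_CACHE() derives the cache name, object size and alignment from the struct itself, so the shortened form cannot drift out of sync with the type. Going by its definition in <linux/slab.h>, the first call above expands to roughly:

dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
				     sizeof(struct dccp_ackvec),
				     __alignof__(struct dccp_ackvec),
				     SLAB_HWCACHE_ALIGN, NULL);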
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 8a82c5a2c5a8..f5019d95c3ae 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -58,6 +58,7 @@ static int dccp_diag_dump_one(struct netlink_callback *cb,
}
static const struct inet_diag_handler dccp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = dccp_diag_dump,
.dump_one = dccp_diag_dump_one,
.idiag_get_info = dccp_diag_get_info,
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index c81cf2dd154f..f9786d51f68f 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -198,7 +198,7 @@ static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITC
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1),
- [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U16, 3),
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U8, 3),
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1),
};
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index ac7be864e80d..09d2f5d4b3dd 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
@@ -626,7 +625,6 @@ static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
static int dsa_switch_setup(struct dsa_switch *ds)
{
- struct device_node *dn;
int err;
if (ds->setup)
@@ -666,10 +664,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dsa_user_mii_bus_init(ds);
- dn = of_get_child_by_name(ds->dev->of_node, "mdio");
-
- err = of_mdiobus_register(ds->user_mii_bus, dn);
- of_node_put(dn);
+ err = mdiobus_register(ds->user_mii_bus);
if (err < 0)
goto free_user_mii_bus;
}
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 2717e9d7b612..1aba1d05c27a 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -75,7 +75,7 @@ sja1105_tagger_private(struct dsa_switch *ds)
}
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
-static inline bool sja1105_is_link_local(const struct sk_buff *skb)
+static bool sja1105_is_link_local(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
@@ -121,7 +121,7 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
-static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
+static bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
diff --git a/net/dsa/user.c b/net/dsa/user.c
index b15e71cc342c..16d395bb1a1f 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -210,7 +210,7 @@ static int dsa_user_sync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_uc(struct net_device *dev,
@@ -230,7 +230,7 @@ static int dsa_user_unsync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_sync_mc(struct net_device *dev,
@@ -250,7 +250,7 @@ static int dsa_user_sync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_mc(struct net_device *dev,
@@ -270,7 +270,7 @@ static int dsa_user_unsync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
void dsa_user_sync_ha(struct net_device *dev)
@@ -352,7 +352,7 @@ void dsa_user_mii_bus_init(struct dsa_switch *ds)
/* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
{
- return dsa_user_to_conduit(dev)->ifindex;
+ return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}
static int dsa_user_open(struct net_device *dev)
@@ -875,8 +875,8 @@ static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static inline netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
- struct sk_buff *skb)
+static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
struct dsa_user_priv *p = netdev_priv(dev);
@@ -1222,7 +1222,7 @@ static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
return ret;
}
-static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
@@ -1242,7 +1242,7 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phylink_ethtool_set_eee(dp->pl, e);
}
-static int dsa_user_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
@@ -2429,7 +2429,7 @@ static const struct net_device_ops dsa_user_netdev_ops = {
.ndo_fill_forward_path = dsa_user_fill_forward_path,
};
-static struct device_type dsa_type = {
+static const struct device_type dsa_type = {
.name = "dsa",
};
@@ -2625,11 +2625,7 @@ int dsa_user_create(struct dsa_port *port)
user_dev->vlan_features = conduit->vlan_features;
p = netdev_priv(user_dev);
- user_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!user_dev->tstats) {
- free_netdev(user_dev);
- return -ENOMEM;
- }
+ user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
ret = gro_cells_init(&p->gcells, user_dev);
if (ret)
@@ -2695,7 +2691,6 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
port->user = NULL;
return ret;
@@ -2716,7 +2711,6 @@ void dsa_user_destroy(struct net_device *user_dev)
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
}
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 2853394d06a8..bf398973eb8a 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -4,16 +4,13 @@
#include "common.h"
#include "bitset.h"
-#define EEE_MODES_COUNT \
- (sizeof_field(struct ethtool_eee, supported) * BITS_PER_BYTE)
-
struct eee_req_info {
struct ethnl_req_info base;
};
struct eee_reply_data {
struct ethnl_reply_data base;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define EEE_REPDATA(__reply_base) \
@@ -30,6 +27,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
{
struct eee_reply_data *data = EEE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct ethtool_keee *eee = &data->eee;
int ret;
if (!dev->ethtool_ops->get_eee)
@@ -37,7 +35,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- ret = dev->ethtool_ops->get_eee(dev, &data->eee);
+ ret = dev->ethtool_ops->get_eee(dev, eee);
ethnl_ops_complete(dev);
return ret;
@@ -48,24 +46,21 @@ static int eee_reply_size(const struct ethnl_req_info *req_base,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int len = 0;
int ret;
- BUILD_BUG_ON(sizeof(eee->advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
- BUILD_BUG_ON(sizeof(eee->lp_advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
-
/* MODES_OURS */
- ret = ethnl_bitset32_size(&eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
/* MODES_PEERS */
- ret = ethnl_bitset32_size(&eee->lp_advertised, NULL,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
@@ -84,24 +79,26 @@ static int eee_fill_reply(struct sk_buff *skb,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_OURS,
- &eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS,
+ eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_PEER,
- &eee->lp_advertised, NULL, EEE_MODES_COUNT,
- link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER,
+ eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, !!eee->eee_active) ||
- nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, !!eee->eee_enabled) ||
+ if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) ||
+ nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) ||
nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED,
- !!eee->tx_lpi_enabled) ||
+ eee->tx_lpi_enabled) ||
nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer))
return -EMSGSIZE;
@@ -132,7 +129,7 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct ethtool_eee eee = {};
+ struct ethtool_keee eee = {};
bool mod = false;
int ret;
@@ -140,14 +137,15 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret < 0)
return ret;
- ret = ethnl_update_bitset32(&eee.advertised, EEE_MODES_COUNT,
- tb[ETHTOOL_A_EEE_MODES_OURS],
- link_mode_names, info->extack, &mod);
+ ret = ethnl_update_bitset(eee.advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ tb[ETHTOOL_A_EEE_MODES_OURS],
+ link_mode_names, info->extack, &mod);
if (ret < 0)
return ret;
- ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
- ethnl_update_bool32(&eee.tx_lpi_enabled,
- tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
+ ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
+ ethnl_update_bool(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED],
+ &mod);
ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
&mod);
if (!mod)
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7519b0818b91..5a55270aa86e 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -26,12 +26,12 @@
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/pm_runtime.h>
+#include <linux/utsname.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <linux/ethtool_netlink.h>
-#include <generated/utsrelease.h>
#include "common.h"
/* State held across locks and calls for commands which have devlink fallback */
@@ -713,7 +713,8 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
struct device *parent = dev->dev.parent;
rsp->info.cmd = ETHTOOL_GDRVINFO;
- strscpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version));
+ strscpy(rsp->info.version, init_uts_ns.name.release,
+ sizeof(rsp->info.version));
if (ops->get_drvinfo) {
ops->get_drvinfo(dev, &rsp->info);
if (!rsp->info.bus_info[0] && parent)
@@ -1508,22 +1509,57 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return 0;
}
+static void eee_to_keee(struct ethtool_keee *keee,
+ const struct ethtool_eee *eee)
+{
+ memset(keee, 0, sizeof(*keee));
+
+ keee->eee_enabled = eee->eee_enabled;
+ keee->tx_lpi_enabled = eee->tx_lpi_enabled;
+ keee->tx_lpi_timer = eee->tx_lpi_timer;
+
+ ethtool_convert_legacy_u32_to_link_mode(keee->advertised,
+ eee->advertised);
+}
+
+static void keee_to_eee(struct ethtool_eee *eee,
+ const struct ethtool_keee *keee)
+{
+ bool overflow;
+
+ memset(eee, 0, sizeof(*eee));
+
+ eee->eee_active = keee->eee_active;
+ eee->eee_enabled = keee->eee_enabled;
+ eee->tx_lpi_enabled = keee->tx_lpi_enabled;
+ eee->tx_lpi_timer = keee->tx_lpi_timer;
+
+ overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported,
+ keee->supported);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->advertised,
+ keee->advertised);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised,
+ keee->lp_advertised);
+ if (overflow)
+ pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n");
+}
+
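keee_to_eee() has to squeeze the kernel's wide link-mode bitmaps back into the ioctl ABI's 32-bit masks, warning when modes at bit 32 or above get dropped. A self-contained sketch of that lossy conversion; the two-word mask is an illustrative stand-in for the kernel's link-mode bitmap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true when the conversion was lossless, mirroring the return
 * convention of ethtool_convert_link_mode_to_legacy_u32().
 */
static bool linkmodes_to_legacy_u32(uint32_t *legacy, const uint64_t mask[2])
{
	*legacy = (uint32_t)(mask[0] & 0xffffffffu);
	return (mask[0] >> 32) == 0 && mask[1] == 0;
}

int main(void)
{
	const uint64_t modes[2] = { (1ull << 3) | (1ull << 40), 0 };
	uint32_t legacy;

	if (!linkmodes_to_legacy_u32(&legacy, modes))
		fprintf(stderr, "dropped EEE modes at bit 32 or above\n");
	printf("legacy mask: %#x\n", legacy);
	return 0;
}
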
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int rc;
if (!dev->ethtool_ops->get_eee)
return -EOPNOTSUPP;
- memset(&edata, 0, sizeof(struct ethtool_eee));
- edata.cmd = ETHTOOL_GEEE;
- rc = dev->ethtool_ops->get_eee(dev, &edata);
-
+ memset(&keee, 0, sizeof(keee));
+ rc = dev->ethtool_ops->get_eee(dev, &keee);
if (rc)
return rc;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ keee_to_eee(&eee, &keee);
+ if (copy_to_user(useraddr, &eee, sizeof(eee)))
return -EFAULT;
return 0;
@@ -1531,16 +1567,18 @@ static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int ret;
if (!dev->ethtool_ops->set_eee)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ if (copy_from_user(&eee, useraddr, sizeof(eee)))
return -EFAULT;
- ret = dev->ethtool_ops->set_eee(dev, &edata);
+ eee_to_keee(&keee, &eee);
+ ret = dev->ethtool_ops->set_eee(dev, &keee);
if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
return ret;
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index fe3553f60bf3..bd04f28d5cf4 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -477,11 +477,7 @@ out:
return ret;
}
-/* Default ->dumpit() handler for GET requests. Device iteration copied from
- * rtnl_dump_ifinfo(); we have to be more careful about device hashtable
- * persistence as we cannot guarantee to hold RTNL lock through the whole
- * function as rtnetnlink does.
- */
+/* Default ->dumpit() handler for GET requests. */
static int ethnl_default_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -490,14 +486,14 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
struct net_device *dev;
int ret = 0;
- rtnl_lock();
+ rcu_read_lock();
for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
dev_hold(dev);
- rtnl_unlock();
+ rcu_read_unlock();
ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
- rtnl_lock();
+ rcu_read_lock();
dev_put(dev);
if (ret < 0 && ret != -EOPNOTSUPP) {
@@ -507,7 +503,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
}
ret = 0;
}
- rtnl_unlock();
+ rcu_read_unlock();
return ret;
}
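
ethnl_default_dumpit() now iterates under rcu_read_lock() but has to drop it around ethnl_default_dump_one(), which may sleep; the dev_hold()/dev_put() pair keeps the device alive across that unlocked window. A userspace analogue, with RCU approximated by a reader lock and the refcount by a C11 atomic:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct dev {
	atomic_int refs;
	int ifindex;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static void dump_one(struct dev *d)
{
	/* May block (e.g. memory allocation); must not hold table_lock. */
	printf("ifindex %d\n", d->ifindex);
}

static void dump_all(struct dev **table, int n)
{
	for (int i = 0; i < n; i++) {
		struct dev *d;

		pthread_rwlock_rdlock(&table_lock);
		d = table[i];
		if (d)
			atomic_fetch_add(&d->refs, 1);	/* dev_hold() */
		pthread_rwlock_unlock(&table_lock);

		if (!d)
			continue;
		dump_one(d);				/* lock dropped */
		atomic_fetch_sub(&d->refs, 1);		/* dev_put() */
	}
}

int main(void)
{
	struct dev d = { .refs = 0, .ifindex = 3 };
	struct dev *table[] = { &d, NULL };

	dump_all(table, 2);
	return 0;
}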
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 9d71b66183da..c98b5b71ad7c 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -28,29 +28,19 @@ static bool is_slave_up(struct net_device *dev)
return dev && is_admin_up(dev) && netif_oper_up(dev);
}
-static void __hsr_set_operstate(struct net_device *dev, int transition)
-{
- write_lock(&dev_base_lock);
- if (dev->operstate != transition) {
- dev->operstate = transition;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- } else {
- write_unlock(&dev_base_lock);
- }
-}
-
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
- if (!is_admin_up(master->dev)) {
- __hsr_set_operstate(master->dev, IF_OPER_DOWN);
+ struct net_device *dev = master->dev;
+
+ if (!is_admin_up(dev)) {
+ netdev_set_operstate(dev, IF_OPER_DOWN);
return;
}
if (has_carrier)
- __hsr_set_operstate(master->dev, IF_OPER_UP);
+ netdev_set_operstate(dev, IF_OPER_UP);
else
- __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
+ netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}
static bool hsr_check_carrier(struct hsr_port *master)
@@ -78,14 +68,14 @@ static void hsr_check_announce(struct net_device *hsr_dev,
hsr = netdev_priv(hsr_dev);
- if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
+ if (READ_ONCE(hsr_dev->operstate) == IF_OPER_UP && old_operstate != IF_OPER_UP) {
/* Went up */
hsr->announce_count = 0;
mod_timer(&hsr->announce_timer,
jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
- if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
+ if (READ_ONCE(hsr_dev->operstate) != IF_OPER_UP && old_operstate == IF_OPER_UP)
/* Went down */
del_timer(&hsr->announce_timer);
}
@@ -100,7 +90,7 @@ void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
- old_operstate = master->dev->operstate;
+ old_operstate = READ_ONCE(master->dev->operstate);
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
hsr_check_announce(master->dev, old_operstate);
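
The open-coded dev_base_lock dance is gone in favor of netdev_set_operstate(), whose assumed contract is: store the new state atomically and fire a state-change event only when the value actually moved. A small sketch of that notify-on-change shape (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum oper { OPER_DOWN, OPER_LOWERLAYERDOWN, OPER_UP };

struct netdev_like {
	_Atomic enum oper operstate;
};

static void state_change_event(struct netdev_like *dev)
{
	printf("operstate now %d\n", (int)atomic_load(&dev->operstate));
}

static void set_operstate(struct netdev_like *dev, enum oper newstate)
{
	/* Swap unconditionally; notify only if the state really moved. */
	if (atomic_exchange(&dev->operstate, newstate) != newstate)
		state_change_event(dev);
}

int main(void)
{
	struct netdev_like dev = { OPER_DOWN };

	set_operstate(&dev, OPER_UP);	/* fires the event */
	set_operstate(&dev, OPER_UP);	/* no change, no event */
	return 0;
}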
@@ -477,7 +467,7 @@ static const struct net_device_ops hsr_device_ops = {
.ndo_set_rx_mode = hsr_set_rx_mode,
};
-static struct device_type hsr_type = {
+static const struct device_type hsr_type = {
.name = "hsr",
};
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 2c087b7f17c5..77b4e92027c5 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -93,7 +93,7 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
static int lowpan_get_iflink(const struct net_device *dev)
{
- return lowpan_802154_dev(dev)->wdev->ifindex;
+ return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex);
}
static const struct net_device_ops lowpan_netdev_ops = {
@@ -280,5 +280,6 @@ static void __exit lowpan_cleanup_module(void)
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
+MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 00302e8b9615..990a83455dcf 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1137,4 +1137,5 @@ module_init(af_ieee802154_init);
module_exit(af_ieee802154_remove);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IEEE 802.15.4 socket interface");
MODULE_ALIAS_NETPROTO(PF_IEEE802154);
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index d2903933805c..6708160ebf9f 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -93,7 +93,7 @@ static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
#define WPAN_PHY_PM_OPS NULL
#endif
-struct class wpan_phy_class = {
+const struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
diff --git a/net/ieee802154/sysfs.h b/net/ieee802154/sysfs.h
index 337545b639e9..69961e166257 100644
--- a/net/ieee802154/sysfs.h
+++ b/net/ieee802154/sysfs.h
@@ -5,6 +5,6 @@
int wpan_phy_sysfs_init(void);
void wpan_phy_sysfs_exit(void);
-extern struct class wpan_phy_class;
+extern const struct class wpan_phy_class;
#endif /* __IEEE802154_SYSFS_H */
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index a5a820ee2026..55bd72997b31 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,6 +119,7 @@
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
+#include <net/rps.h>
#include <trace/events/sock.h>
@@ -1103,7 +1104,7 @@ const struct proto_ops inet_dgram_ops = {
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
@@ -1326,7 +1327,7 @@ int inet_sk_rebuild_header(struct sock *sk)
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_protocol, ip_sock_rt_tos(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
@@ -1751,19 +1752,6 @@ static const struct net_protocol igmp_protocol = {
};
#endif
-static const struct net_protocol tcp_protocol = {
- .handler = tcp_v4_rcv,
- .err_handler = tcp_v4_err,
- .no_policy = 1,
- .icmp_strict_tag_validation = 1,
-};
-
-static const struct net_protocol udp_protocol = {
- .handler = udp_rcv,
- .err_handler = udp_err,
- .no_policy = 1,
-};
-
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
@@ -1904,14 +1892,6 @@ static int ipv4_proc_init(void);
* IP protocol layer initialiser
*/
-static struct packet_offload ip_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IP),
- .callbacks = {
- .gso_segment = inet_gso_segment,
- .gro_receive = inet_gro_receive,
- .gro_complete = inet_gro_complete,
- },
-};
static const struct net_offload ipip_offload = {
.callbacks = {
@@ -1938,7 +1918,15 @@ static int __init ipv4_offload_init(void)
if (ipip_offload_init() < 0)
pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
- dev_add_offload(&ip_packet_offload);
+ net_hotdata.ip_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IP),
+ .callbacks = {
+ .gso_segment = inet_gso_segment,
+ .gro_receive = inet_gro_receive,
+ .gro_complete = inet_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ip_packet_offload);
return 0;
}
@@ -1992,9 +1980,22 @@ static int __init inet_init(void)
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
pr_crit("%s: Cannot add ICMP protocol\n", __func__);
- if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
+
+ net_hotdata.udp_protocol = (struct net_protocol) {
+ .handler = udp_rcv,
+ .err_handler = udp_err,
+ .no_policy = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
pr_crit("%s: Cannot add UDP protocol\n", __func__);
- if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
+
+ net_hotdata.tcp_protocol = (struct net_protocol) {
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .no_policy = 1,
+ .icmp_strict_tag_validation = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index ae8b15e6896f..7f518ea5f4ac 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -12,7 +12,7 @@
#include <net/bpf_sk_storage.h>
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
-extern struct bpf_struct_ops bpf_tcp_congestion_ops;
+static struct bpf_struct_ops bpf_tcp_congestion_ops;
static u32 unsupported_ops[] = {
offsetof(struct tcp_congestion_ops, get_info),
@@ -20,6 +20,7 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
+static const struct btf_type *tcp_congestion_ops_type;
static int bpf_tcp_ca_init(struct btf *btf)
{
@@ -36,6 +37,11 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
+ type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ tcp_congestion_ops_type = btf_type_by_id(btf, type_id);
+
return 0;
}
@@ -149,7 +155,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
u32 midx;
midx = prog->expected_attach_type;
- t = bpf_tcp_congestion_ops.type;
+ t = tcp_congestion_ops_type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;
@@ -191,17 +197,17 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
-BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
-BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
.owner = THIS_MODULE,
@@ -339,7 +345,7 @@ static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
.release = __bpf_tcp_ca_release,
};
-struct bpf_struct_ops bpf_tcp_congestion_ops = {
+static struct bpf_struct_ops bpf_tcp_congestion_ops = {
.verifier_ops = &bpf_tcp_ca_verifier_ops,
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
@@ -350,10 +356,16 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = {
.validate = bpf_tcp_ca_validate,
.name = "tcp_congestion_ops",
.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
+ .owner = THIS_MODULE,
};
static int __init bpf_tcp_ca_kfunc_init(void)
{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);
+
+ return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
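
Two mechanical conversions meet in bpf_tcp_ca.c: the kfunc ID set moves to the BTF_KFUNCS_START/END wrappers, and the struct_ops type is no longer looked up by name from the BPF core but registered by the subsystem itself, which is why bpf_tcp_congestion_ops can become static, must carry .owner, and caches tcp_congestion_ops_type locally. A sketch of what a subsystem-side registration looks like (foo_* names are hypothetical):

    static struct bpf_struct_ops bpf_foo_ops = {
        .verifier_ops = &foo_verifier_ops,  /* hypothetical ops table */
        .name         = "foo_ops",
        .owner        = THIS_MODULE,        /* required by the new scheme */
    };

    static int __init foo_struct_ops_init(void)
    {
        /* the second argument names the C type backing the ops table */
        return register_bpf_struct_ops(&bpf_foo_ops, foo_ops);
    }
    late_initcall(foo_struct_ops_init);
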
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index d048aa833293..8b17d83e5fde 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -864,11 +864,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
net_clen_bits,
net_spot + 1,
1);
- if (net_spot < 0) {
- if (net_spot == -2)
- return -EFAULT;
+ if (net_spot < 0)
return 0;
- }
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 2cc50cbfc2a3..cc6d0bd7b0a9 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -119,7 +119,7 @@ void ip4_datagram_release_cb(struct sock *sk)
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
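
RT_CONN_FLAGS(sk) folded the socket's TOS bits together with an RTO_ONLINK bit derived from SOCK_LOCALROUTE; the conversions in this series pass only the TOS and let the routing scope be derived separately. Approximate shape of the helpers involved (a sketch of include/net/route.h, not verbatim source):

    static inline __u8 ip_sock_rt_tos(const struct sock *sk)
    {
        return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
    }

    static inline __u8 ip_sock_rt_scope(const struct sock *sk)
    {
        return sock_flag(sk, SOCK_LOCALROUTE) ? RT_SCOPE_LINK
                                              : RT_SCOPE_UNIVERSE;
    }
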
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc74f131fe4d..7a437f0d4190 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -713,34 +713,37 @@ static void check_lifetime(struct work_struct *work)
rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
- unsigned long age;
+ unsigned long age, tstamp;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u32 flags;
- if (ifa->ifa_flags & IFA_F_PERMANENT)
+ flags = READ_ONCE(ifa->ifa_flags);
+ if (flags & IFA_F_PERMANENT)
continue;
+ preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
+ valid_lft = READ_ONCE(ifa->ifa_valid_lft);
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
/* We try to batch several events at once. */
- age = (now - ifa->ifa_tstamp +
+ age = (now - tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
- if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
- age >= ifa->ifa_valid_lft) {
+ if (valid_lft != INFINITY_LIFE_TIME &&
+ age >= valid_lft) {
change_needed = true;
- } else if (ifa->ifa_preferred_lft ==
+ } else if (preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
- } else if (age >= ifa->ifa_preferred_lft) {
- if (time_before(ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ, next))
- next = ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ;
+ } else if (age >= preferred_lft) {
+ if (time_before(tstamp + valid_lft * HZ, next))
+ next = tstamp + valid_lft * HZ;
- if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+ if (!(flags & IFA_F_DEPRECATED))
change_needed = true;
- } else if (time_before(ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ,
+ } else if (time_before(tstamp + preferred_lft * HZ,
next)) {
- next = ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ;
+ next = tstamp + preferred_lft * HZ;
}
}
rcu_read_unlock();
@@ -804,24 +807,26 @@ static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
__u32 prefered_lft)
{
unsigned long timeout;
+ u32 flags;
- ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
+ flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout))
- ifa->ifa_valid_lft = timeout;
+ WRITE_ONCE(ifa->ifa_valid_lft, timeout);
else
- ifa->ifa_flags |= IFA_F_PERMANENT;
+ flags |= IFA_F_PERMANENT;
timeout = addrconf_timeout_fixup(prefered_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
if (timeout == 0)
- ifa->ifa_flags |= IFA_F_DEPRECATED;
- ifa->ifa_preferred_lft = timeout;
+ flags |= IFA_F_DEPRECATED;
+ WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
}
- ifa->ifa_tstamp = jiffies;
+ WRITE_ONCE(ifa->ifa_flags, flags);
+ WRITE_ONCE(ifa->ifa_tstamp, jiffies);
if (!ifa->ifa_cstamp)
- ifa->ifa_cstamp = ifa->ifa_tstamp;
+ WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
}
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
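
The check_lifetime() and set_ifa_lifetime() rewrites above are the annotation pass that precedes a lockless reader: every field the walker inspects is snapshotted once with READ_ONCE() and reused, while the writer publishes with WRITE_ONCE(), deferring the ifa_flags store until both lifetimes are in place so a concurrent reader never sees a half-updated tuple. The reader pattern in miniature (age_address() is a hypothetical stand-in):

    u32 flags = READ_ONCE(ifa->ifa_flags);        /* one snapshot */

    if (!(flags & IFA_F_PERMANENT))
        age_address(ifa, flags,                   /* reuse the snapshot; */
                    READ_ONCE(ifa->ifa_tstamp));  /* never re-read flags */
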
@@ -1312,7 +1317,7 @@ static __be32 in_dev_select_addr(const struct in_device *in_dev,
const struct in_ifaddr *ifa;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope)
@@ -1340,7 +1345,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
localnet_scope = RT_SCOPE_LINK;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (min(ifa->ifa_scope, localnet_scope) > scope)
continue;
@@ -1671,11 +1676,12 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
-static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
+static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
struct inet_fill_args *args)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
+ unsigned long tstamp;
u32 preferred, valid;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
@@ -1686,7 +1692,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
- ifm->ifa_flags = ifa->ifa_flags;
+ ifm->ifa_flags = READ_ONCE(ifa->ifa_flags);
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
@@ -1694,11 +1700,12 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto nla_put_failure;
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
- preferred = ifa->ifa_preferred_lft;
- valid = ifa->ifa_valid_lft;
+ preferred = READ_ONCE(ifa->ifa_preferred_lft);
+ valid = READ_ONCE(ifa->ifa_valid_lft);
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->ifa_tstamp) / HZ;
+ long tval = (jiffies - tstamp) / HZ;
if (preferred > tval)
preferred -= tval;
@@ -1725,10 +1732,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
(ifa->ifa_proto &&
nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
- nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
+ nla_put_u32(skb, IFA_FLAGS, ifm->ifa_flags) ||
(ifa->ifa_rt_priority &&
nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
- put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
+ put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
preferred, valid))
goto nla_put_failure;
@@ -1798,15 +1805,15 @@ static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
}
static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet_fill_args *fillargs)
{
struct in_ifaddr *ifa;
int ip_idx = 0;
int err;
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ip_idx < s_ip_idx) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (ip_idx < *s_ip_idx) {
ip_idx++;
continue;
}
@@ -1818,9 +1825,9 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
ip_idx++;
}
err = 0;
-
+ ip_idx = 0;
done:
- cb->args[2] = ip_idx;
+ *s_ip_idx = ip_idx;
return err;
}
@@ -1830,7 +1837,7 @@ done:
static u32 inet_base_seq(const struct net *net)
{
u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
- net->dev_base_seq;
+ READ_ONCE(net->dev_base_seq);
/* Must not return 0 (see nl_dump_check_consistent()).
* Chose a value far away from 0.
@@ -1852,75 +1859,51 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
};
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
- int h, s_h;
- int idx, s_idx;
- int s_ip_idx;
- struct net_device *dev;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct in_device *in_dev;
- struct hlist_head *head;
+ struct net_device *dev;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
- err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
- if (!dev) {
- err = -ENODEV;
- goto put_tgt_net;
- }
-
- in_dev = __in_dev_get_rtnl(dev);
- if (in_dev) {
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- }
- goto put_tgt_net;
- }
- }
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet_base_seq(tgt_net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
+ if (!dev)
+ goto done;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
- goto cont;
-
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- if (err < 0) {
- rcu_read_unlock();
goto done;
- }
-cont:
- idx++;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ goto done;
}
- rcu_read_unlock();
}
+ cb->seq = inet_base_seq(tgt_net);
+
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
+ }
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
-
- return skb->len ? : err;
+ rcu_read_unlock();
+ return err;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
@@ -1982,7 +1965,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
return -EMSGSIZE;
for (i = 0; i < IPV4_DEVCONF_MAX; i++)
- ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+ ((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
return 0;
}
@@ -2068,9 +2051,9 @@ static int inet_netconf_msgsize_devconf(int type)
}
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
- struct ipv4_devconf *devconf, u32 portid,
- u32 seq, int event, unsigned int flags,
- int type)
+ const struct ipv4_devconf *devconf,
+ u32 portid, u32 seq, int event,
+ unsigned int flags, int type)
{
struct nlmsghdr *nlh;
struct netconfmsg *ncm;
@@ -2095,27 +2078,28 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
if ((all || type == NETCONFA_FORWARDING) &&
nla_put_s32(skb, NETCONFA_FORWARDING,
- IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_RP_FILTER) &&
nla_put_s32(skb, NETCONFA_RP_FILTER,
- IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
+ IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_MC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
- IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_BC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_BC_FORWARDING,
- IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_PROXY_NEIGH) &&
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
- IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
+ IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+ IPV4_DEVCONF_RO(*devconf,
+ IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
goto nla_put_failure;
out:
@@ -2204,21 +2188,20 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
- struct nlattr *tb[NETCONFA_MAX+1];
+ struct nlattr *tb[NETCONFA_MAX + 1];
+ const struct ipv4_devconf *devconf;
+ struct in_device *in_dev = NULL;
+ struct net_device *dev = NULL;
struct sk_buff *skb;
- struct ipv4_devconf *devconf;
- struct in_device *in_dev;
- struct net_device *dev;
int ifindex;
int err;
err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err)
- goto errout;
+ return err;
- err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
- goto errout;
+ return -EINVAL;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
switch (ifindex) {
@@ -2229,10 +2212,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
devconf = net->ipv4.devconf_dflt;
break;
default:
- dev = __dev_get_by_index(net, ifindex);
- if (!dev)
- goto errout;
- in_dev = __in_dev_get_rtnl(dev);
+ err = -ENODEV;
+ dev = dev_get_by_index(net, ifindex);
+ if (dev)
+ in_dev = in_dev_get(dev);
if (!in_dev)
goto errout;
devconf = &in_dev->cnf;
@@ -2256,6 +2239,9 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
+ if (in_dev)
+ in_dev_put(in_dev);
+ dev_put(dev);
return err;
}
@@ -2264,11 +2250,13 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
+ const struct in_device *in_dev;
struct net_device *dev;
- struct in_device *in_dev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -2285,64 +2273,45 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet_base_seq(net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev)
- goto cont;
-
- if (inet_netconf_fill_devconf(skb, dev->ifindex,
- &in_dev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = inet_netconf_fill_devconf(skb, dev->ifindex,
+ &in_dev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv4.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv4.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv4.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv4.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
@@ -2823,7 +2792,9 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
- rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
+ rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
- inet_netconf_dump_devconf, 0);
+ inet_netconf_dump_devconf,
+ RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
}
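
Registering both dump handlers with RTNL_FLAG_DUMP_UNLOCKED is only safe because the loops above were converted from the dev_index_head hash walk, resumed via bucket/index pairs in cb->args[], to for_each_netdev_dump(), which iterates the per-netns ifindex xarray under RCU and resumes from a cursor kept in cb->ctx. The resulting shape, with dump_one_dev() as a hypothetical fill helper:

    struct dump_ctx { unsigned long ifindex; int ip_idx; };
    struct dump_ctx *ctx = (void *)cb->ctx;  /* persists across recalls */
    int err = 0;

    rcu_read_lock();
    for_each_netdev_dump(net, dev, ctx->ifindex) {
        err = dump_one_dev(skb, cb, dev);    /* hypothetical */
        if (err < 0)
            break;  /* cursor already names the device to resume at */
    }
    rcu_read_unlock();
    return err;
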
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 390f4be7f7be..48741352a88a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -916,7 +916,8 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct rtmsg *rtm;
int err, i;
- ASSERT_RTNL();
+ if (filter->rtnl_held)
+ ASSERT_RTNL();
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
@@ -961,7 +962,10 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
break;
case RTA_OIF:
ifindex = nla_get_u32(tb[i]);
- filter->dev = __dev_get_by_index(net, ifindex);
+ if (filter->rtnl_held)
+ filter->dev = __dev_get_by_index(net, ifindex);
+ else
+ filter->dev = dev_get_by_index_rcu(net, ifindex);
if (!filter->dev)
return -ENODEV;
break;
@@ -983,20 +987,24 @@ EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = { .dump_routes = true,
- .dump_exceptions = true };
+ struct fib_dump_filter filter = {
+ .dump_routes = true,
+ .dump_exceptions = true,
+ .rtnl_held = false,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0, err;
+ int dumped = 0, err = 0;
+ rcu_read_lock();
if (cb->strict_check) {
err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
if (err < 0)
- return err;
+ goto unlock;
} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(nlh);
@@ -1005,29 +1013,26 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* ipv4 does not use prefix flag */
if (filter.flags & RTM_F_PREFIX)
- return skb->len;
+ goto unlock;
if (filter.table_id) {
tb = fib_get_table(net, filter.table_id);
if (!tb) {
if (rtnl_msg_family(cb->nlh) != PF_INET)
- return skb->len;
+ goto unlock;
NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
- return -ENOENT;
+ err = -ENOENT;
+ goto unlock;
}
-
- rcu_read_lock();
err = fib_table_dump(tb, skb, cb, &filter);
- rcu_read_unlock();
- return skb->len ? : err;
+ goto unlock;
}
s_h = cb->args[0];
s_e = cb->args[1];
- rcu_read_lock();
-
+ err = 0;
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
@@ -1038,25 +1043,20 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
err = fib_table_dump(tb, skb, cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
+ if (err < 0)
+ goto out;
dumped = 1;
next:
e++;
}
}
out:
- err = skb->len;
-out_err:
- rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
+unlock:
+ rcu_read_unlock();
return err;
}
@@ -1659,5 +1659,6 @@ void __init ip_fib_init(void)
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0);
+ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
+ RTNL_FLAG_DUMP_UNLOCKED);
}
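
Besides holding rcu_read_lock() across the whole dump, inet_dump_fib() stops returning skb->len and returns 0 (complete) or a negative error, matching the fib_table_dump() change below; this leans on the netlink core treating an -EMSGSIZE result on a partially filled skb as "flush and resume" (a core change from this same cycle), so the old "return skb->len to force a retry" idiom is unnecessary. The new rtnl_held filter flag lets callers that still dump under RTNL (ipmr, further down) pick the matching lookup primitive:

    if (filter->rtnl_held)
        filter->dev = __dev_get_by_index(net, ifindex);    /* RTNL rules */
    else
        filter->dev = dev_get_by_index_rcu(net, ifindex);  /* RCU rules */
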
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3ff35f811765..f474106464d2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -501,7 +501,7 @@ static void tnode_free(struct key_vector *tn)
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0;
- synchronize_rcu();
+ synchronize_net();
}
}
@@ -2368,7 +2368,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
* and key == 0 means the dump has wrapped around and we are done.
*/
if (count && !key)
- return skb->len;
+ return 0;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
@@ -2394,7 +2394,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
cb->args[3] = key;
cb->args[2] = count;
- return skb->len;
+ return 0;
}
void __init fib_trie_init(void)
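
The tnode_free() switch from synchronize_rcu() to synchronize_net() above is a latency fix, not a semantic change: synchronize_net() selects the expedited grace-period wait when it can see RTNL is held, the usual context for trie rebalancing. Roughly (a sketch of the helper in net/core/dev.c):

    void synchronize_net(void)
    {
        might_sleep();
        if (rtnl_is_locked())
            synchronize_rcu_expedited();  /* much shorter stall */
        else
            synchronize_rcu();
    }
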
diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c
index 4da03bf45c9b..06e5572f296f 100644
--- a/net/ipv4/fou_bpf.c
+++ b/net/ipv4/fou_bpf.c
@@ -100,10 +100,10 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(fou_kfunc_set)
+BTF_KFUNCS_START(fou_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
BTF_ID_FLAGS(func, bpf_skb_get_fou_encap)
-BTF_SET8_END(fou_kfunc_set)
+BTF_KFUNCS_END(fou_kfunc_set)
static const struct btf_kfunc_id_set fou_bpf_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 0c41076e31ed..a8494f796dca 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -351,7 +351,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
optlen = guehdr->hlen << 2;
len += optlen;
- if (skb_gro_header_hard(skb, len)) {
+ if (!skb_gro_may_pull(skb, len)) {
guehdr = skb_gro_header_slow(skb, len, off);
if (unlikely(!guehdr))
goto out;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 311e70bfce40..5028c72d494a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -174,7 +174,7 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
grehlen += GRE_HEADER_SECTION;
hlen = off + grehlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
goto out;
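
skb_gro_header_hard() was a negatively phrased predicate ("is this pull hard?"); skb_gro_may_pull() answers the positive question, so callers here and in fou_core.c flip polarity while keeping identical behavior:

    /* Equivalent tests, old and new (a sketch):
     *
     *     skb_gro_header_hard(skb, len)  ==  !skb_gro_may_pull(skb, len)
     *
     * "may pull" is true when len bytes are already available in the
     * GRO header area, so only the negated case takes the slow path. */
    if (!skb_gro_may_pull(skb, len))
        hdr = skb_gro_header_slow(skb, len, off);
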
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index efeeca2b1328..717e97a389a8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -120,12 +120,12 @@
*/
#define IGMP_V1_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
((in_dev)->mr_v1_seen && \
time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 459af1f89739..7d8090f109ef 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -906,8 +906,9 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
memcpy(nreq_sk, req_sk,
offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
- req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
+ req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see above */);
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
@@ -1467,7 +1468,7 @@ static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *f
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
if (IS_ERR(rt))
rt = NULL;
if (rt)
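
The inet_reqsk_clone() copy intentionally writes past sizeof(struct sock) into the larger rsk_ops->obj_size allocation, which FORTIFY_SOURCE would otherwise flag. unsafe_memcpy() is the escape hatch: the copy is plain memcpy(), the compile-time destination-size check is waived, and the trailing parameter slot forces an in-line justification comment. An illustrative use on a flexible allocation (hypothetical types):

    struct wrapper { struct hdr h; char payload[]; };
    struct wrapper *w = kmalloc(struct_size(w, payload, n), GFP_KERNEL);

    if (!w)
        return -ENOMEM;
    /* spills past sizeof(struct hdr) into payload[], by design: */
    unsafe_memcpy(&w->h, src, sizeof(struct hdr) + n,
                  /* allocation sized by struct_size() above */);
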
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8e6b6aa0579e..7adace541fe2 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -32,7 +32,7 @@
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
-static const struct inet_diag_handler **inet_diag_table;
+static const struct inet_diag_handler __rcu **inet_diag_table;
struct inet_diag_entry {
const __be32 *saddr;
@@ -48,28 +48,28 @@ struct inet_diag_entry {
#endif
};
-static DEFINE_MUTEX(inet_diag_table_mutex);
-
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
- if (proto < 0 || proto >= IPPROTO_MAX) {
- mutex_lock(&inet_diag_table_mutex);
- return ERR_PTR(-ENOENT);
- }
+ const struct inet_diag_handler *handler;
- if (!inet_diag_table[proto])
+ if (proto < 0 || proto >= IPPROTO_MAX)
+ return NULL;
+
+ if (!READ_ONCE(inet_diag_table[proto]))
sock_load_diag_module(AF_INET, proto);
- mutex_lock(&inet_diag_table_mutex);
- if (!inet_diag_table[proto])
- return ERR_PTR(-ENOENT);
+ rcu_read_lock();
+ handler = rcu_dereference(inet_diag_table[proto]);
+ if (handler && !try_module_get(handler->owner))
+ handler = NULL;
+ rcu_read_unlock();
- return inet_diag_table[proto];
+ return handler;
}
static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
- mutex_unlock(&inet_diag_table_mutex);
+ module_put(handler->owner);
}
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
@@ -104,9 +104,12 @@ static size_t inet_sk_attr_size(struct sock *sk,
const struct inet_diag_handler *handler;
size_t aux = 0;
- handler = inet_diag_table[req->sdiag_protocol];
+ rcu_read_lock();
+ handler = rcu_dereference(inet_diag_table[req->sdiag_protocol]);
+ DEBUG_NET_WARN_ON_ONCE(!handler);
if (handler && handler->idiag_get_aux_size)
aux = handler->idiag_get_aux_size(sk, net_admin);
+ rcu_read_unlock();
return nla_total_size(sizeof(struct tcp_info))
+ nla_total_size(sizeof(struct inet_diag_msg))
@@ -244,10 +247,16 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
+ int protocol;
cb_data = cb->data;
- handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)];
- BUG_ON(!handler);
+ protocol = inet_diag_get_protocol(req, cb_data);
+
+ /* inet_diag_lock_handler() made sure inet_diag_table[] is stable. */
+ handler = rcu_dereference_protected(inet_diag_table[protocol], 1);
+ DEBUG_NET_WARN_ON_ONCE(!handler);
+ if (!handler)
+ return -ENXIO;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
@@ -605,9 +614,10 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
protocol = inet_diag_get_protocol(req, &dump_data);
handler = inet_diag_lock_handler(protocol);
- if (IS_ERR(handler)) {
- err = PTR_ERR(handler);
- } else if (cmd == SOCK_DIAG_BY_FAMILY) {
+ if (!handler)
+ return -ENOENT;
+
+ if (cmd == SOCK_DIAG_BY_FAMILY) {
struct netlink_callback cb = {
.nlh = nlh,
.skb = in_skb,
@@ -1035,6 +1045,10 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
num = 0;
ilb = &hashinfo->lhash2[i];
+ if (hlist_nulls_empty(&ilb->nulls_head)) {
+ s_num = 0;
+ continue;
+ }
spin_lock(&ilb->lock);
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
@@ -1099,6 +1113,10 @@ resume_bind_walk:
accum = 0;
ibb = &hashinfo->bhash2[i];
+ if (hlist_empty(&ibb->chain)) {
+ s_num = 0;
+ continue;
+ }
spin_lock_bh(&ibb->lock);
inet_bind_bucket_for_each(tb2, &ibb->chain) {
if (!net_eq(ib2_net(tb2), net))
@@ -1259,12 +1277,12 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
again:
prev_min_dump_alloc = cb->min_dump_alloc;
handler = inet_diag_lock_handler(protocol);
- if (!IS_ERR(handler))
+ if (handler) {
handler->dump(skb, cb, r);
- else
- err = PTR_ERR(handler);
- inet_diag_unlock_handler(handler);
-
+ inet_diag_unlock_handler(handler);
+ } else {
+ err = -ENOENT;
+ }
/* The skb is not large enough to fit one sk info and
* inet_sk_diag_fill() has requested for a larger skb.
*/
@@ -1457,10 +1475,9 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
}
handler = inet_diag_lock_handler(sk->sk_protocol);
- if (IS_ERR(handler)) {
- inet_diag_unlock_handler(handler);
+ if (!handler) {
nlmsg_cancel(skb, nlh);
- return PTR_ERR(handler);
+ return -ENOENT;
}
attr = handler->idiag_info_size
@@ -1479,6 +1496,7 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
}
static const struct sock_diag_handler inet_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_INET,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
@@ -1486,6 +1504,7 @@ static const struct sock_diag_handler inet_diag_handler = {
};
static const struct sock_diag_handler inet6_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_INET6,
.dump = inet_diag_handler_cmd,
.get_info = inet_diag_handler_get_info,
@@ -1495,20 +1514,12 @@ static const struct sock_diag_handler inet6_diag_handler = {
int inet_diag_register(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
- int err = -EINVAL;
if (type >= IPPROTO_MAX)
- goto out;
+ return -EINVAL;
- mutex_lock(&inet_diag_table_mutex);
- err = -EEXIST;
- if (!inet_diag_table[type]) {
- inet_diag_table[type] = h;
- err = 0;
- }
- mutex_unlock(&inet_diag_table_mutex);
-out:
- return err;
+ return !cmpxchg((const struct inet_diag_handler **)&inet_diag_table[type],
+ NULL, h) ? 0 : -EEXIST;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
@@ -1519,12 +1530,16 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
if (type >= IPPROTO_MAX)
return;
- mutex_lock(&inet_diag_table_mutex);
- inet_diag_table[type] = NULL;
- mutex_unlock(&inet_diag_table_mutex);
+ xchg((const struct inet_diag_handler **)&inet_diag_table[type],
+ NULL);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
+static const struct sock_diag_inet_compat inet_diag_compat = {
+ .owner = THIS_MODULE,
+ .fn = inet_diag_rcv_msg_compat,
+};
+
static int __init inet_diag_init(void)
{
const int inet_diag_table_size = (IPPROTO_MAX *
@@ -1543,7 +1558,7 @@ static int __init inet_diag_init(void)
if (err)
goto out_free_inet;
- sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
+ sock_diag_register_inet_compat(&inet_diag_compat);
out:
return err;
@@ -1558,7 +1573,7 @@ static void __exit inet_diag_exit(void)
{
sock_diag_unregister(&inet6_diag_handler);
sock_diag_unregister(&inet_diag_handler);
- sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
+ sock_diag_unregister_inet_compat(&inet_diag_compat);
kfree(inet_diag_table);
}
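
inet_diag's handler table loses its global mutex entirely: the array becomes __rcu, lookup is rcu_dereference() plus try_module_get() so the handler is pinned by its module refcount rather than by a lock held across the whole operation, and registration degenerates to a single cmpxchg() on the slot. The scheme in miniature (foo_* names hypothetical):

    static const struct foo_handler __rcu *table[FOO_MAX];

    int foo_register(const struct foo_handler *h, int type)
    {
        /* lock-free claim of an empty slot; of two racing
         * registrations exactly one cmpxchg() succeeds */
        return !cmpxchg((const struct foo_handler **)&table[type],
                        NULL, h) ? 0 : -EEXIST;
    }

    static const struct foo_handler *foo_lock_handler(int type)
    {
        const struct foo_handler *h;

        rcu_read_lock();
        h = rcu_dereference(table[type]);
        if (h && !try_module_get(h->owner))  /* pin via module refcount */
            h = NULL;
        rcu_read_unlock();
        return h;  /* caller drops it with module_put(h->owner) */
    }
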
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 308ff34002ea..7498af320164 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,6 +24,7 @@
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
@@ -32,8 +33,6 @@ u32 inet_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
- static u32 inet_ehash_secret __read_mostly;
-
net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e9fed83e9b3c..5bd759963451 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -81,10 +81,7 @@ void __init inet_initpeers(void)
inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
- peer_cachep = kmem_cache_create("inet_peer_cache",
- sizeof(struct inet_peer),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
/* Called with rcu_read_lock() or base->lock held */
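
KMEM_CACHE() (include/linux/slab.h) derives the cache's name, size and alignment from the type, approximately:

    #define KMEM_CACHE(__struct, __flags)                              \
            kmem_cache_create(#__struct, sizeof(struct __struct),      \
                              __alignof__(struct __struct),            \
                              (__flags), NULL)

One visible side effect of this conversion (and the matching ipmr one below): the slab is named after the struct, so "inet_peer_cache" shows up as "inet_peer" in /proc/slabinfo.
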
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6b9cf5a24c19..7b16c211b904 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1025,14 +1025,16 @@ static int __net_init ipgre_init_net(struct net *net)
return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
-static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
+ ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
- .exit_batch = ipgre_exit_batch_net,
+ .exit_batch_rtnl = ipgre_exit_batch_rtnl,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1697,14 +1699,16 @@ static int __net_init ipgre_tap_init_net(struct net *net)
return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
-static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
+ ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_tap_net_ops = {
.init = ipgre_tap_init_net,
- .exit_batch = ipgre_tap_exit_batch_net,
+ .exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1715,14 +1719,16 @@ static int __net_init erspan_init_net(struct net *net)
&erspan_link_ops, "erspan0");
}
-static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
+static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
+ ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
+ dev_to_kill);
}
static struct pernet_operations erspan_net_ops = {
.init = erspan_init_net,
- .exit_batch = erspan_exit_batch_net,
+ .exit_batch_rtnl = erspan_exit_batch_rtnl,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
};
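
ip_gre is the first of several tunnel drivers converted in this series from ->exit_batch, where each driver took rtnl_lock and called unregister_netdevice_many() itself, to the new ->exit_batch_rtnl hook: the core acquires RTNL once per cleanup batch, invokes every such hook, and unregisters the accumulated dev_to_kill list in one pass before dropping the lock. Sketch of a converted driver (foo_* hypothetical):

    static void __net_exit foo_exit_batch_rtnl(struct list_head *net_list,
                                               struct list_head *dev_to_kill)
    {
        struct net *net;

        ASSERT_RTNL();  /* the core already holds rtnl_lock */
        list_for_each_entry(net, net_list, exit_list)
            foo_collect_devs(net, dev_to_kill);  /* queue, don't unregister */
    }

    static struct pernet_operations foo_net_ops = {
        .exit_batch_rtnl = foo_exit_batch_rtnl,
    };
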
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 67d846622365..33f93dc730a3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -493,7 +493,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
- RT_CONN_FLAGS_TOS(sk, tos),
+ RT_TOS(tos),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
@@ -1458,6 +1458,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
skb->mark = cork->mark;
skb->tstamp = cork->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 21d2ffa919e9..cf377377b52d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -894,7 +894,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
- int val = 0, err;
+ int val = 0, err, retv;
bool needs_rtnl = setsockopt_needs_rtnl(optname);
switch (optname) {
@@ -938,8 +938,12 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
/* If optlen==0, it is equivalent to val == 0 */
- if (optname == IP_ROUTER_ALERT)
- return ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (optname == IP_ROUTER_ALERT) {
+ retv = ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (retv == 0)
+ inet_assign_bit(RTALERT, sk, val);
+ return retv;
+ }
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
@@ -1575,6 +1579,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_BIND_ADDRESS_NO_PORT:
val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
goto copyval;
+ case IP_ROUTER_ALERT:
+ val = inet_test_bit(RTALERT, sk);
+ goto copyval;
case IP_TTL:
val = READ_ONCE(inet->uc_ttl);
if (val < 0)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 1b6981de3f29..1b8d8ff9a237 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -102,10 +102,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else
- cand = t;
+ cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
@@ -117,9 +116,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -137,9 +136,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -150,9 +149,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
!(t->dev->flags & IFF_UP))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -221,7 +220,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
- link == t->parms.link &&
+ link == READ_ONCE(t->parms.link) &&
type == t->dev->type &&
ip_tunnel_key_match(&t->parms, flags, key))
break;
@@ -378,7 +377,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
bool log_ecn_error)
{
const struct iphdr *iph = ip_hdr(skb);
- int err;
+ int nh, err;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
@@ -404,8 +403,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+ }
+ iph = (struct iphdr *)(skb->head + nh);
+
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -761,7 +773,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos),
- dev_net(dev), tunnel->parms.link,
+ dev_net(dev), READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
@@ -881,7 +893,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (t->parms.link != p->link || t->fwmark != fwmark) {
int mtu;
- t->parms.link = p->link;
+ WRITE_ONCE(t->parms.link, p->link);
t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
@@ -1071,9 +1083,9 @@ EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
- struct ip_tunnel *tunnel = netdev_priv(dev);
+ const struct ip_tunnel *tunnel = netdev_priv(dev);
- return tunnel->parms.link;
+ return READ_ONCE(tunnel->parms.link);
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
@@ -1144,19 +1156,17 @@ static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
- struct rtnl_link_ops *ops)
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill)
{
struct ip_tunnel_net *itn;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
itn = net_generic(net, id);
- ip_tunnel_destroy(net, itn, &list, ops);
+ ip_tunnel_destroy(net, itn, dev_to_kill, ops);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
@@ -1285,6 +1295,7 @@ int ip_tunnel_init(struct net_device *dev)
if (tunnel->collect_md)
netif_keep_dst(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
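
The ip_tunnel_rcv() hunk is a correctness fix among the annotations: pskb_inet_may_pull() can end up in pskb_may_pull() and reallocate skb->head, leaving the iph pointer captured at function entry dangling. The fix records the outer header as an offset before the pull and re-derives the pointer afterwards, the general rule for any pull-style helper:

    int nh = skb_network_header(skb) - skb->head;  /* offsets survive */

    if (!pskb_inet_may_pull(skb))                  /* may move skb->head */
        goto drop;

    iph = (struct iphdr *)(skb->head + nh);        /* re-derive pointer */
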
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d1d6bb28ed6e..ee587adb169f 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -510,14 +510,16 @@ static int __net_init vti_init_net(struct net *net)
return 0;
}
-static void __net_exit vti_exit_batch_net(struct list_head *list_net)
+static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
+ ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops,
+ dev_to_kill);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
- .exit_batch = vti_exit_batch_net,
+ .exit_batch_rtnl = vti_exit_batch_rtnl,
.id = &vti_net_id,
.size = sizeof(struct ip_tunnel_net),
};
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 03afa3871efc..f2696eaadbe6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -592,14 +592,16 @@ static int __net_init ipip_init_net(struct net *net)
return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}
-static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipip_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
+ ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipip_net_ops = {
.init = ipip_init_net,
- .exit_batch = ipip_exit_batch_net,
+ .exit_batch_rtnl = ipip_exit_batch_rtnl,
.id = &ipip_net_id,
.size = sizeof(struct ip_tunnel_net),
};
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 362229836510..fd5c01c8489f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1603,9 +1603,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
if (copy_from_sockptr(&olr, optlen, sizeof(int)))
return -EFAULT;
- olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
+
+ olr = min_t(unsigned int, olr, sizeof(int));
+
if (copy_to_sockptr(optlen, &olr, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, &val, olr))
@@ -2587,7 +2589,9 @@ errout_free:
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
@@ -3139,10 +3143,7 @@ int __init ip_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip_mrt_cache",
- sizeof(struct mfc_cache),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = register_pernet_subsys(&ipmr_net_ops);
if (err)
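
The ip_mroute_getsockopt() reordering fixes a signedness bug: olr arrives from userspace as a signed int, but min_t(unsigned int, olr, sizeof(int)) compares it as unsigned, so a negative value such as -1 became 0xffffffff, was clamped to 4, and the following "olr < 0" test could never fire. Checking the sign before the unsigned clamp restores -EINVAL for negative lengths:

    if (olr < 0)                                  /* must come first */
        return -EINVAL;
    olr = min_t(unsigned int, olr, sizeof(int));  /* then clamp */
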
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f71a7e9a7de6..8f6e950163a7 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -10,6 +10,10 @@ config NF_DEFRAG_IPV4
tristate
default n
+# old sockopt interface and eval loop
+config IP_NF_IPTABLES_LEGACY
+ tristate
+
config NF_SOCKET_IPV4
tristate "IPv4 socket lookup support"
help
@@ -152,7 +156,7 @@ config IP_NF_MATCH_ECN
config IP_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP_NF_MANGLE || IP_NF_RAW
+ depends on IP_NF_MANGLE || IP_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -173,6 +177,7 @@ config IP_NF_MATCH_TTL
config IP_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -182,7 +187,7 @@ config IP_NF_FILTER
config IP_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP_NF_FILTER
+ depends on IP_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV4
default m if NETFILTER_ADVANCED=n
help
@@ -212,6 +217,7 @@ config IP_NF_NAT
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NETFILTER_XT_NAT
+ select IP_NF_IPTABLES_LEGACY
help
This enables the `nat' table in iptables. This allows masquerading,
port forwarding and other forms of full Network Address Port
@@ -252,6 +258,7 @@ endif # IP_NF_NAT
config IP_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -261,7 +268,7 @@ config IP_NF_MANGLE
config IP_NF_TARGET_ECN
tristate "ECN target support"
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `ECN' target, which can be used in the iptables mangle
@@ -286,6 +293,7 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -299,6 +307,7 @@ config IP_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -309,36 +318,35 @@ endif # IP_NF_IPTABLES
# ARP tables
config IP_NF_ARPTABLES
- tristate "ARP tables support"
- select NETFILTER_XTABLES
- select NETFILTER_FAMILY_ARP
- depends on NETFILTER_ADVANCED
- help
- arptables is a general, extensible packet identification framework.
- The ARP packet filtering and mangling (manipulation)subsystems
- use this: say Y or M here if you want to use either of those.
-
- To compile it as a module, choose M here. If unsure, say N.
+ tristate
-if IP_NF_ARPTABLES
+config NFT_COMPAT_ARP
+ tristate
+ depends on NF_TABLES_ARP && NFT_COMPAT
+ default m if NFT_COMPAT=m
+ default y if NFT_COMPAT=y
config IP_NF_ARPFILTER
- tristate "ARP packet filtering"
+ tristate "arptables-legacy packet filtering support"
+ select IP_NF_ARPTABLES
+ depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
rules for simple ARP packet filtering at local input and
- local output. On a bridge, you can also specify filtering rules
- for forwarded ARP packets. See the man page for arptables(8).
+ local output. This is only needed for arptables-legacy(8).
+ Neither arptables-nft nor nftables need this to work.
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_ARP_MANGLE
tristate "ARP payload mangling"
+ depends on IP_NF_ARPTABLES || NFT_COMPAT_ARP
help
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
-endif # IP_NF_ARPTABLES
+ This option is needed by both arptables-legacy and arptables-nft.
+ It is not used by nftables.
endmenu
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 5a26f9de1ab9..85502d4dfbb4 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
# generic IP tables
-obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
+obj-$(CONFIG_IP_NF_IPTABLES_LEGACY) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index bbff68b5b5d4..74928a9d1aa4 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -26,6 +26,9 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
#define NH_DEV_HASHBITS 8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
+#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \
+ NHA_OP_FLAG_DUMP_HW_STATS)
+
static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ID] = { .type = NLA_U32 },
[NHA_GROUP] = { .type = NLA_BINARY },
@@ -37,10 +40,17 @@ static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ENCAP] = { .type = NLA_NESTED },
[NHA_FDB] = { .type = NLA_FLAG },
[NHA_RES_GROUP] = { .type = NLA_NESTED },
+ [NHA_HW_STATS_ENABLE] = NLA_POLICY_MAX(NLA_U32, true),
};
static const struct nla_policy rtm_nh_policy_get[] = {
[NHA_ID] = { .type = NLA_U32 },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
+};
+
+static const struct nla_policy rtm_nh_policy_del[] = {
+ [NHA_ID] = { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_dump[] = {
@@ -48,6 +58,8 @@ static const struct nla_policy rtm_nh_policy_dump[] = {
[NHA_GROUPS] = { .type = NLA_FLAG },
[NHA_MASTER] = { .type = NLA_U32 },
[NHA_FDB] = { .type = NLA_FLAG },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
};
static const struct nla_policy rtm_nh_res_policy_new[] = {
@@ -92,6 +104,7 @@ __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
else if (nh_info->gw_family == AF_INET6)
nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
+ nh_info->id = nhi->nh_parent->id;
nh_info->is_reject = nhi->reject_nh;
nh_info->is_fdb = nhi->fdb_nh;
nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
@@ -131,13 +144,13 @@ static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
info->nh_grp->num_nh = num_nh;
info->nh_grp->is_fdb = nhg->fdb_nh;
+ info->nh_grp->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh; i++) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
struct nh_info *nhi;
nhi = rtnl_dereference(nhge->nh->nh_info);
- info->nh_grp->nh_entries[i].id = nhge->nh->id;
info->nh_grp->nh_entries[i].weight = nhge->weight;
__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
nhi);
@@ -162,6 +175,7 @@ static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
return -ENOMEM;
info->nh_res_table->num_nh_buckets = num_nh_buckets;
+ info->nh_res_table->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh_buckets; i++) {
struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
@@ -393,6 +407,7 @@ static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
struct nh_notifier_info info = {
.net = net,
.extack = extack,
+ .id = nh->id,
};
struct nh_group *nhg;
int err;
@@ -474,6 +489,7 @@ static void nexthop_free_group(struct nexthop *nh)
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
WARN_ON(!list_empty(&nhge->nh_list));
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
}
@@ -654,8 +670,202 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
+{
+ struct nh_grp_entry_stats *cpu_stats;
+
+ cpu_stats = get_cpu_ptr(nhge->stats);
+ u64_stats_update_begin(&cpu_stats->syncp);
+ u64_stats_inc(&cpu_stats->packets);
+ u64_stats_update_end(&cpu_stats->syncp);
+ put_cpu_ptr(cpu_stats);
+}
+
+static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
+ u64 *ret_packets)
{
+ int i;
+
+ *ret_packets = 0;
+
+ for_each_possible_cpu(i) {
+ struct nh_grp_entry_stats *cpu_stats;
+ unsigned int start;
+ u64 packets;
+
+ cpu_stats = per_cpu_ptr(nhge->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ packets = u64_stats_read(&cpu_stats->packets);
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ *ret_packets += packets;
+ }
+}
+
+static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ info->id = nh->id;
+ info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
+ info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
+ stats, nhg->num_nh),
+ GFP_KERNEL);
+ if (!info->nh_grp_hw_stats)
+ return -ENOMEM;
+
+ info->nh_grp_hw_stats->num_nh = nhg->num_nh;
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
+ }
+
+ return 0;
+}
+
+static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
+{
+ kfree(info->nh_grp_hw_stats);
+}
+
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets)
+{
+ info->hw_stats_used = true;
+ info->stats[nh_idx].packets += delta_packets;
+}
+EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
+
+static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
+ }
+}
+
+static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
+{
+ struct nh_notifier_info info = {
+ .net = nh->net,
+ };
+ struct net *net = nh->net;
+ int err;
+
+ if (nexthop_notifiers_is_empty(net))
+ return 0;
+
+ err = nh_notifier_grp_hw_stats_init(&info, nh);
+ if (err)
+ return err;
+
+ err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
+ &info);
+
+ /* Cache whatever we got, even if there was an error, otherwise the
+ * successful stats retrievals would get lost.
+ */
+ nh_grp_hw_stats_apply_update(nh, &info);
+ *hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
+
+ nh_notifier_grp_hw_stats_fini(&info);
+ return notifier_to_errno(err);
+}
+
+static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
+ struct nh_grp_entry *nhge,
+ u32 op_flags)
+{
+ struct nlattr *nest;
+ u64 packets;
+
+ nh_grp_entry_stats_read(nhge, &packets);
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
+ packets + nhge->packets_hw))
+ goto nla_put_failure;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+ nhge->packets_hw))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nlattr *nest;
+ bool hw_stats_used;
+ int err;
+ int i;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
+ goto err_out;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nhg->hw_stats) {
+ err = nh_grp_hw_stats_update(nh, &hw_stats_used);
+ if (err)
+ goto out;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
+ goto err_out;
+ }
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS);
+ if (!nest)
+ goto err_out;
+
+ for (i = 0; i < nhg->num_nh; i++)
+ if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
+ op_flags))
+ goto cancel_out;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+cancel_out:
+ nla_nest_cancel(skb, nest);
+err_out:
+ err = -EMSGSIZE;
+out:
+ return err;
+}
+
+static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
struct nexthop_grp *p;
size_t len = nhg->num_nh * sizeof(*p);
struct nlattr *nla;
@@ -684,6 +894,11 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
goto nla_put_failure;
+ if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
+ (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
+ nla_put_nh_group_stats(skb, nh, op_flags)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -691,7 +906,8 @@ nla_put_failure:
}
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
- int event, u32 portid, u32 seq, unsigned int nlflags)
+ int event, u32 portid, u32 seq, unsigned int nlflags,
+ u32 op_flags)
{
struct fib6_nh *fib6_nh;
struct fib_nh *fib_nh;
@@ -718,7 +934,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
goto nla_put_failure;
- if (nla_put_nh_group(skb, nhg))
+ if (nla_put_nh_group(skb, nh, op_flags))
goto nla_put_failure;
goto out;
}
@@ -849,7 +1065,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
if (!skb)
goto errout;
- err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
+ err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -1104,6 +1320,7 @@ static int nh_check_attr_group(struct net *net,
if (!tb[i])
continue;
switch (i) {
+ case NHA_HW_STATS_ENABLE:
case NHA_FDB:
continue;
case NHA_RES_GROUP:
@@ -1176,6 +1393,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1185,7 +1403,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
- struct nexthop *rc = NULL;
+ struct nh_grp_entry *nhge0 = NULL;
int i;
if (nhg->fdb_nh)
@@ -1200,16 +1418,20 @@ static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
if (!nexthop_is_good_nh(nhge->nh))
continue;
- if (!rc)
- rc = nhge->nh;
+ if (!nhge0)
+ nhge0 = nhge;
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
- return rc ? : nhg->nh_entries[0].nh;
+ if (!nhge0)
+ nhge0 = &nhg->nh_entries[0];
+ nh_grp_entry_stats_inc(nhge0);
+ return nhge0->nh;
}
static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
@@ -1225,6 +1447,7 @@ static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
bucket = &res_table->nh_buckets[bucket_index];
nh_res_bucket_set_busy(bucket);
nhge = rcu_dereference(bucket->nh_entry);
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1798,6 +2021,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
newg->has_v4 = true;
list_del(&nhges[i].nh_list);
+ new_nhges[j].stats = nhges[i].stats;
new_nhges[j].nh_parent = nhges[i].nh_parent;
new_nhges[j].nh = nhges[i].nh;
new_nhges[j].weight = nhges[i].weight;
@@ -1813,6 +2037,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
rcu_assign_pointer(nhp->nh_grp, newg);
list_del(&nhge->nh_list);
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
/* Removal of a NH from a resilient group is notified through
@@ -2477,6 +2702,13 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (nhi->family == AF_INET)
nhg->has_v4 = true;
+ nhg->nh_entries[i].stats =
+ netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
+ if (!nhg->nh_entries[i].stats) {
+ err = -ENOMEM;
+ nexthop_put(nhe);
+ goto out_no_nh;
+ }
nhg->nh_entries[i].nh = nhe;
nhg->nh_entries[i].weight = entry[i].weight + 1;
list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
@@ -2509,6 +2741,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (cfg->nh_fdb)
nhg->fdb_nh = 1;
+ if (cfg->nh_hw_stats)
+ nhg->hw_stats = true;
+
rcu_assign_pointer(nh->nh_grp, nhg);
return nh;
@@ -2516,6 +2751,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
out_no_nh:
for (i--; i >= 0; --i) {
list_del(&nhg->nh_entries[i].nh_list);
+ free_percpu(nhg->nh_entries[i].stats);
nexthop_put(nhg->nh_entries[i].nh);
}
@@ -2850,6 +3086,9 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
cfg, extack);
+ if (tb[NHA_HW_STATS_ENABLE])
+ cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
+
/* no other attributes should be set */
goto out;
}
@@ -2941,6 +3180,10 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
goto out;
}
+ if (tb[NHA_HW_STATS_ENABLE]) {
+ NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
+ goto out;
+ }
err = 0;
out:
@@ -2966,9 +3209,9 @@ static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
- struct nlattr **tb, u32 *id,
- struct netlink_ext_ack *extack)
+static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
+ struct nlattr **tb, u32 *id, u32 *op_flags,
+ struct netlink_ext_ack *extack)
{
struct nhmsg *nhm = nlmsg_data(nlh);
@@ -2988,28 +3231,21 @@ static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- return 0;
-}
-
-static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
- int err;
-
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_get) - 1,
- rtm_nh_policy_get, extack);
- if (err < 0)
- return err;
+ if (op_flags) {
+ if (tb[NHA_OP_FLAGS])
+ *op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
+ else
+ *op_flags = 0;
+ }
- return __nh_valid_get_del_req(nlh, tb, id, extack);
+ return 0;
}
/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
struct net *net = sock_net(skb->sk);
struct nl_info nlinfo = {
.nlh = nlh,
@@ -3020,7 +3256,13 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
+ ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
+ extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
if (err)
return err;
@@ -3037,13 +3279,21 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
struct net *net = sock_net(in_skb->sk);
struct sk_buff *skb = NULL;
struct nexthop *nh;
+ u32 op_flags;
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
+ ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
+ extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
if (err)
return err;
@@ -3058,7 +3308,7 @@ static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout_free;
err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
+ nlh->nlmsg_seq, 0, op_flags);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
goto errout_free;
@@ -3079,6 +3329,7 @@ struct nh_dump_filter {
bool group_filter;
bool fdb_filter;
u32 res_bucket_nh_id;
+ u32 op_flags;
};
static bool nh_dump_filtered(struct nexthop *nh,
@@ -3166,6 +3417,11 @@ static int nh_valid_dump_req(const struct nlmsghdr *nlh,
if (err < 0)
return err;
+ if (tb[NHA_OP_FLAGS])
+ filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
+ else
+ filter->op_flags = 0;
+
return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}
@@ -3223,7 +3479,7 @@ static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
}
/* rtnl */
@@ -3241,10 +3497,6 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
&rtm_dump_nexthop_cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- err = skb->len;
- }
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -3439,11 +3691,6 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
&rtm_dump_nexthop_bucket_cb, &dd);
}
- if (err < 0) {
- if (likely(skb->len))
- err = skb->len;
- }
-
cb->seq = net->nexthop.seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
return err;
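/* Note (annotation): the two removed "err = skb->len" fixups above are
 * presumably redundant now that the netlink dump core itself treats a
 * partially filled skb as success on -EMSGSIZE, so these dump callbacks
 * can simply propagate err.
 */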
@@ -3483,7 +3730,7 @@ static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
if (err < 0)
return err;
- err = __nh_valid_get_del_req(nlh, tb, id, extack);
+ err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
if (err)
return err;
@@ -3631,17 +3878,24 @@ unlock:
}
EXPORT_SYMBOL(register_nexthop_notifier);
-int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
int err;
- rtnl_lock();
err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
nb);
- if (err)
- goto unlock;
- nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
-unlock:
+ if (!err)
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+ return err;
+}
+EXPORT_SYMBOL(__unregister_nexthop_notifier);
+
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = __unregister_nexthop_notifier(net, nb);
rtnl_unlock();
return err;
}
@@ -3737,16 +3991,20 @@ out:
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
-static void __net_exit nexthop_net_exit_batch(struct list_head *net_list)
+static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list) {
+ ASSERT_RTNL();
+ list_for_each_entry(net, net_list, exit_list)
flush_all_nexthops(net);
- kfree(net->nexthop.devhash);
- }
- rtnl_unlock();
+}
+
+static void __net_exit nexthop_net_exit(struct net *net)
+{
+ kfree(net->nexthop.devhash);
+ net->nexthop.devhash = NULL;
}
static int __net_init nexthop_net_init(struct net *net)
@@ -3764,7 +4022,8 @@ static int __net_init nexthop_net_init(struct net *net)
static struct pernet_operations nexthop_net_ops = {
.init = nexthop_net_init,
- .exit_batch = nexthop_net_exit_batch,
+ .exit = nexthop_net_exit,
+ .exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
};
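/* Note (annotation): with .exit_batch_rtnl the core takes rtnl_lock once
 * for the whole batch of dying namespaces (hence the ASSERT_RTNL() above),
 * while the cleanup that needs no locking moves to a plain .exit callback;
 * previously this subsystem took and released rtnl_lock on its own, as the
 * removed rtnl_lock()/rtnl_unlock() pair shows.
 */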
static int __init nexthop_init(void)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 5f4654ebff48..914bc9c35cc7 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -395,7 +395,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
- IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
+ IPV4_DEVCONF_ALL_RO(net, FORWARDING) ? 1 : 2,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index aea89326c697..42ac434cfcfa 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -175,6 +175,13 @@ static int raw_v4_input(struct net *net, struct sk_buff *skb,
if (!raw_v4_match(net, sk, iph->protocol,
iph->saddr, iph->daddr, dif, sdif))
continue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ continue;
+ }
+
delivered = 1;
if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
@@ -310,7 +317,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
}
nf_reset_ct(skb);
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
raw_rcv_skb(sk, skb);
return 0;
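/* Note (annotation): an identity rewrite: skb_network_offset(skb) is
 * defined as skb_network_header(skb) - skb->data, so negating it yields
 * the old expression exactly; xfrm4_transport_finish() below gets the
 * same cleanup.
 */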
@@ -353,6 +360,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
@@ -815,7 +823,7 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
out: return ret;
}
-static int do_raw_setsockopt(struct sock *sk, int level, int optname,
+static int do_raw_setsockopt(struct sock *sk, int optname,
sockptr_t optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
@@ -832,11 +840,11 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
- return do_raw_setsockopt(sk, level, optname, optval, optlen);
+ return do_raw_setsockopt(sk, optname, optval, optlen);
}
-static int do_raw_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+static int do_raw_getsockopt(struct sock *sk, int optname,
+ char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
@@ -852,7 +860,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
- return do_raw_getsockopt(sk, level, optname, optval, optlen);
+ return do_raw_getsockopt(sk, optname, optval, optlen);
}
static int raw_ioctl(struct sock *sk, int cmd, int *karg)
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index fe2140c8375c..cc793bd8de25 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -213,6 +213,7 @@ static int raw_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler raw_diag_handler = {
+ .owner = THIS_MODULE,
.dump = raw_diag_dump,
.dump_one = raw_diag_dump_one,
.idiag_get_info = raw_diag_get_info,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 16615d107cf0..c8f76f56dc16 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2313,7 +2313,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
/* not do cache if bc_forwarding is enabled */
- if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+ if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
do_cache = false;
goto brd_input;
}
@@ -2993,7 +2993,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
#ifdef CONFIG_IP_MROUTE
if (ipv4_is_multicast(dst) &&
!ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
int err = ipmr_get_route(net, skb,
fl4->saddr, fl4->daddr,
r, portid);
@@ -3693,9 +3693,8 @@ int __init ip_rt_init(void)
panic("IP: failed to allocate ip_rt_acct\n");
#endif
- ipv4_dst_ops.kmem_cachep =
- kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 61f1c96cfe63..7972ad3d7c73 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -51,15 +51,6 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
count, &syncookie_secret[c]);
}
-/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
-static u64 tcp_ns_to_ts(bool usec_ts, u64 val)
-{
- if (usec_ts)
- return div_u64(val, NSEC_PER_USEC);
-
- return div_u64(val, NSEC_PER_MSEC);
-}
-
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
* tcp options in the lower bits of the timestamp value that will be
@@ -304,6 +295,24 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb,
return 0;
}
+#if IS_ENABLED(CONFIG_BPF)
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct request_sock *req = inet_reqsk(skb->sk);
+
+ skb->sk = NULL;
+ skb->destructor = NULL;
+
+ if (cookie_tcp_reqsk_init(sk, skb, req)) {
+ reqsk_free(req);
+ req = NULL;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(cookie_bpf_check);
+#endif
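/* Note (annotation): in this path a BPF program has already attached a
 * request socket to skb->sk, presumably via the bpf_sk_assign_tcp_reqsk()
 * kfunc from the same series; cookie_bpf_check() detaches it and runs the
 * common cookie_tcp_reqsk_init(), so no kernel-generated cookie needs to
 * be decoded.
 */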
+
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *tcp_opt,
@@ -399,16 +408,23 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct rtable *rt;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
@@ -420,8 +436,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
*/
RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
tcp_ao_syncookie(sk, skb, req, AF_INET);
@@ -438,8 +456,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
- if (IS_ERR(rt))
+ if (IS_ERR(rt)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
/* Try to redo what tcp_v4_send_synack did. */
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
@@ -454,19 +474,24 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst);
ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being setup
* Normal sockets get it right from inet_csk_route_child_sock()
*/
- if (ret)
- inet_sk(ret)->cork.fl.u.ip4 = fl4;
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
+ inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ kfree_skb_reason(skb, reason);
return NULL;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c82dc42f57c6..d20b62d52171 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -279,6 +279,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
+#include <net/rps.h>
/* Track pending CMSGs. */
enum {
@@ -974,7 +975,7 @@ int tcp_wmem_schedule(struct sock *sk, int copy)
* Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
* to guarantee some progress.
*/
- left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+ left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
if (left > 0)
sk_forced_mem_schedule(sk, min(left, copy));
return min(copy, sk->sk_forward_alloc);
@@ -4010,11 +4011,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
if (copy_from_sockptr(&len, optlen, sizeof(int)))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
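/* Note (annotation): the reorder above is a sign-handling fix:
 * min_t(unsigned int, len, sizeof(int)) compares len as unsigned, so a
 * negative len used to wrap to a huge value, get clamped to 4, and slip
 * past the len < 0 check; validating before clamping restores -EINVAL.
 * udp_lib_getsockopt() below receives the same treatment.
 */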
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
@@ -4651,7 +4652,7 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 113);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105);
/* TXRX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 87db432c6bb4..3afeeb68e8a7 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -509,9 +509,9 @@ static int tcp_ao_hash_header(struct tcp_sigpool *hp,
bool exclude_options, u8 *hash,
int hash_offset, int hash_len)
{
- int err, len = th->doff << 2;
struct scatterlist sg;
u8 *hdr = hp->scratch;
+ int err, len;
/* We are not allowed to change tcphdr, make a local copy */
if (exclude_options) {
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 22358032dd48..05dc2d05bc7c 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -1155,7 +1155,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
.set_state = bbr_set_state,
};
-BTF_SET8_START(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_bbr_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, bbr_init)
@@ -1168,7 +1168,7 @@ BTF_ID_FLAGS(func, bbr_min_tso_segs)
BTF_ID_FLAGS(func, bbr_set_state)
#endif
#endif
-BTF_SET8_END(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_bbr_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1b34050a7538..28ffcfbeef14 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -146,11 +146,7 @@ EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
struct tcp_congestion_ops *existing;
- int ret;
-
- ret = tcp_validate_congestion_control(ca);
- if (ret)
- return ret;
+ int ret = 0;
ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0fd78ecb67e7..44869ea089e3 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -485,7 +485,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.name = "cubic",
};
-BTF_SET8_START(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, cubictcp_init)
@@ -496,7 +496,7 @@ BTF_ID_FLAGS(func, cubictcp_cwnd_event)
BTF_ID_FLAGS(func, cubictcp_acked)
#endif
#endif
-BTF_SET8_END(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index bb23bb5b387a..e33fbe4933e4 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -260,7 +260,7 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
.name = "dctcp-reno",
};
-BTF_SET8_START(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, dctcp_init)
@@ -271,7 +271,7 @@ BTF_ID_FLAGS(func, dctcp_cwnd_undo)
BTF_ID_FLAGS(func, dctcp_state)
#endif
#endif
-BTF_SET8_END(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 4cbe4b44425a..f428ecf9120f 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -222,6 +222,7 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler tcp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = tcp_diag_dump,
.dump_one = tcp_diag_dump_one,
.idiag_get_info = tcp_diag_get_info,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index df7b13f0e5e0..5d874817a78d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1164,7 +1164,7 @@ static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
* L|R 1 - orig is lost, retransmit is in flight.
* S|R 1 - orig reached receiver, retrans is still in flight.
* (L|S|R is logically valid, it could occur when L|R is sacked,
- * but it is equivalent to plain S and code short-curcuits it to S.
+ * but it is equivalent to plain S and code short-circuits it to S.
* L|S is logically invalid, it would mean -1 packet in flight 8))
*
* These 6 states form finite state machine, controlled by the following events:
@@ -6361,6 +6361,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
inet_csk_reset_xmit_timer(sk,
ICSK_TIME_RETRANS,
TCP_TIMEOUT_MIN, TCP_RTO_MAX);
+ SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
goto reset_and_undo;
}
@@ -6369,6 +6370,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_time_stamp_ts(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
+ SKB_DR_SET(reason, TCP_RFC7323_PAWS);
goto reset_and_undo;
}
@@ -6572,7 +6574,8 @@ discard_and_undo:
reset_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
- return 1;
+ /* return @reason so the caller can handle the failure */
+ return reason;
}
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
@@ -6616,14 +6619,14 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
* address independent.
*/
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+enum skb_drop_reason
+tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcphdr *th = tcp_hdr(skb);
struct request_sock *req;
int queued = 0;
- bool acceptable;
SKB_DR(reason);
switch (sk->sk_state) {
@@ -6633,7 +6636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_LISTEN:
if (th->ack)
- return 1;
+ return SKB_DROP_REASON_TCP_FLAGS;
if (th->rst) {
SKB_DR_SET(reason, TCP_RESET);
@@ -6649,12 +6652,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
*/
rcu_read_lock();
local_bh_disable();
- acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+ icsk->icsk_af_ops->conn_request(sk, skb);
local_bh_enable();
rcu_read_unlock();
- if (!acceptable)
- return 1;
consume_skb(skb);
return 0;
}
@@ -6699,17 +6700,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 0;
/* step 5: check the ACK field */
- acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT |
- FLAG_NO_CHALLENGE_ACK) > 0;
-
- if (!acceptable) {
- if (sk->sk_state == TCP_SYN_RECV)
- return 1; /* send one RST */
- tcp_send_challenge_ack(sk);
- SKB_DR_SET(reason, TCP_OLD_ACK);
- goto discard;
+ reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK);
+
+ if ((int)reason <= 0) {
+ if (sk->sk_state == TCP_SYN_RECV) {
+ /* send one RST */
+ if (!reason)
+ return SKB_DROP_REASON_TCP_OLD_ACK;
+ return -reason;
+ }
+ /* accept old ack during closing */
+ if ((int)reason < 0) {
+ tcp_send_challenge_ack(sk);
+ reason = -reason;
+ goto discard;
+ }
}
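/* Note (annotation): the convention assumed here is that tcp_ack() now
 * returns a positive value for an acceptable ACK, 0 for an old ACK, and a
 * negated skb_drop_reason when the ACK is rejected, so callers recover
 * the precise reason with -reason instead of the old bare "return 1".
 */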
+ SKB_DR_SET(reason, NOT_SPECIFIED);
switch (sk->sk_state) {
case TCP_SYN_RECV:
tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
@@ -6777,7 +6786,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (READ_ONCE(tp->linger2) < 0) {
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
@@ -6786,7 +6795,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fastopen_active_disable(sk);
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
tmo = tcp_fin_time(sk);
@@ -6851,7 +6860,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk, skb);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
}
fallthrough;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c50c5a32b84..a22ee5838751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1907,7 +1907,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (tcp_checksum_complete(skb))
goto csum_err;
@@ -1915,9 +1914,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *nsk = tcp_v4_cookie_check(sk, skb);
if (!nsk)
- goto discard;
+ return 0;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb)) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason) {
rsk = nsk;
goto reset;
}
@@ -1926,7 +1926,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb)) {
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason) {
rsk = sk;
goto reset;
}
@@ -2275,10 +2276,12 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v4_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ tcp_v4_send_reset(nsk, skb);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9e85f2a0bddd..52040b0e2616 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -911,11 +911,11 @@ EXPORT_SYMBOL(tcp_check_req);
* be created.
*/
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb)
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb)
__releases(&((child)->sk_lock.slock))
{
- int ret = 0;
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
int state = child->sk_state;
/* record sk_napi_id and sk_rx_queue_mapping of child. */
@@ -923,7 +923,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb);
+ reason = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
@@ -937,6 +937,6 @@ int tcp_child_process(struct sock *parent, struct sock *child,
bh_unlock_sock(child);
sock_put(child);
- return ret;
+ return reason;
}
EXPORT_SYMBOL(tcp_child_process);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 8311c38267b5..ebe4722bb020 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -204,7 +204,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
goto out;
hlen = off + thlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
goto out;
@@ -299,18 +299,20 @@ out:
void tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
+ struct skb_shared_info *shinfo;
+
+ if (skb->encapsulation)
+ skb->inner_transport_header = skb->transport_header;
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- if (skb->encapsulation)
- skb->inner_transport_header = skb->transport_header;
+ shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);
@@ -335,24 +337,22 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (NAPI_GRO_CB(skb)->is_atomic)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
+ (NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);
tcp_gro_complete(skb);
return 0;
}
-static const struct net_offload tcpv4_offload = {
- .callbacks = {
- .gso_segment = tcp4_gso_segment,
- .gro_receive = tcp4_gro_receive,
- .gro_complete = tcp4_gro_complete,
- },
-};
-
int __init tcpv4_offload_init(void)
{
- return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
+ net_hotdata.tcpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp4_gso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e474b201900f..661d0e0d273f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -411,8 +411,6 @@ INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
- static u32 udp_ehash_secret __read_mostly;
-
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
@@ -1589,7 +1587,8 @@ int udp_init_sock(struct sock *sk)
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
- sk_peek_offset_bwd(sk, len);
+ if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
+ sk_peek_offset_bwd(sk, len);
if (!skb_unref(skb))
return;
@@ -2569,11 +2568,12 @@ int udp_v4_early_demux(struct sk_buff *skb)
uh->source, iph->saddr, dif, sdif);
}
- if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+ if (!sk)
return 0;
skb->sk = sk;
- skb->destructor = sock_efree;
+ DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
+ skb->destructor = sock_pfree;
dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
@@ -2792,11 +2792,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case UDP_CORK:
val = udp_test_bit(CORK, sk);
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index dc41a22ee80e..38cb3a28e4ed 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -237,6 +237,7 @@ static int udplite_diag_destroy(struct sk_buff *in_skb,
#endif
static const struct inet_diag_handler udp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
@@ -260,6 +261,7 @@ static int udplite_diag_dump_one(struct netlink_callback *cb,
}
static const struct inet_diag_handler udplite_diag_handler = {
+ .owner = THIS_MODULE,
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6c95d28d0c4a..b9880743765c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -737,15 +737,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
-static const struct net_offload udpv4_offload = {
- .callbacks = {
- .gso_segment = udp4_ufo_fragment,
- .gro_receive = udp4_gro_receive,
- .gro_complete = udp4_gro_complete,
- },
-};
-
int __init udpv4_offload_init(void)
{
- return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
+ net_hotdata.udpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp4_ufo_fragment,
+ .gro_receive = udp4_gro_receive,
+ .gro_complete = udp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c54676998eb6..dae35101d189 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -58,7 +58,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
return -iph->protocol;
#endif
- __skb_push(skb, skb->data - skb_network_header(skb));
+ __skb_push(skb, -skb_network_offset(skb));
iph->tot_len = htons(skb->len);
ip_send_check(iph);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 055230b669cf..247bd4d8ee45 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -257,6 +258,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -549,7 +551,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto out;
if ((all || type == NETCONFA_FORWARDING) &&
- nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
+ nla_put_s32(skb, NETCONFA_FORWARDING,
+ READ_ONCE(devconf->forwarding)) < 0)
goto nla_put_failure;
#ifdef CONFIG_IPV6_MROUTE
if ((all || type == NETCONFA_MC_FORWARDING) &&
@@ -558,12 +561,13 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto nla_put_failure;
#endif
if ((all || type == NETCONFA_PROXY_NEIGH) &&
- nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
+ nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
+ READ_ONCE(devconf->proxy_ndp)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- devconf->ignore_routes_with_linkdown) < 0)
+ READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
goto nla_put_failure;
out:
@@ -713,7 +717,7 @@ errout:
static u32 inet6_base_seq(const struct net *net)
{
u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
- net->dev_base_seq;
+ READ_ONCE(net->dev_base_seq);
/* Must not return 0 (see nl_dump_check_consistent()).
* Chose a value far away from 0.
@@ -723,17 +727,18 @@ static u32 inet6_base_seq(const struct net *net)
return res;
}
-
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -750,64 +755,46 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = inet6_base_seq(net);
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (inet6_netconf_fill_devconf(skb, dev->ifindex,
- &idev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_netconf_fill_devconf(skb, dev->ifindex,
+ &idev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF,
+ NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv6.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv6.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
@@ -867,7 +854,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
- idev->cnf.forwarding = newf;
+
+ WRITE_ONCE(idev->cnf.forwarding, newf);
if (changed)
dev_forward_change(idev);
}
@@ -884,7 +872,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->forwarding) {
if ((!newf) ^ (!old))
@@ -899,7 +887,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
if (p == &net->ipv6.devconf_all->forwarding) {
int old_dflt = net->ipv6.devconf_dflt->forwarding;
- net->ipv6.devconf_dflt->forwarding = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
if ((!newf) ^ (!old_dflt))
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_FORWARDING,
@@ -931,7 +919,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
if (idev) {
int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
- idev->cnf.ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
if (changed)
inet6_netconf_notify_devconf(dev_net(dev),
RTM_NEWNETCONF,
@@ -952,7 +940,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
if ((!newf) ^ (!old))
@@ -966,7 +954,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
}
if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
- net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
addrconf_linkdown_change(net, newf);
if ((!newf) ^ (!old))
inet6_netconf_notify_devconf(net,
@@ -1270,6 +1258,7 @@ static void
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
bool del_rt, bool del_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
@@ -1279,8 +1268,15 @@ cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
if (del_rt)
ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
else {
- if (!(f6i->fib6_flags & RTF_EXPIRES))
+ if (!(f6i->fib6_flags & RTF_EXPIRES)) {
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
fib6_info_release(f6i);
}
}
@@ -1346,12 +1342,21 @@ out:
in6_ifa_put(ifp);
}
+static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
+{
+ return READ_ONCE(idev->cnf.regen_min_advance) +
+ READ_ONCE(idev->cnf.regen_max_retry) *
+ READ_ONCE(idev->cnf.dad_transmits) *
+ max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+}
+
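/* Note (annotation): worked example with the presumed defaults of
 * regen_min_advance = 2s, regen_max_retry = 3, dad_transmits = 1 and a 1s
 * RETRANS_TIME: regen_advance = 2 + 3 * 1 * 1 = 5 seconds, i.e. a
 * replacement temporary address is generated at least 5s before the
 * preferred lifetime runs out; the 2s regen_min_advance floor is the new
 * part of this formula.
 */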
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
{
struct inet6_dev *idev = ifp->idev;
unsigned long tmp_tstamp, age;
unsigned long regen_advance;
unsigned long now = jiffies;
+ u32 if_public_preferred_lft;
s32 cnf_temp_preferred_lft;
struct inet6_ifaddr *ift;
struct ifa6_config cfg;
@@ -1363,7 +1368,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
retry:
in6_dev_hold(idev);
- if (idev->cnf.use_tempaddr <= 0) {
+ if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
write_unlock_bh(&idev->lock);
pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
@@ -1371,8 +1376,8 @@ retry:
goto out;
}
spin_lock_bh(&ifp->lock);
- if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
- idev->cnf.use_tempaddr = -1; /*XXX*/
+ if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
+ WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/
spin_unlock_bh(&ifp->lock);
write_unlock_bh(&idev->lock);
pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
@@ -1387,16 +1392,14 @@ retry:
age = (now - ifp->tstamp) / HZ;
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ regen_advance = ipv6_get_regen_advance(idev);
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
max_desync_factor = min_t(long,
- idev->cnf.max_desync_factor,
+ READ_ONCE(idev->cnf.max_desync_factor),
cnf_temp_preferred_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
@@ -1409,11 +1412,13 @@ retry:
}
}
+ if_public_preferred_lft = ifp->prefered_lft;
+
memset(&cfg, 0, sizeof(cfg));
cfg.valid_lft = min_t(__u32, ifp->valid_lft,
- idev->cnf.temp_valid_lft + age);
+ READ_ONCE(idev->cnf.temp_valid_lft) + age);
cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
- cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
+ cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
cfg.plen = ifp->prefix_len;
@@ -1422,19 +1427,41 @@ retry:
write_unlock_bh(&idev->lock);
- /* A temporary address is created only if this calculated Preferred
- * Lifetime is greater than REGEN_ADVANCE time units. In particular,
- * an implementation must not create a temporary address with a zero
- * Preferred Lifetime.
+ /* From RFC 4941:
+ *
+ * A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units. In
+ * particular, an implementation must not create a temporary address
+ * with a zero Preferred Lifetime.
+ *
+ * ...
+ *
+ * When creating a temporary address, the lifetime values MUST be
+ * derived from the corresponding prefix as follows:
+ *
+ * ...
+ *
+ * * Its Preferred Lifetime is the lower of the Preferred Lifetime
+ * of the public address or TEMP_PREFERRED_LIFETIME -
+ * DESYNC_FACTOR.
+ *
+ * To comply with the RFC's requirements, clamp the preferred lifetime
+ * to a minimum of regen_advance, unless that would exceed valid_lft or
+ * ifp->prefered_lft.
+ *
* Use age calculation as in addrconf_verify to avoid unnecessary
* temporary addresses being generated.
*/
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (cfg.preferred_lft <= regen_advance + age) {
- in6_ifa_put(ifp);
- in6_dev_put(idev);
- ret = -1;
- goto out;
+ cfg.preferred_lft = regen_advance + age + 1;
+ if (cfg.preferred_lft > cfg.valid_lft ||
+ cfg.preferred_lft > if_public_preferred_lft) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+ goto out;
+ }
}
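/* Note (annotation): example of the clamp above, with regen_advance + age
 * = 5s and a computed preferred lifetime of 3s: cfg.preferred_lft is
 * raised to 6s, and the temporary address is created only if 6s still
 * fits under both cfg.valid_lft and the public address's preferred
 * lifetime, where the old code bailed out unconditionally.
 */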
cfg.ifa_flags = IFA_F_TEMPORARY;
@@ -1513,15 +1540,17 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}
-static bool ipv6_use_optimistic_addr(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_use_optimistic_addr(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
- if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
+ if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
+ !READ_ONCE(idev->cnf.use_optimistic))
return false;
return true;
@@ -1530,13 +1559,14 @@ static bool ipv6_use_optimistic_addr(struct net *net,
#endif
}
-static bool ipv6_allow_optimistic_dad(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_allow_optimistic_dad(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
return true;
@@ -1642,7 +1672,7 @@ static int ipv6_get_saddr_eval(struct net *net,
*/
int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
- score->ifa->idev->cnf.use_tempaddr >= 2;
+ READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
break;
}
@@ -1818,7 +1848,7 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
idev = __in6_dev_get(dst_dev);
if ((dst_type & IPV6_ADDR_MULTICAST) ||
dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
- (idev && idev->cnf.use_oif_addrs_only)) {
+ (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
use_oif_addr = true;
}
}
@@ -2125,6 +2155,7 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net *net = dev_net(idev->dev);
+ int max_addresses;
if (addrconf_dad_end(ifp)) {
in6_ifa_put(ifp);
@@ -2162,9 +2193,9 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
spin_unlock_bh(&ifp->lock);
- if (idev->cnf.max_addresses &&
- ipv6_count_addresses(idev) >=
- idev->cnf.max_addresses)
+ max_addresses = READ_ONCE(idev->cnf.max_addresses);
+ if (max_addresses &&
+ ipv6_count_addresses(idev) >= max_addresses)
goto lock_errdad;
net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
@@ -2561,11 +2592,11 @@ static void manage_tempaddrs(struct inet6_dev *idev,
* (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
*/
age = (now - ift->cstamp) / HZ;
- max_valid = idev->cnf.temp_valid_lft - age;
+ max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
if (max_valid < 0)
max_valid = 0;
- max_prefered = idev->cnf.temp_prefered_lft -
+ max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -2598,7 +2629,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
create = true;
- if (create && idev->cnf.use_tempaddr > 0) {
+ if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
/* When a new public address is created as described
* in [ADDRCONF], also create a new temporary address.
*/
@@ -2626,7 +2657,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
int create = 0, update_lft = 0;
if (!ifp && valid_lft) {
- int max_addresses = in6_dev->cnf.max_addresses;
+ int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
struct ifa6_config cfg = {
.pfx = addr,
.plen = pinfo->prefix_len,
@@ -2638,8 +2669,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
};
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((net->ipv6.devconf_all->optimistic_dad ||
- in6_dev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
!net->ipv6.devconf_all->forwarding && sllao)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -2688,7 +2719,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
*/
update_lft = !create && stored_lft;
- if (update_lft && !in6_dev->cnf.ra_honor_pio_life) {
+ if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
const u32 minimum_lft = min_t(u32,
stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
@@ -2697,7 +2728,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
if (update_lft) {
ifp->valid_lft = valid_lft;
ifp->prefered_lft = prefered_lft;
- ifp->tstamp = now;
+ WRITE_ONCE(ifp->tstamp, now);
flags = ifp->flags;
ifp->flags &= ~IFA_F_DEPRECATED;
spin_unlock_bh(&ifp->lock);
@@ -2721,6 +2752,7 @@ EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
struct prefix_info *pinfo;
+ struct fib6_table *table;
__u32 valid_lft;
__u32 prefered_lft;
int addr_type, err;
@@ -2797,11 +2829,20 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
if (valid_lft == 0) {
ip6_del_rt(net, rt, false);
rt = NULL;
- } else if (addrconf_finite_timeout(rt_expires)) {
- /* not infinity */
- fib6_set_expires(rt, jiffies + rt_expires);
} else {
- fib6_clean_expires(rt);
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (addrconf_finite_timeout(rt_expires)) {
+ /* not infinity */
+ fib6_set_expires(rt, jiffies + rt_expires);
+ fib6_add_gc_list(rt);
+ } else {
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
}
} else if (valid_lft) {
clock_t expires = 0;
@@ -3262,8 +3303,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
struct inet6_ifaddr *ifp;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
- idev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(idev->cnf.optimistic_dad)) &&
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -3442,7 +3483,8 @@ static void addrconf_dev_config(struct net_device *dev)
/* this device type has no EUI support */
if (dev->type == ARPHRD_NONE &&
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_RANDOM);
addrconf_addr_gen(idev, false);
}
@@ -3620,7 +3662,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
break;
}
@@ -3712,9 +3754,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev->cnf.mtu6 != dev->mtu &&
dev->mtu >= IPV6_MIN_MTU) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
/*
@@ -3834,10 +3876,10 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
*/
if (!unregister && !idev->cnf.disable_ipv6) {
/* aggregate the system setting and interface setting */
- int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
+ int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);
if (!_keep_addr)
- _keep_addr = idev->cnf.keep_addr_on_down;
+ _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);
keep_addr = (_keep_addr > 0);
}
@@ -3956,7 +3998,7 @@ restart:
ipv6_mc_down(idev);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
idev->ra_mtu = 0;
/* Last: Shot the device (if unregistered) */
@@ -3974,6 +4016,7 @@ static void addrconf_rs_timer(struct timer_list *t)
struct inet6_dev *idev = from_timer(idev, t, rs_timer);
struct net_device *dev = idev->dev;
struct in6_addr lladdr;
+ int rtr_solicits;
write_lock(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY))
@@ -3986,7 +4029,9 @@ static void addrconf_rs_timer(struct timer_list *t)
if (idev->if_flags & IF_RA_RCVD)
goto out;
- if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
+ rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
+
+ if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
write_unlock(&idev->lock);
if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
ndisc_send_rs(dev, &lladdr,
@@ -3996,11 +4041,12 @@ static void addrconf_rs_timer(struct timer_list *t)
write_lock(&idev->lock);
idev->rs_interval = rfc3315_s14_backoff_update(
- idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
+ idev->rs_interval,
+ READ_ONCE(idev->cnf.rtr_solicit_max_interval));
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
- idev->cnf.rtr_solicits) ?
- idev->cnf.rtr_solicit_delay :
+ READ_ONCE(idev->cnf.rtr_solicits)) ?
+ READ_ONCE(idev->cnf.rtr_solicit_delay) :
idev->rs_interval);
} else {
/*
@@ -4021,24 +4067,25 @@ put:
*/
static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
{
- unsigned long rand_num;
struct inet6_dev *idev = ifp->idev;
+ unsigned long rand_num;
u64 nonce;
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
+ rand_num = get_random_u32_below(
+ READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
nonce = 0;
- if (idev->cnf.enhanced_dad ||
- dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
+ if (READ_ONCE(idev->cnf.enhanced_dad) ||
+ READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
do
get_random_bytes(&nonce, 6);
while (nonce == 0);
}
ifp->dad_nonce = nonce;
- ifp->dad_probes = idev->cnf.dad_transmits;
+ ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
addrconf_mod_dad_work(ifp, rand_num);
}
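
[Note on the pattern above: the hunks pair WRITE_ONCE() at every locked writer with READ_ONCE() at the lockless readers, so each devconf load is a single non-torn access the compiler cannot re-read or cache. A minimal userspace sketch of that pairing, with simplified stand-in macros (these are not the kernel's definitions) and dad_transmits as the example field:

	#include <pthread.h>
	#include <stdio.h>

	#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

	static int dad_transmits = 1;	/* stands in for idev->cnf.dad_transmits */

	static void *sysctl_writer(void *arg)
	{
		WRITE_ONCE(dad_transmits, 3);	/* writer side: store is not torn */
		return arg;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, sysctl_writer, NULL);
		/* lockless reader side, as in addrconf_dad_kick() above */
		printf("dad_transmits=%d\n", READ_ONCE(dad_transmits));
		pthread_join(t, NULL);
		return 0;
	}

A field read this way must be written with WRITE_ONCE() everywhere, which is why the sysctl and netlink write paths in this series are converted together with their readers.]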
@@ -4058,8 +4105,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
net = dev_net(dev);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
- (net->ipv6.devconf_all->accept_dad < 1 &&
- idev->cnf.accept_dad < 1) ||
+ (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
+ READ_ONCE(idev->cnf.accept_dad) < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
bool send_na = false;
@@ -4151,8 +4198,8 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
- if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
- idev->cnf.accept_dad > 1) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->accept_dad) > 1 ||
+ READ_ONCE(idev->cnf.accept_dad) > 1) &&
!idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;
@@ -4163,7 +4210,7 @@ static void addrconf_dad_work(struct work_struct *w)
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
ipv6_addr_equal(&ifp->addr, &addr)) {
/* DAD failed for link-local based on MAC */
- idev->cnf.disable_ipv6 = 1;
+ WRITE_ONCE(idev->cnf.disable_ipv6, 1);
pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
@@ -4277,7 +4324,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
- ifp->idev->cnf.rtr_solicits != 0 &&
+ READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
(dev->flags & IFF_LOOPBACK) == 0 &&
(dev->type != ARPHRD_TUNNEL) &&
!netif_is_team_port(dev);
@@ -4291,8 +4338,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
/* send unsolicited NA if enabled */
if (send_na &&
- (ifp->idev->cnf.ndisc_notify ||
- dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+ (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
+ READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
/*router=*/ !!ifp->idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
@@ -4312,7 +4359,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
write_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
ifp->idev->rs_interval = rfc3315_s14_backoff_init(
- ifp->idev->cnf.rtr_solicit_interval);
+ READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
ifp->idev->rs_probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
@@ -4592,9 +4639,7 @@ restart:
!ifp->regen_count && ifp->ifpub) {
/* This is a non-regenerated temporary addr. */
- unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
if (age + regen_advance >= ifp->prefered_lft) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
@@ -4756,6 +4801,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags,
bool modify_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
u32 prio;
@@ -4776,10 +4822,18 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
} else {
- if (!expires)
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!(flags & RTF_EXPIRES)) {
fib6_clean_expires(f6i);
- else
+ fib6_remove_gc_list(f6i);
+ } else {
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(f6i);
}
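
[Note: this hunk is one of the callers of the new per-table GC list. Routes sit on tb6_gc_hlist only while they carry RTF_EXPIRES, so the garbage collector scans the expiring subset instead of the whole table. A toy userspace model of that invariant, with illustrative names (toy_rt and gc_list are not kernel symbols):

	#include <stdio.h>

	struct toy_rt {
		unsigned long expires;	/* 0: permanent */
		struct toy_rt *gc_next;	/* linked only while expires != 0 */
	};

	static struct toy_rt *gc_list;

	static void set_expires(struct toy_rt *rt, unsigned long when)
	{
		if (!rt->expires) {	/* gaining an expiry: join the GC list */
			rt->gc_next = gc_list;
			gc_list = rt;
		}
		rt->expires = when;
	}

	static void clean_expires(struct toy_rt *rt)
	{
		struct toy_rt **p;

		for (p = &gc_list; *p; p = &(*p)->gc_next)
			if (*p == rt) {
				*p = rt->gc_next;	/* leave the GC list */
				break;
			}
		rt->expires = 0;
	}

	int main(void)
	{
		struct toy_rt a = { 0 }, b = { 0 };
		int n = 0;

		set_expires(&a, 100);
		set_expires(&b, 200);
		clean_expires(&a);
		for (struct toy_rt *rt = gc_list; rt; rt = rt->gc_next)
			n++;
		printf("%d route(s) still tracked for GC\n", n);	/* prints 1 */
		return 0;
	}

In the kernel version both list operations run under tb6_lock, which is why the hunk brackets them with spin_lock_bh()/spin_unlock_bh().]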
@@ -4842,13 +4896,13 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
IFA_F_NOPREFIXROUTE);
ifp->flags |= cfg->ifa_flags;
- ifp->tstamp = jiffies;
- ifp->valid_lft = cfg->valid_lft;
- ifp->prefered_lft = cfg->preferred_lft;
- ifp->ifa_proto = cfg->ifa_proto;
+ WRITE_ONCE(ifp->tstamp, jiffies);
+ WRITE_ONCE(ifp->valid_lft, cfg->valid_lft);
+ WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft);
+ WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto);
if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
- ifp->rt_priority = cfg->rt_priority;
+ WRITE_ONCE(ifp->rt_priority, cfg->rt_priority);
if (new_peer)
ifp->peer_addr = *cfg->peer_pfx;
@@ -5069,17 +5123,21 @@ struct inet6_fill_args {
enum addr_type_t type;
};
-static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
+static int inet6_fill_ifaddr(struct sk_buff *skb,
+ const struct inet6_ifaddr *ifa,
struct inet6_fill_args *args)
{
- struct nlmsghdr *nlh;
+ struct nlmsghdr *nlh;
u32 preferred, valid;
+ u32 flags, priority;
+ u8 proto;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ flags = READ_ONCE(ifa->flags);
- put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
+ put_ifaddrmsg(nlh, ifa->prefix_len, flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
@@ -5087,13 +5145,14 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto error;
- spin_lock_bh(&ifa->lock);
- if (!((ifa->flags&IFA_F_PERMANENT) &&
- (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
- preferred = ifa->prefered_lft;
- valid = ifa->valid_lft;
+ preferred = READ_ONCE(ifa->prefered_lft);
+ valid = READ_ONCE(ifa->valid_lft);
+
+ if (!((flags & IFA_F_PERMANENT) &&
+ (preferred == INFINITY_LIFE_TIME))) {
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->tstamp)/HZ;
+ long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ;
+
if (preferred > tval)
preferred -= tval;
else
@@ -5109,28 +5168,29 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
- spin_unlock_bh(&ifa->lock);
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
goto error;
- } else
+ } else {
if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
goto error;
+ }
- if (ifa->rt_priority &&
- nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
+ priority = READ_ONCE(ifa->rt_priority);
+ if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority))
goto error;
- if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
+ if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp),
+ preferred, valid) < 0)
goto error;
- if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
+ if (nla_put_u32(skb, IFA_FLAGS, flags) < 0)
goto error;
- if (ifa->ifa_proto &&
- nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
+ proto = READ_ONCE(ifa->ifa_proto);
+ if (proto && nla_put_u8(skb, IFA_PROTO, proto))
goto error;
nlmsg_end(skb, nlh);
@@ -5141,12 +5201,13 @@ error:
return -EMSGSIZE;
}
-static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
+static int inet6_fill_ifmcaddr(struct sk_buff *skb,
+ const struct ifmcaddr6 *ifmca,
struct inet6_fill_args *args)
{
- struct nlmsghdr *nlh;
- u8 scope = RT_SCOPE_UNIVERSE;
int ifindex = ifmca->idev->dev->ifindex;
+ u8 scope = RT_SCOPE_UNIVERSE;
+ struct nlmsghdr *nlh;
if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
@@ -5164,7 +5225,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
- put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
+ put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp),
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -5174,13 +5235,14 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
return 0;
}
-static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
+static int inet6_fill_ifacaddr(struct sk_buff *skb,
+ const struct ifacaddr6 *ifaca,
struct inet6_fill_args *args)
{
struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
int ifindex = dev ? dev->ifindex : 1;
- struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
+ struct nlmsghdr *nlh;
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
@@ -5198,7 +5260,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
- put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
+ put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp),
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -5209,24 +5271,23 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
}
/* called with rcu_read_lock() */
-static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet6_fill_args *fillargs)
{
- struct ifmcaddr6 *ifmca;
- struct ifacaddr6 *ifaca;
+ const struct ifmcaddr6 *ifmca;
+ const struct ifacaddr6 *ifaca;
int ip_idx = 0;
- int err = 1;
+ int err = 0;
- read_lock_bh(&idev->lock);
switch (fillargs->type) {
case UNICAST_ADDR: {
- struct inet6_ifaddr *ifa;
+ const struct inet6_ifaddr *ifa;
fillargs->event = RTM_NEWADDR;
/* unicast address incl. temp addr */
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
- if (ip_idx < s_ip_idx)
+ list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
+ if (ip_idx < *s_ip_idx)
goto next;
err = inet6_fill_ifaddr(skb, ifa, fillargs);
if (err < 0)
@@ -5238,27 +5299,25 @@ next:
break;
}
case MULTICAST_ADDR:
- read_unlock_bh(&idev->lock);
fillargs->event = RTM_GETMULTICAST;
/* multicast address */
- for (ifmca = rtnl_dereference(idev->mc_list);
+ for (ifmca = rcu_dereference(idev->mc_list);
ifmca;
- ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
- if (ip_idx < s_ip_idx)
+ ifmca = rcu_dereference(ifmca->next), ip_idx++) {
+ if (ip_idx < *s_ip_idx)
continue;
err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
if (err < 0)
break;
}
- read_lock_bh(&idev->lock);
break;
case ANYCAST_ADDR:
fillargs->event = RTM_GETANYCAST;
/* anycast address */
- for (ifaca = idev->ac_list; ifaca;
- ifaca = ifaca->aca_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
+ for (ifaca = rcu_dereference(idev->ac_list); ifaca;
+ ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
+ if (ip_idx < *s_ip_idx)
continue;
err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
if (err < 0)
@@ -5268,8 +5327,7 @@ next:
default:
break;
}
- read_unlock_bh(&idev->lock);
- cb->args[2] = ip_idx;
+ *s_ip_idx = err ? ip_idx : 0;
return err;
}
@@ -5332,6 +5390,7 @@ static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
+ struct net *tgt_net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
struct inet6_fill_args fillargs = {
.portid = NETLINK_CB(cb->skb).portid,
@@ -5340,72 +5399,52 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
.netnsid = -1,
.type = type,
};
- struct net *tgt_net = sock_net(skb->sk);
- int idx, s_idx, s_ip_idx;
- int h, s_h;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
- if (!dev) {
- err = -ENODEV;
- goto put_tgt_net;
- }
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
+ if (!dev)
+ goto done;
idev = __in6_dev_get(dev);
- if (idev) {
- err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
+ if (idev)
+ err = in6_dump_addrs(idev, skb, cb,
+ &ctx->ip_idx,
&fillargs);
- if (err > 0)
- err = 0;
- }
- goto put_tgt_net;
+ goto done;
}
}
- rcu_read_lock();
cb->seq = inet6_base_seq(tgt_net);
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
- &fillargs) < 0)
- goto done;
-cont:
- idx++;
- }
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
}
done:
rcu_read_unlock();
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
- return skb->len ? : err;
+ return err;
}
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
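
[Note: the dump above now keeps its cursor, an (ifindex, per-device address index) pair, in cb->ctx, replacing the old hash-bucket walk; for_each_netdev_dump() resumes from the saved ifindex after each filled skb. A rough userspace sketch of that resumable iteration (the kernel additionally resets ip_idx only on a clean finish, which this toy simplifies):

	#include <stdio.h>

	struct dump_ctx {	/* mirrors the anonymous struct over cb->ctx */
		unsigned long ifindex;
		int ip_idx;
	};

	/* One "batch": emit up to budget entries, remembering where we stopped. */
	static int dump_batch(struct dump_ctx *ctx, int ndev, int naddr, int budget)
	{
		int emitted = 0;

		for (; ctx->ifindex < (unsigned long)ndev; ctx->ifindex++) {
			for (; ctx->ip_idx < naddr; ctx->ip_idx++) {
				if (emitted == budget)
					return emitted;	/* resume here next call */
				printf("dev %lu addr %d\n", ctx->ifindex, ctx->ip_idx);
				emitted++;
			}
			ctx->ip_idx = 0;	/* next device restarts its addresses */
		}
		return emitted;
	}

	int main(void)
	{
		struct dump_ctx ctx = { 0 };

		while (dump_batch(&ctx, 3, 2, 4) == 4)	/* 4 == budget: skb was full */
			printf("-- skb full, resuming --\n");
		return 0;
	}

Because the cursor lives in the callback context rather than in global iteration state, the dump no longer needs rtnl to keep the device list stable; RCU plus the xarray-backed iterator suffice.]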
@@ -5578,87 +5617,97 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
-static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
- __s32 *array, int bytes)
+static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
+ __s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
memset(array, 0, bytes);
- array[DEVCONF_FORWARDING] = cnf->forwarding;
- array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
- array[DEVCONF_MTU6] = cnf->mtu6;
- array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
- array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
- array[DEVCONF_AUTOCONF] = cnf->autoconf;
- array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
- array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
+ array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
+ array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
+ array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
+ array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
+ array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
+ array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
+ array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
+ array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_max_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
array[DEVCONF_RTR_SOLICIT_DELAY] =
- jiffies_to_msecs(cnf->rtr_solicit_delay);
- array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
+ array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
- array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
- array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
- array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
- array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
- array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
- array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
- array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
- array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
- array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
- array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
+ array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
+ array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
+ array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
+ array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
+ array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
+ array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
+ array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
+ array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
+ array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
+ READ_ONCE(cnf->accept_ra_min_hop_limit);
+ array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
#ifdef CONFIG_IPV6_ROUTER_PREF
- array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+ array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
array[DEVCONF_RTR_PROBE_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_probe_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
#ifdef CONFIG_IPV6_ROUTE_INFO
- array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
- array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_min_plen);
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_max_plen);
#endif
#endif
- array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
- array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
+ array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
+ array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
+ READ_ONCE(cnf->accept_source_route);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
- array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
+ array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
+ array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
#endif
- array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
- array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
- array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
- array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
- array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
- array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
- array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
- array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
+ array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
+ array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
+ array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
+ array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
+ array[DEVCONF_SUPPRESS_FRAG_NDISC] =
+ READ_ONCE(cnf->suppress_frag_ndisc);
+ array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
+ READ_ONCE(cnf->accept_ra_from_local);
+ array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
+ array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
+ READ_ONCE(cnf->ignore_routes_with_linkdown);
/* we omit DEVCONF_STABLE_SECRET for now */
- array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
- array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
- array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
- array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
- array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
+ array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
+ READ_ONCE(cnf->drop_unicast_in_l2_multicast);
+ array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
+ array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
+ array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
#ifdef CONFIG_IPV6_SEG6_HMAC
- array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
+ array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
#endif
- array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
- array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
- array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
- array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
- array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
- array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
- array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
- array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
- array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
- array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
- array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
+ array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
+ array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
+ array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
+ array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
+ array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
+ array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
+ array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
+ array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
+ array[DEVCONF_NDISC_EVICT_NOCARRIER] =
+ READ_ONCE(cnf->ndisc_evict_nocarrier);
+ array[DEVCONF_ACCEPT_UNTRACKED_NA] =
+ READ_ONCE(cnf->accept_untracked_na);
+ array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
}
static inline size_t inet6_ifla6_size(void)
@@ -5738,13 +5787,14 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
u32 ext_filter_mask)
{
- struct nlattr *nla;
struct ifla_cacheinfo ci;
+ struct nlattr *nla;
+ u32 ra_mtu;
- if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+ if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
goto nla_put_failure;
ci.max_reasm_len = IPV6_MAXPLEN;
- ci.tstamp = cstamp_delta(idev->tstamp);
+ ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
@@ -5776,11 +5826,12 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
read_unlock_bh(&idev->lock);
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
+ READ_ONCE(idev->cnf.addr_gen_mode)))
goto nla_put_failure;
- if (idev->ra_mtu &&
- nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
+ ra_mtu = READ_ONCE(idev->ra_mtu);
+ if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
goto nla_put_failure;
return 0;
@@ -5842,7 +5893,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
return -EINVAL;
}
- if (idev->cnf.rtr_solicits == 0) {
+ if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
NL_SET_ERR_MSG(extack,
"Router solicitation is disabled on device");
return -EINVAL;
@@ -5875,7 +5926,7 @@ update_lft:
if (update_rs) {
idev->if_flags |= IF_RS_SENT;
idev->rs_interval = rfc3315_s14_backoff_init(
- idev->cnf.rtr_solicit_interval);
+ READ_ONCE(idev->cnf.rtr_solicit_interval));
idev->rs_probes = 1;
addrconf_mod_rs_timer(idev, idev->rs_interval);
}
@@ -5981,7 +6032,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
- idev->cnf.addr_gen_mode = mode;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
}
return 0;
@@ -5993,6 +6044,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
struct net_device *dev = idev->dev;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
+ int ifindex, iflink;
void *protoinfo;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
@@ -6003,18 +6055,20 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
hdr->ifi_family = AF_INET6;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
- hdr->ifi_index = dev->ifindex;
+ ifindex = READ_ONCE(dev->ifindex);
+ hdr->ifi_index = ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
+ iflink = dev_get_iflink(dev);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
- nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
- (dev->ifindex != dev_get_iflink(dev) &&
- nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
+ (ifindex != iflink &&
+ nla_put_u32(skb, IFLA_LINK, iflink)) ||
nla_put_u8(skb, IFLA_OPERSTATE,
- netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
+ netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
goto nla_put_failure;
protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
if (!protoinfo)
@@ -6060,50 +6114,39 @@ static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx = 0, s_idx;
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err;
/* only requests using strict checking can pass data to
* influence the dump
*/
if (cb->strict_check) {
- int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
+ err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
if (err < 0)
return err;
}
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
+ err = 0;
rcu_read_lock();
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
- if (inet6_fill_ifinfo(skb, idev,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWLINK, NLM_F_MULTI) < 0)
- goto out;
-cont:
- idx++;
- }
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_fill_ifinfo(skb, idev,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWLINK, NLM_F_MULTI);
+ if (err < 0)
+ break;
}
-out:
rcu_read_unlock();
- cb->args[1] = idx;
- cb->args[0] = h;
- return skb->len;
+ return err;
}
void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
@@ -6324,7 +6367,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
- idev->cnf.disable_ipv6 = newf;
+
+ WRITE_ONCE(idev->cnf.disable_ipv6, newf);
if (changed)
dev_disable_change(idev);
}
@@ -6333,23 +6377,22 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
{
- struct net *net;
+ struct net *net = (struct net *)table->extra2;
int old;
+ if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+ WRITE_ONCE(*p, newf);
+ return 0;
+ }
+
if (!rtnl_trylock())
return restart_syscall();
- net = (struct net *)table->extra2;
old = *p;
- *p = newf;
-
- if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
- rtnl_unlock();
- return 0;
- }
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_all->disable_ipv6) {
- net->ipv6.devconf_dflt->disable_ipv6 = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
addrconf_disable_change(net, newf);
} else if ((!newf) ^ (!old))
dev_disable_change((struct inet6_dev *)table->extra1);
@@ -6460,24 +6503,25 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
}
if (idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
addrconf_init_auto_addrs(idev->dev);
}
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
struct net_device *dev;
- net->ipv6.devconf_dflt->addr_gen_mode = new_val;
+ WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev &&
idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ new_val);
addrconf_init_auto_addrs(idev->dev);
}
}
}
- *((u32 *)ctl->data) = new_val;
+ WRITE_ONCE(*((u32 *)ctl->data), new_val);
}
out:
@@ -6536,14 +6580,15 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
struct inet6_dev *idev = __in6_dev_get(dev);
if (idev) {
- idev->cnf.addr_gen_mode =
- IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
}
} else {
struct inet6_dev *idev = ctl->extra1;
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
out:
@@ -6623,20 +6668,19 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
static
int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
{
+ struct net *net = (struct net *)ctl->extra2;
struct inet6_dev *idev;
- struct net *net;
- if (!rtnl_trylock())
- return restart_syscall();
-
- *valp = val;
-
- net = (struct net *)ctl->extra2;
if (valp == &net->ipv6.devconf_dflt->disable_policy) {
- rtnl_unlock();
+ WRITE_ONCE(*valp, val);
return 0;
}
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ WRITE_ONCE(*valp, val);
+
if (valp == &net->ipv6.devconf_all->disable_policy) {
struct net_device *dev;
@@ -6806,6 +6850,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "regen_min_advance",
+ .data = &ipv6_devconf.regen_min_advance,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "regen_max_retry",
.data = &ipv6_devconf.regen_max_retry,
.maxlen = sizeof(int),
@@ -7365,7 +7416,8 @@ int __init addrconf_init(void)
if (err < 0)
goto out_addrlabel;
- addrconf_wq = create_workqueue("ipv6_addrconf");
+ /* All works using addrconf_wq need to lock rtnl. */
+ addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
if (!addrconf_wq) {
err = -ENOMEM;
goto out_nowq;
@@ -7388,7 +7440,7 @@ int __init addrconf_init(void)
rtnl_af_register(&inet6_ops);
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
- NULL, inet6_dump_ifinfo, 0);
+ NULL, inet6_dump_ifinfo, RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
@@ -7402,21 +7454,25 @@ int __init addrconf_init(void)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
inet6_rtm_getaddr, inet6_dump_ifaddr,
- RTNL_FLAG_DOIT_UNLOCKED);
+ RTNL_FLAG_DOIT_UNLOCKED |
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
- NULL, inet6_dump_ifmcaddr, 0);
+ NULL, inet6_dump_ifmcaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
- NULL, inet6_dump_ifacaddr, 0);
+ NULL, inet6_dump_ifacaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
inet6_netconf_get_devconf,
inet6_netconf_dump_devconf,
- RTNL_FLAG_DOIT_UNLOCKED);
+ RTNL_FLAG_DOIT_UNLOCKED |
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = ipv6_addr_label_rtnl_register();
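
[Note: the workqueue switch follows from the new comment above it: every work queued on addrconf_wq takes rtnl, so works serialize on the lock anyway and extra worker threads add nothing but contention. The RTNL_FLAG_DUMP_UNLOCKED registrations are the reader half of the same story: those dumps now rely on RCU rather than rtnl. A small pthread sketch of the workqueue reasoning, using a mutex as a stand-in for rtnl:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t rtnl_mutex = PTHREAD_MUTEX_INITIALIZER;

	static void work_fn(int id)
	{
		pthread_mutex_lock(&rtnl_mutex);	/* every work takes "rtnl" */
		printf("work %d runs; a second worker would just block here\n", id);
		pthread_mutex_unlock(&rtnl_mutex);
	}

	static void *worker(void *arg)
	{
		for (int id = 0; id < 3; id++)	/* one thread drains the queue */
			work_fn(id);
		return arg;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		pthread_join(t, NULL);
		return 0;
	}
]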
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 959bfd9f6344..8041dc181bd4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,7 @@
#include <net/xfrm.h>
#include <net/ioam6.h>
#include <net/rawv6.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>
@@ -736,7 +737,7 @@ const struct proto_ops inet6_dgram_ops = {
.recvmsg = inet6_recvmsg, /* retpoline's sake */
.read_skb = udp_read_skb,
.mmap = sock_no_mmap,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
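
[Note: udp_set_peek_off is the UDP-aware replacement for the generic sk_set_peek_off hook behind the SO_PEEK_OFF socket option on v6 datagram sockets. A userspace usage sketch (error handling trimmed; SO_PEEK_OFF is the long-standing Linux uapi constant):

	#include <netinet/in.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
		int off = 0;

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		/* With peek-off armed, each recv(..., MSG_PEEK) resumes at the
		 * saved offset instead of rereading the datagram from byte 0. */
		if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0)
			perror("setsockopt(SO_PEEK_OFF)");
		close(fd);
		return 0;
	}
]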
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index bb17f484ee2c..0f2506e35359 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -296,7 +296,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr)) {
aca->aca_users++;
err = 0;
@@ -317,13 +318,13 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- aca->aca_next = idev->ac_list;
- idev->ac_list = aca;
-
/* Hold this for addrconf_join_solict() below before we unlock,
* it is already exposed via idev->ac_list.
*/
aca_get(aca);
+ aca->aca_next = idev->ac_list;
+ rcu_assign_pointer(idev->ac_list, aca);
+
write_unlock_bh(&idev->lock);
ipv6_add_acaddr_hash(net, aca);
@@ -350,7 +351,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_lock_bh(&idev->lock);
prev_aca = NULL;
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
prev_aca = aca;
@@ -364,9 +366,9 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
return 0;
}
if (prev_aca)
- prev_aca->aca_next = aca->aca_next;
+ rcu_assign_pointer(prev_aca->aca_next, aca->aca_next);
else
- idev->ac_list = aca->aca_next;
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
@@ -392,8 +394,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
struct ifacaddr6 *aca;
write_lock_bh(&idev->lock);
- while ((aca = idev->ac_list) != NULL) {
- idev->ac_list = aca->aca_next;
+ while ((aca = rtnl_dereference(idev->ac_list)) != NULL) {
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
@@ -420,11 +422,10 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
idev = __in6_dev_get(dev);
if (idev) {
- read_lock_bh(&idev->lock);
- for (aca = idev->ac_list; aca; aca = aca->aca_next)
+ for (aca = rcu_dereference(idev->ac_list); aca;
+ aca = rcu_dereference(aca->aca_next))
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
- read_unlock_bh(&idev->lock);
return aca != NULL;
}
return false;
@@ -477,30 +478,25 @@ bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
struct ac6_iter_state {
struct seq_net_private p;
struct net_device *dev;
- struct inet6_dev *idev;
};
#define ac6_seq_private(seq) ((struct ac6_iter_state *)(seq)->private)
static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
{
- struct ifacaddr6 *im = NULL;
struct ac6_iter_state *state = ac6_seq_private(seq);
struct net *net = seq_file_net(seq);
+ struct ifacaddr6 *im = NULL;
- state->idev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
+
idev = __in6_dev_get(state->dev);
if (!idev)
continue;
- read_lock_bh(&idev->lock);
- im = idev->ac_list;
- if (im) {
- state->idev = idev;
+ im = rcu_dereference(idev->ac_list);
+ if (im)
break;
- }
- read_unlock_bh(&idev->lock);
}
return im;
}
@@ -508,22 +504,17 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im)
{
struct ac6_iter_state *state = ac6_seq_private(seq);
+ struct inet6_dev *idev;
- im = im->aca_next;
+ im = rcu_dereference(im->aca_next);
while (!im) {
- if (likely(state->idev != NULL))
- read_unlock_bh(&state->idev->lock);
-
state->dev = next_net_device_rcu(state->dev);
- if (!state->dev) {
- state->idev = NULL;
+ if (!state->dev)
break;
- }
- state->idev = __in6_dev_get(state->dev);
- if (!state->idev)
+ idev = __in6_dev_get(state->dev);
+ if (!idev)
continue;
- read_lock_bh(&state->idev->lock);
- im = state->idev->ac_list;
+ im = rcu_dereference(idev->ac_list);
}
return im;
}
@@ -555,12 +546,6 @@ static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ac6_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
- struct ac6_iter_state *state = ac6_seq_private(seq);
-
- if (likely(state->idev != NULL)) {
- read_unlock_bh(&state->idev->lock);
- state->idev = NULL;
- }
rcu_read_unlock();
}
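
[Note: the anycast changes convert idev->ac_list readers from read_lock_bh() to pure RCU: writers publish with rcu_assign_pointer() under the idev lock, readers walk with rcu_dereference(). The sketch below models only the publish/read ordering with C11 atomics (release store on publish, acquire load on read); grace periods and reclamation, which real RCU also provides, are out of scope, and all names are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct toy_aca {
		int addr;
		struct toy_aca *_Atomic next;
	};

	static struct toy_aca *_Atomic ac_list;

	static void publish(int addr)
	{
		struct toy_aca *a = malloc(sizeof(*a));

		a->addr = addr;	/* plain init: node not visible yet */
		atomic_store_explicit(&a->next,
				      atomic_load_explicit(&ac_list, memory_order_relaxed),
				      memory_order_relaxed);
		/* rcu_assign_pointer(): release makes the initialized node visible */
		atomic_store_explicit(&ac_list, a, memory_order_release);
	}

	static int lookup(int addr)
	{
		/* rcu_dereference() analogue: acquire load, then walk */
		for (struct toy_aca *a = atomic_load_explicit(&ac_list, memory_order_acquire);
		     a;
		     a = atomic_load_explicit(&a->next, memory_order_acquire))
			if (a->addr == addr)
				return 1;
		return 0;
	}

	int main(void)
	{
		publish(1);
		publish(2);
		printf("found 2: %d\n", lookup(2));
		return 0;
	}

This ordering is also why the hunk in __ipv6_dev_ac_inc moves aca_get() and the aca_next assignment before rcu_assign_pointer(): the node must be fully set up before it is published.]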
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1578ed9e97d8..eb8ee1e9373a 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -657,11 +657,8 @@ static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def,
net_clen_bits,
spot + 1,
1);
- if (spot < 0) {
- if (spot == -2)
- return -EFAULT;
+ if (spot < 0)
return 0;
- }
ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
spot,
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 02e9ffb63af1..6789623b2b0d 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -50,6 +50,7 @@
#endif
#include <net/rpl.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <net/ioam6.h>
#include <net/dst_metadata.h>
@@ -378,9 +379,8 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
- accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
- if (accept_seg6 > idev->cnf.seg6_enabled)
- accept_seg6 = idev->cnf.seg6_enabled;
+ accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
+ READ_ONCE(idev->cnf.seg6_enabled));
if (!accept_seg6) {
kfree_skb(skb);
@@ -654,10 +654,13 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct ipv6_rt_hdr *hdr;
struct rt0_hdr *rthdr;
struct net *net = dev_net(skb->dev);
- int accept_source_route = net->ipv6.devconf_all->accept_source_route;
+ int accept_source_route;
- if (idev && accept_source_route > idev->cnf.accept_source_route)
- accept_source_route = idev->cnf.accept_source_route;
+ accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route);
+
+ if (idev)
+ accept_source_route = min(accept_source_route,
+ READ_ONCE(idev->cnf.accept_source_route));
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -801,7 +804,7 @@ looped_back:
ip6_route_input(skb);
if (skb_dst(skb)->error) {
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
}
@@ -818,7 +821,7 @@ looped_back:
goto looped_back;
}
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
@@ -880,14 +883,6 @@ void ipv6_exthdrs_exit(void)
Hop-by-hop options.
**********************************/
-/*
- * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
- */
-static inline struct net *ipv6_skb_net(struct sk_buff *skb)
-{
- return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
-}
-
/* Router Alert as of RFC 2711 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
@@ -918,7 +913,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if IOAM is not enabled on ingress */
- if (!__in6_dev_get(skb->dev)->cnf.ioam6_enabled)
+ if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled))
goto ignore;
/* Truncated Option header */
@@ -938,7 +933,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if the IOAM namespace is unknown */
- ns = ioam6_namespace(ipv6_skb_net(skb), trace->namespace_id);
+ ns = ioam6_namespace(dev_net(skb->dev), trace->namespace_id);
if (!ns)
goto ignore;
@@ -954,6 +949,9 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
+ optoff + sizeof(*hdr));
ioam6_fill_trace_data(skb, ns, trace, true);
+
+ ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev),
+ GFP_ATOMIC, (void *)trace, hdr->opt_len - 2);
break;
default:
break;
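
[Note: the two skb_push() conversions above rely on an identity: skb_network_offset() is skb_network_header(skb) - skb->data, so skb->data - skb_network_header(skb) equals -skb_network_offset(skb). A trivial pointer-arithmetic check of that equivalence, with an assumed 40-byte IPv6 header layout:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned char pkt[128];
		unsigned char *network_header = pkt;	/* start of the IPv6 header */
		unsigned char *data = pkt + 40;		/* parse point past the header */

		/* as in the kernel helper: offset is header minus data, so -40 here */
		long network_offset = network_header - data;

		assert(data - network_header == -network_offset);
		printf("rewind %ld bytes to hand the packet back to dst_input()\n",
		       data - network_header);
		return 0;
	}
]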
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 7523c4baef35..52c04f0ac498 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -449,6 +449,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ nla_total_size(16); /* src */
}
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
+{
+ rt_genid_bump_ipv6(ops->fro_net);
+}
+
static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.family = AF_INET6,
.rule_size = sizeof(struct fib6_rule),
@@ -461,6 +466,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
.nlmsg_payload = fib6_rule_nlmsg_payload,
+ .flush_cache = fib6_rule_flush_cache,
.nlgroup = RTNLGRP_IPV6_RULE,
.owner = THIS_MODULE,
.fro_net = &init_net,
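
[Note: fib6_rule_flush_cache() invalidates cached dsts by bumping the per-netns IPv6 route generation id; cached entries carry the genid they were created under, and a mismatch counts as a miss, so no cache walk is needed at rule-change time. A toy version of that scheme:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int rt_genid;	/* bumped whenever rules change */

	struct cached_route {
		unsigned int genid;	/* snapshot taken at cache-fill time */
		int ifindex;
	};

	static bool cache_valid(const struct cached_route *rt)
	{
		return rt->genid == rt_genid;
	}

	int main(void)
	{
		struct cached_route rt = { .genid = rt_genid, .ifindex = 2 };

		printf("before rule change: %s\n", cache_valid(&rt) ? "hit" : "miss");
		rt_genid++;	/* fib6_rule_flush_cache() analogue */
		printf("after rule change:  %s\n", cache_valid(&rt) ? "hit" : "miss");
		return 0;
	}
]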
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b0e8d278e8a9..2e81383b663b 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -14,6 +14,7 @@
#include <linux/random.h>
#include <net/addrconf.h>
+#include <net/hotdata.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -25,16 +26,13 @@ u32 inet6_ehashfn(const struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport)
{
- static u32 inet6_ehash_secret __read_mostly;
- static u32 ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
- net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ net_get_random_once(&tcp_ipv6_hash_secret, sizeof(tcp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
- fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+ fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
inet6_ehash_secret + net_hash_mix(net));
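
[Note: moving the hash secrets out of function-local statics and next to each other in net_hotdata means the ehashfn fast path touches one cache line instead of two scattered globals. An illustrative layout check (this struct is a sketch, not the kernel's net_hotdata definition):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct hotdata_sketch {
		uint32_t inet6_ehash_secret;
		uint32_t tcp_ipv6_hash_secret;
		/* ...more read-mostly fields the fast path touches together... */
	} __attribute__((aligned(64)));

	int main(void)
	{
		printf("secrets at offsets %zu and %zu: one cache line, one miss\n",
		       offsetof(struct hotdata_sketch, inet6_ehash_secret),
		       offsetof(struct hotdata_sketch, tcp_ipv6_hash_secret));
		return 0;
	}
]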
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 571f0e4d9cf3..08c929513065 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -612,6 +612,68 @@ static const struct genl_ops ioam6_genl_ops[] = {
},
};
+#define IOAM6_GENL_EV_GRP_OFFSET 0
+
+static const struct genl_multicast_group ioam6_mcgrps[] = {
+ [IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
+ .flags = GENL_MCAST_CAP_NET_ADMIN },
+};
+
+static int ioam6_event_put_trace(struct sk_buff *skb,
+ struct ioam6_trace_hdr *trace,
+ unsigned int len)
+{
+ if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
+ be16_to_cpu(trace->namespace_id)) ||
+ nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
+ nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
+ be32_to_cpu(trace->type_be32)) ||
+ nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
+ len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
+ trace->data + trace->remlen * 4))
+ return 1;
+
+ return 0;
+}
+
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+
+ if (!genl_has_listeners(&ioam6_genl_family, net,
+ IOAM6_GENL_EV_GRP_OFFSET))
+ return;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!skb)
+ return;
+
+ nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
+ if (!nlh)
+ goto nla_put_failure;
+
+ switch (type) {
+ case IOAM6_EVENT_UNSPEC:
+ WARN_ON_ONCE(1);
+ break;
+ case IOAM6_EVENT_TRACE:
+ if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
+ opt_len))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(skb, nlh);
+ genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
+ IOAM6_GENL_EV_GRP_OFFSET, gfp);
+ return;
+
+nla_put_failure:
+ nlmsg_free(skb);
+}
+
static struct genl_family ioam6_genl_family __ro_after_init = {
.name = IOAM6_GENL_NAME,
.version = IOAM6_GENL_VERSION,
@@ -620,6 +682,8 @@ static struct genl_family ioam6_genl_family __ro_after_init = {
.ops = ioam6_genl_ops,
.n_ops = ARRAY_SIZE(ioam6_genl_ops),
.resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
+ .mcgrps = ioam6_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ioam6_mcgrps),
.module = THIS_MODULE,
};
@@ -663,7 +727,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -671,7 +735,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -758,7 +822,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
@@ -766,7 +830,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
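
[Note: ioam6_event() follows the usual genetlink event shape: bail out via genl_has_listeners() before allocating anything, build the message, then multicast it to the event group. The cheap early-out is the part worth copying; a generic sketch of the same guard:

	#include <stdio.h>

	static int ev_grp_listeners;	/* stands in for genl_has_listeners() */

	static void ioam6_event_sketch(const char *trace)
	{
		if (!ev_grp_listeners)
			return;		/* no allocation, no message build */
		printf("multicasting trace event: %s\n", trace);
	}

	int main(void)
	{
		ioam6_event_sketch("nobody listening, dropped early");
		ev_grp_listeners = 1;
		ioam6_event_sketch("namespace 0x1, nodelen 2");
		return 0;
	}
]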
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4fc2cae0d116..5c558dc1c683 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -160,6 +160,8 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
INIT_LIST_HEAD(&f6i->fib6_siblings);
refcount_set(&f6i->fib6_ref, 1);
+ INIT_HLIST_NODE(&f6i->gc_link);
+
return f6i;
}
@@ -246,6 +248,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
net->ipv6.fib6_null_entry);
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&table->tb6_peers);
+ INIT_HLIST_HEAD(&table->tb6_gc_hlist);
}
return table;
@@ -617,8 +620,11 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct rt6_rtnl_dump_arg arg = { .filter.dump_exceptions = true,
- .filter.dump_routes = true };
+ struct rt6_rtnl_dump_arg arg = {
+ .filter.dump_exceptions = true,
+ .filter.dump_routes = true,
+ .filter.rtnl_held = true,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
@@ -751,8 +757,6 @@ static struct fib6_node *fib6_add_1(struct net *net,
int bit;
__be32 dir = 0;
- RT6_TRACE("fib6_add_1\n");
-
/* insert node in tree */
fn = root;
@@ -1057,6 +1061,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
lockdep_is_held(&table->tb6_lock));
}
}
+
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
}
/*
@@ -1117,10 +1124,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
rt->fib6_nsiblings = 0;
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
- if (!(rt->fib6_flags & RTF_EXPIRES))
+ if (!(rt->fib6_flags & RTF_EXPIRES)) {
fib6_clean_expires(iter);
- else
+ fib6_remove_gc_list(iter);
+ } else {
fib6_set_expires(iter, rt->expires);
+ fib6_add_gc_list(iter);
+ }
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
@@ -1479,6 +1489,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+
+ if (rt->fib6_flags & RTF_EXPIRES)
+ fib6_add_gc_list(rt);
+
fib6_start_gc(info->nl_net, rt);
}
@@ -1803,7 +1817,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
lockdep_is_held(&table->tb6_lock));
struct fib6_info *new_fn_leaf;
- RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
+ pr_debug("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
iter++;
WARN_ON(fn->fn_flags & RTN_RTINFO);
@@ -1866,7 +1880,8 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
FOR_WALKERS(net, w) {
if (!child) {
if (w->node == fn) {
- RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
+ pr_debug("W %p adjusted by delnode 1, s=%d/%d\n",
+ w, w->state, nstate);
w->node = pn;
w->state = nstate;
}
@@ -1874,10 +1889,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
if (w->node == fn) {
w->node = child;
if (children&2) {
- RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
+ pr_debug("W %p adjusted by delnode 2, s=%d\n",
+ w, w->state);
w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
} else {
- RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
+ pr_debug("W %p adjusted by delnode 2, s=%d\n",
+ w, w->state);
w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
}
}
@@ -1905,8 +1922,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
struct net *net = info->nl_net;
bool notify_del = false;
- RT6_TRACE("fib6_del_route\n");
-
/* If the deleted route is the first in the node and it is not part of
* a multipath route, then we need to replace it with the next route
* in the node, if exists.
@@ -1955,7 +1970,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
read_lock(&net->ipv6.fib6_walker_lock);
FOR_WALKERS(net, w) {
if (w->state == FWS_C && w->leaf == rt) {
- RT6_TRACE("walker %p adjusted by delroute\n", w);
+ pr_debug("walker %p adjusted by delroute\n", w);
w->leaf = rcu_dereference_protected(rt->fib6_next,
lockdep_is_held(&table->tb6_lock));
if (!w->leaf)
@@ -2281,9 +2296,8 @@ static void fib6_flush_trees(struct net *net)
* Garbage collection
*/
-static int fib6_age(struct fib6_info *rt, void *arg)
+static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
{
- struct fib6_gc_args *gc_args = arg;
unsigned long now = jiffies;
/*
@@ -2293,7 +2307,7 @@ static int fib6_age(struct fib6_info *rt, void *arg)
if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
if (time_after(now, rt->expires)) {
- RT6_TRACE("expiring %p\n", rt);
+ pr_debug("expiring %p\n", rt);
return -1;
}
gc_args->more++;
@@ -2308,6 +2322,42 @@ static int fib6_age(struct fib6_info *rt, void *arg)
return 0;
}
+static void fib6_gc_table(struct net *net,
+ struct fib6_table *tb6,
+ struct fib6_gc_args *gc_args)
+{
+ struct fib6_info *rt;
+ struct hlist_node *n;
+ struct nl_info info = {
+ .nl_net = net,
+ .skip_notify = false,
+ };
+
+ hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
+ if (fib6_age(rt, gc_args) == -1)
+ fib6_del(rt, &info);
+}
+
+static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ spin_lock_bh(&table->tb6_lock);
+
+ fib6_gc_table(net, table, gc_args);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ }
+ rcu_read_unlock();
+}
+
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
struct fib6_gc_args gc_args;
@@ -2323,7 +2373,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = 0;
- fib6_clean_all(net, fib6_age, &gc_args);
+ fib6_gc_all(net, &gc_args);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
@@ -2383,6 +2433,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -2395,6 +2446,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_local_tbl->tb6_gc_hlist);
#endif
fib6_tables_init(net);
@@ -2444,10 +2496,8 @@ int __init fib6_init(void)
{
int ret = -ENOMEM;
- fib6_node_kmem = kmem_cache_create("fib6_nodes",
- sizeof(struct fib6_node), 0,
- SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
- NULL);
+ fib6_node_kmem = KMEM_CACHE(fib6_node,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
if (!fib6_node_kmem)
goto out;
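
[Note: fib6_gc_table() deletes entries while walking tb6_gc_hlist, hence hlist_for_each_entry_safe(), which caches the next pointer before the current node can be freed by fib6_del(). The same pattern in plain userspace C over a singly linked list (toy names throughout):

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_rt6 {
		unsigned long expires;
		struct toy_rt6 *next;
	};

	static void gc_sweep(struct toy_rt6 **head, unsigned long now)
	{
		struct toy_rt6 **link = head, *rt, *n;

		for (rt = *head; rt; rt = n) {
			n = rt->next;	/* cache next before possibly freeing rt */
			if (rt->expires && rt->expires <= now) {
				*link = n;	/* unlink and reclaim the expired entry */
				free(rt);
			} else {
				link = &rt->next;
			}
		}
	}

	int main(void)
	{
		struct toy_rt6 *head = NULL;

		for (unsigned long e = 1; e <= 3; e++) {
			struct toy_rt6 *rt = calloc(1, sizeof(*rt));

			rt->expires = e * 100;
			rt->next = head;
			head = rt;
		}
		gc_sweep(&head, 150);	/* frees the entry expiring at 100 */
		for (struct toy_rt6 *rt = head; rt; rt = rt->next)
			printf("kept route expiring at %lu\n", rt->expires);
		return 0;
	}
]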
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 070d87abf7c0..5e97e0aa8e07 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1511,6 +1511,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
ip6gre_tnl_init_features(dev);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
@@ -1632,21 +1633,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6gre_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6gre_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6gre_net_ops = {
.init = ip6gre_init_net,
- .exit_batch = ip6gre_exit_batch_net,
+ .exit_batch_rtnl = ip6gre_exit_batch_rtnl,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};
@@ -1903,6 +1902,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
ip6erspan_tnl_link_config(tunnel, 1);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
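
[Note: ip6gre_exit_batch_rtnl() is the new netns-exit shape: the core takes rtnl once per batch and each pernet subsystem only appends its doomed devices to a shared dev_to_kill list, replacing one rtnl round-trip (plus its own unregister_netdevice_many()) per subsystem. A toy cost model of that batching:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t rtnl_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Per-subsystem callback: runs with rtnl already held, only collects. */
	static void collect_tunnels(int ns, int *kill_list, int *nkill)
	{
		kill_list[(*nkill)++] = ns;
	}

	int main(void)
	{
		int kill_list[8], nkill = 0, rounds = 0;

		pthread_mutex_lock(&rtnl_mutex);	/* core takes rtnl once */
		rounds++;
		for (int ns = 0; ns < 3; ns++)
			collect_tunnels(ns, kill_list, &nkill);
		/* one unregister_netdevice_many() over the shared list goes here */
		pthread_mutex_unlock(&rtnl_mutex);

		printf("%d netns cleaned up with %d rtnl acquisition(s)\n",
		       nkill, rounds);
		return 0;
	}
]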
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index b8378814532c..133610a49da6 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
SKB_DR_SET(reason, NOT_SPECIFIED);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
- !idev || unlikely(idev->cnf.disable_ipv6)) {
+ !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
- if (idev && unlikely(idev->cnf.disable_ipv6))
+ if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
SKB_DR_SET(reason, IPV6DISABLED);
goto drop;
}
@@ -236,7 +236,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (!ipv6_addr_is_multicast(&hdr->daddr) &&
(skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) &&
- idev->cnf.drop_unicast_in_l2_multicast) {
+ READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
goto err;
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cca64c7809be..b41e35af69ea 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -419,14 +419,6 @@ static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
return inet_gro_complete(skb, nhoff);
}
-static struct packet_offload ipv6_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IPV6),
- .callbacks = {
- .gso_segment = ipv6_gso_segment,
- .gro_receive = ipv6_gro_receive,
- .gro_complete = ipv6_gro_complete,
- },
-};
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -486,7 +478,15 @@ static int __init ipv6_offload_init(void)
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
- dev_add_offload(&ipv6_packet_offload);
+ net_hotdata.ipv6_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IPV6),
+ .callbacks = {
+ .gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ipv6_packet_offload);
inet_add_offload(&sit_offload, IPPROTO_IPV6);
inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 31b86fe661aa..02eeca5492cd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -234,7 +234,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
- if (unlikely(idev->cnf.disable_ipv6)) {
+ if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
return 0;
@@ -501,7 +501,7 @@ int ip6_forward(struct sk_buff *skb)
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
goto error;
if (skb->pkt_type != PACKET_HOST)
@@ -513,8 +513,8 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!net->ipv6.devconf_all->disable_policy &&
- (!idev || !idev->cnf.disable_policy) &&
+ if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) &&
+ (!idev || !READ_ONCE(idev->cnf.disable_policy)) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
@@ -552,7 +552,7 @@ int ip6_forward(struct sk_buff *skb)
}
/* XXX: idev->cnf.proxy_ndp? */
- if (net->ipv6.devconf_all->proxy_ndp &&
+ if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0) {
@@ -1925,7 +1925,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = cork->base.mark;
skb->tstamp = cork->base.transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
ip6_cork_steal_dst(skb, cork);
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
if (proto == IPPROTO_ICMPV6) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9bbabf750a21..e9cc315832cb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -247,7 +247,6 @@ static void ip6_dev_free(struct net_device *dev)
gro_cells_destroy(&t->gro_cells);
dst_cache_destroy(&t->dst_cache);
- free_percpu(dev->tstats);
}
static int ip6_tnl_create2(struct net_device *dev)
@@ -1756,7 +1755,7 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- return t->parms.link;
+ return READ_ONCE(t->parms.link);
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);
@@ -1848,6 +1847,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_LLTX;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
dev->features |= IPXIPX_FEATURES;
@@ -1873,13 +1873,10 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
if (ret)
- goto free_stats;
+ return ret;
ret = gro_cells_init(&t->gro_cells, dev);
if (ret)
@@ -1898,13 +1895,11 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
destroy_dst:
dst_cache_destroy(&t->dst_cache);
-free_stats:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
return ret;
}
@@ -2282,21 +2277,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6_tnl_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6_tnl_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
- .exit_batch = ip6_tnl_exit_batch_net,
+ .exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
};
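
ip6_tnl_exit_batch_rtnl is the same conversion as the ip6gre one at the top of this section: instead of every subsystem taking rtnl_lock and running its own unregister_netdevice_many(), the pernet core holds the lock once per exit batch and passes a shared dev_to_kill list. A hedged sketch of the core side (names illustrative; the in-tree core batches across all registered ops in one rtnl section, compressed here to a single ops for clarity):

static void demo_ops_exit_rtnl(struct pernet_operations *ops,
			       struct list_head *net_exit_list)
{
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	/* each subsystem only queues its devices on dev_kill_list */
	ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
	/* one batched unregister for everything collected */
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}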
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index e550240c85e1..7f4f976aa24a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -935,6 +935,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1174,24 +1175,22 @@ err_alloc_dev:
return err;
}
-static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
+static void __net_exit vti6_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct vti6_net *ip6n;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
ip6n = net_generic(net, vti6_net_id);
- vti6_destroy_tunnels(ip6n, &list);
+ vti6_destroy_tunnels(ip6n, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations vti6_net_ops = {
.init = vti6_init_net,
- .exit_batch = vti6_exit_batch_net,
+ .exit_batch_rtnl = vti6_exit_batch_rtnl,
.id = &vti6_net_id,
.size = sizeof(struct vti6_net),
};
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9782c180fee6..cb0ee81a068a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1373,10 +1373,7 @@ int __init ip6_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip6_mrt_cache",
- sizeof(struct mfc6_cache),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc6_cache, SLAB_HWCACHE_ALIGN);
if (!mrt_cachep)
return -ENOMEM;
@@ -2595,7 +2592,9 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
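
KMEM_CACHE() derives the cache name, object size and alignment from the struct type itself, removing the copy-pasted string and magic sizes; roughly, per include/linux/slab.h:

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)

One user-visible nuance: the cache now shows up in /proc/slabinfo under the struct's own name ("mfc6_cache") instead of the hand-picked "ip6_mrt_cache". The same conversion appears later in this section for xfrm6_tunnel and kcm.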
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 56c3c467f9de..d4c28ec1bc51 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -948,6 +948,8 @@ done:
if (optlen < sizeof(int))
goto e_inval;
retv = ip6_ra_control(sk, val);
+ if (retv == 0)
+ inet6_assign_bit(RTALERT, sk, valbool);
break;
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
@@ -1346,7 +1348,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
if (val < 0)
- val = sock_net(sk)->ipv6.devconf_all->hop_limit;
+ val = READ_ONCE(sock_net(sk)->ipv6.devconf_all->hop_limit);
break;
}
@@ -1445,6 +1447,10 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.recvfragsize;
break;
+ case IPV6_ROUTER_ALERT:
+ val = inet6_test_bit(RTALERT, sk);
+ break;
+
case IPV6_ROUTER_ALERT_ISOLATE:
val = inet6_test_bit(RTALERT_ISOLATE, sk);
break;
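
Tracking IPV6_ROUTER_ALERT in a socket bit makes the option symmetrically readable; previously only the set direction existed and getsockopt had no case for it. A userspace sketch (error handling elided; ip6_ra_control() accepts only IPPROTO_RAW sockets, so this needs CAP_NET_RAW):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
	int on = 1, out = -1;
	socklen_t len = sizeof(out);

	setsockopt(fd, IPPROTO_IPV6, IPV6_ROUTER_ALERT, &on, sizeof(on));
	getsockopt(fd, IPPROTO_IPV6, IPV6_ROUTER_ALERT, &out, &len);
	printf("IPV6_ROUTER_ALERT = %d\n", out);	/* 1 after this patch */
	return 0;
}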
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bc6e0a0bad3c..7ba01d8cfbae 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -159,9 +159,9 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
int iv;
if (mld_in_v1_mode(idev))
- iv = idev->cnf.mldv1_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
else
- iv = idev->cnf.mldv2_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
return iv > 0 ? iv : 1;
}
@@ -1202,15 +1202,15 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
static int mld_force_mld_version(const struct inet6_dev *idev)
{
+ const struct net *net = dev_net(idev->dev);
+ int all_force;
+
+ all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
/* Normally, both are 0 here. If enforcement to a particular is
* being used, individual device enforcement will have a lower
* precedence over 'all' device (.../conf/all/force_mld_version).
*/
-
- if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
- return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
- else
- return idev->cnf.force_mld_version;
+ return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}
static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
@@ -2719,7 +2719,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Should stop work after group drop. or we will
* start work again in mld_ifc_event()
*/
- synchronize_net();
mld_query_stop_work(idev);
mld_report_stop_work(idev);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index a19999b30bc0..ae134634c323 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -451,7 +451,7 @@ static void ip6_nd_hdr(struct sk_buff *skb,
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
- tclass = idev ? idev->cnf.ndisc_tclass : 0;
+ tclass = idev ? READ_ONCE(idev->cnf.ndisc_tclass) : 0;
rcu_read_unlock();
skb_push(skb, sizeof(*hdr));
@@ -535,7 +535,7 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
override = false;
- inc_opt |= ifp->idev->cnf.force_tllao;
+ inc_opt |= READ_ONCE(ifp->idev->cnf.force_tllao);
in6_ifa_put(ifp);
} else {
if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -903,8 +903,9 @@ have_ifp:
}
if (ipv6_chk_acast_addr(net, dev, &msg->target) ||
- (idev->cnf.forwarding &&
- (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) &&
+ (READ_ONCE(idev->cnf.forwarding) &&
+ (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) ||
+ READ_ONCE(idev->cnf.proxy_ndp)) &&
(is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb->pkt_type != PACKET_HOST &&
@@ -929,7 +930,7 @@ have_ifp:
}
if (is_router < 0)
- is_router = idev->cnf.forwarding;
+ is_router = READ_ONCE(idev->cnf.forwarding);
if (dad) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target,
@@ -973,7 +974,7 @@ static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
{
struct inet6_dev *idev = __in6_dev_get(dev);
- switch (idev->cnf.accept_untracked_na) {
+ switch (READ_ONCE(idev->cnf.accept_untracked_na)) {
case 0: /* Don't accept untracked na (absent in neighbor cache) */
return 0;
case 1: /* Create new entries from na if currently untracked */
@@ -1024,7 +1025,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* drop_unsolicited_na takes precedence over accept_untracked_na
*/
if (!msg->icmph.icmp6_solicited && idev &&
- idev->cnf.drop_unsolicited_na)
+ READ_ONCE(idev->cnf.drop_unsolicited_na))
return reason;
if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))
@@ -1080,7 +1081,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* Note that we don't do a (daddr == all-routers-mcast) check.
*/
new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
- if (!neigh && lladdr && idev && idev->cnf.forwarding) {
+ if (!neigh && lladdr && idev && READ_ONCE(idev->cnf.forwarding)) {
if (accept_untracked_na(dev, saddr)) {
neigh = neigh_create(&nd_tbl, &msg->target, dev);
new_state = NUD_STALE;
@@ -1100,7 +1101,8 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* has already sent a NA to us.
*/
if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
- net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
+ READ_ONCE(net->ipv6.devconf_all->forwarding) &&
+ READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
/* XXX: idev->cnf.proxy_ndp */
goto out;
@@ -1148,7 +1150,7 @@ static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
}
/* Don't accept RS if we're not in router mode */
- if (!idev->cnf.forwarding)
+ if (!READ_ONCE(idev->cnf.forwarding))
goto out;
/*
@@ -1237,6 +1239,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
struct ndisc_options ndopts;
struct fib6_info *rt = NULL;
struct inet6_dev *in6_dev;
+ struct fib6_table *table;
u32 defrtr_usr_metric;
unsigned int pref = 0;
__u32 old_if_flags;
@@ -1317,7 +1320,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
if (old_if_flags != in6_dev->if_flags)
send_ifinfo_notify = true;
- if (!in6_dev->cnf.accept_ra_defrtr) {
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) {
ND_PRINTK(2, info,
"RA: %s, defrtr is false for dev: %s\n",
__func__, skb->dev->name);
@@ -1325,7 +1328,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
- if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
+ if (lifetime != 0 &&
+ lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) {
ND_PRINTK(2, info,
"RA: router lifetime (%ds) is too short: %s\n",
lifetime, skb->dev->name);
@@ -1336,7 +1340,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
* accept_ra_from_local is set to true.
*/
net = dev_net(in6_dev->dev);
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: default router ignored\n",
@@ -1348,7 +1352,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
if (pref == ICMPV6_ROUTER_PREF_INVALID ||
- !in6_dev->cnf.accept_ra_rtr_pref)
+ !READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref))
pref = ICMPV6_ROUTER_PREF_MEDIUM;
#endif
/* routes added from RAs do not use nexthop objects */
@@ -1382,7 +1386,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
neigh_release(neigh);
rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
- skb->dev, pref, defrtr_usr_metric);
+ skb->dev, pref, defrtr_usr_metric,
+ lifetime);
if (!rt) {
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
@@ -1409,12 +1414,21 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
inet6_rt_notify(RTM_NEWROUTE, rt, &nlinfo, NLM_F_REPLACE);
}
- if (rt)
+ if (rt) {
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(rt, jiffies + (HZ * lifetime));
- if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+ fib6_add_gc_list(rt);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) < 256 &&
ra_msg->icmph.icmp6_hop_limit) {
- if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) <=
+ ra_msg->icmph.icmp6_hop_limit) {
+ WRITE_ONCE(in6_dev->cnf.hop_limit,
+ ra_msg->icmph.icmp6_hop_limit);
fib6_metric_set(rt, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
} else {
@@ -1496,7 +1510,7 @@ skip_linkparms:
}
#ifdef CONFIG_IPV6_ROUTE_INFO
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
in6_dev->dev, 0)) {
ND_PRINTK(2, info,
@@ -1505,7 +1519,7 @@ skip_linkparms:
goto skip_routeinfo;
}
- if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref) && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
p;
@@ -1517,14 +1531,14 @@ skip_linkparms:
continue;
#endif
if (ri->prefix_len == 0 &&
- !in6_dev->cnf.accept_ra_defrtr)
+ !READ_ONCE(in6_dev->cnf.accept_ra_defrtr))
continue;
if (ri->lifetime != 0 &&
- ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
+ ntohl(ri->lifetime) < READ_ONCE(in6_dev->cnf.accept_ra_min_lft))
continue;
- if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ if (ri->prefix_len < READ_ONCE(in6_dev->cnf.accept_ra_rt_info_min_plen))
continue;
- if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
+ if (ri->prefix_len > READ_ONCE(in6_dev->cnf.accept_ra_rt_info_max_plen))
continue;
rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
&ipv6_hdr(skb)->saddr);
@@ -1544,7 +1558,7 @@ skip_routeinfo:
}
#endif
- if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_pinfo) && ndopts.nd_opts_pi) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_pi;
p;
@@ -1555,7 +1569,7 @@ skip_routeinfo:
}
}
- if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
+ if (ndopts.nd_opts_mtu && READ_ONCE(in6_dev->cnf.accept_ra_mtu)) {
__be32 n;
u32 mtu;
@@ -1569,8 +1583,8 @@ skip_routeinfo:
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
- } else if (in6_dev->cnf.mtu6 != mtu) {
- in6_dev->cnf.mtu6 = mtu;
+ } else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) {
+ WRITE_ONCE(in6_dev->cnf.mtu6, mtu);
fib6_metric_set(rt, RTAX_MTU, mtu);
rt6_mtu_change(skb->dev, mtu);
}
@@ -1804,7 +1818,7 @@ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
if (!idev)
return true;
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
- idev->cnf.suppress_frag_ndisc) {
+ READ_ONCE(idev->cnf.suppress_frag_ndisc)) {
net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
return true;
}
@@ -1881,8 +1895,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
idev = in6_dev_get(dev);
if (!idev)
break;
- if (idev->cnf.ndisc_notify ||
- net->ipv6.devconf_all->ndisc_notify)
+ if (READ_ONCE(idev->cnf.ndisc_notify) ||
+ READ_ONCE(net->ipv6.devconf_all->ndisc_notify))
ndisc_send_unsol_na(dev);
in6_dev_put(idev);
break;
@@ -1891,8 +1905,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
if (!idev)
evict_nocarrier = true;
else {
- evict_nocarrier = idev->cnf.ndisc_evict_nocarrier &&
- net->ipv6.devconf_all->ndisc_evict_nocarrier;
+ evict_nocarrier = READ_ONCE(idev->cnf.ndisc_evict_nocarrier) &&
+ READ_ONCE(net->ipv6.devconf_all->ndisc_evict_nocarrier);
in6_dev_put(idev);
}
@@ -1966,7 +1980,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
if (ctl->data == &NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME))
idev->nd_parms->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME));
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
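
The RA handling above pairs fib6_set_expires() with fib6_add_gc_list() under the table lock, so routes with a finite lifetime sit on a dedicated per-table list that garbage collection can walk instead of scanning the whole trie (the apparent point of the new fib6_add_gc_list()/fib6_remove_gc_list() helpers). The pattern, as used in the hunk above and again in rt6_route_rcv() later in this section:

	spin_lock_bh(&table->tb6_lock);
	if (addrconf_finite_timeout(lifetime)) {
		fib6_set_expires(rt, jiffies + HZ * lifetime);
		fib6_add_gc_list(rt);		/* make it visible to GC */
	} else {
		fib6_clean_expires(rt);
		fib6_remove_gc_list(rt);	/* permanent again */
	}
	spin_unlock_bh(&table->tb6_lock);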
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 0ba62f4868f9..f3c8e2d918e1 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -6,6 +6,10 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+# old sockopt interface and eval loop
+config IP6_NF_IPTABLES_LEGACY
+ tristate
+
config NF_SOCKET_IPV6
tristate "IPv6 socket lookup support"
help
@@ -147,7 +151,7 @@ config IP6_NF_MATCH_MH
config IP6_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP6_NF_MANGLE || IP6_NF_RAW
+ depends on IP6_NF_MANGLE || IP6_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -186,6 +190,8 @@ config IP6_NF_TARGET_HL
config IP6_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
+ tristate
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -195,7 +201,7 @@ config IP6_NF_FILTER
config IP6_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP6_NF_FILTER
+ depends on IP6_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV6
default m if NETFILTER_ADVANCED=n
help
@@ -221,6 +227,7 @@ config IP6_NF_TARGET_SYNPROXY
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -230,6 +237,7 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -243,6 +251,7 @@ config IP6_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -254,6 +263,7 @@ config IP6_NF_NAT
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
+ select IP6_NF_IPTABLES_LEGACY
select NETFILTER_XT_NAT
help
This enables the `nat' table in ip6tables. This allows masquerading,
@@ -262,25 +272,23 @@ config IP6_NF_NAT
To compile it as a module, choose M here. If unsure, say N.
-if IP6_NF_NAT
-
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
select NETFILTER_XT_TARGET_MASQUERADE
+ depends on IP6_NF_NAT
help
This is a backwards-compat option for the user's convenience
(e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
config IP6_NF_TARGET_NPT
tristate "NPT (Network Prefix translation) target support"
+ depends on IP6_NF_NAT || NFT_COMPAT
help
This option adds the `SNPT' and `DNPT' target, which perform
stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
To compile it as a module, choose M here. If unsure, say N.
-endif # IP6_NF_NAT
-
endif # IP6_NF_IPTABLES
endmenu
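
IP6_NF_IPTABLES_LEGACY is a promptless tristate: it never appears in menuconfig and can only be switched on via select from the user-visible table options, and per the Makefile change below it alone now decides whether the legacy ip6_tables core gets built. The mechanism in miniature (FOO_* names are illustrative):

# promptless tristate: invisible to the user, off unless selected
config FOO_LEGACY
	tristate

# any user-visible option that needs the legacy core selects it
config FOO_FILTER
	tristate "foo filter table"
	select FOO_LEGACY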
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index b8d6dc9aeeb6..66ce6fa5b2f5 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,7 +4,7 @@
#
# Link order matters here.
-obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
+obj-$(CONFIG_IP6_NF_IPTABLES_LEGACY) += ip6_tables.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b2dd48911c8d..1a51a44571c3 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -327,9 +327,9 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto err;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
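
The payload_len change is a pure identity rewrite: skb_network_offset() is defined as the signed distance from skb->data to the network header, so skb->data - skb_network_header(skb) equals -skb_network_offset(skb). From include/linux/skbuff.h, roughly:

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

The same rewrite reappears in ip6_frag_reasm() and xfrm6_transport_finish() below.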
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 196dd4ecb5e2..dedee264b8f6 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -83,7 +83,7 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
@@ -124,7 +124,7 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
skb_reset_transport_header(nskb);
icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index b5205311f372..806d4b5dd1e6 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -111,9 +111,9 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
- hoplimit = idev->cnf.hop_limit;
+ hoplimit = READ_ONCE(idev->cnf.hop_limit);
else
- hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
rcu_read_unlock();
}
return hoplimit;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 03dbb874c363..ca49e6617afa 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -160,6 +160,13 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
inet6_iif(skb), inet6_sdif(skb)))
continue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >=
+ READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ continue;
+ }
+
delivered = true;
switch (nexthdr) {
case IPPROTO_ICMPV6:
@@ -288,8 +295,7 @@ out:
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
- struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
bool recverr = inet6_test_bit(RECVERR6, sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -344,7 +350,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
if (!raw_v6_match(net, sk, nexthdr, &ip6h->saddr, &ip6h->daddr,
inet6_iif(skb), inet6_iif(skb)))
continue;
- rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
+ rawv6_err(sk, skb, type, code, inner_offset, info);
}
rcu_read_unlock();
}
@@ -616,7 +622,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
@@ -935,7 +941,7 @@ do_confirm:
goto done;
}
-static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_seticmpfilter(struct sock *sk, int optname,
sockptr_t optval, int optlen)
{
switch (optname) {
@@ -952,7 +958,7 @@ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
return 0;
}
-static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_geticmpfilter(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
int len;
@@ -1038,7 +1044,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_seticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
@@ -1099,7 +1105,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_geticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5ebc47da1000..acb4f119e11f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -272,9 +272,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto out_oom;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ef815ba583a8..1f4b935a0e57 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -645,14 +645,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
write_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
- neigh->updated + idev->cnf.rtr_probe_interval)) {
+ neigh->updated +
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work)
__neigh_set_probe_once(neigh);
}
write_unlock_bh(&neigh->lock);
} else if (time_after(jiffies, last_probe +
- idev->cnf.rtr_probe_interval)) {
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
@@ -931,6 +932,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
struct net *net = dev_net(dev);
struct route_info *rinfo = (struct route_info *) opt;
struct in6_addr prefix_buf, *prefix;
+ struct fib6_table *table;
unsigned int pref;
unsigned long lifetime;
struct fib6_info *rt;
@@ -989,10 +991,18 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
(rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
- if (!addrconf_finite_timeout(lifetime))
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!addrconf_finite_timeout(lifetime)) {
fib6_clean_expires(rt);
- else
+ fib6_remove_gc_list(rt);
+ } else {
fib6_set_expires(rt, jiffies + HZ * lifetime);
+ fib6_add_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(rt);
}
@@ -1587,7 +1597,7 @@ static unsigned int fib6_mtu(const struct fib6_result *res)
rcu_read_lock();
idev = __in6_dev_get(dev);
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
}
@@ -2085,12 +2095,12 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
*/
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
- RT6_TRACE("aging clone %p\n", rt);
+ pr_debug("aging clone %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
} else if (time_after(jiffies, rt->dst.expires)) {
- RT6_TRACE("purging expired route %p\n", rt);
+ pr_debug("purging expired route %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
@@ -2101,8 +2111,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (!(neigh && (neigh->flags & NTF_ROUTER))) {
- RT6_TRACE("purging route %p via non-router but gateway\n",
- rt);
+ pr_debug("purging route %p via non-router but gateway\n",
+ rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
@@ -2211,7 +2221,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
strict |= flags & RT6_LOOKUP_F_IFACE;
strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
rcu_read_lock();
@@ -3240,8 +3250,8 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
mtu = IPV6_MIN_MTU;
idev = __in6_dev_get(dev);
- if (idev && idev->cnf.mtu6 > mtu)
- mtu = idev->cnf.mtu6;
+ if (idev)
+ mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
}
mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
@@ -3765,8 +3775,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (cfg->fc_flags & RTF_EXPIRES)
fib6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
- else
- fib6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
@@ -4142,7 +4150,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+ if (READ_ONCE(in6_dev->cnf.forwarding) ||
+ !READ_ONCE(in6_dev->cnf.accept_redirects))
return;
/* RFC2461 8.1:
@@ -4355,7 +4364,8 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev,
unsigned int pref,
- u32 defrtr_usr_metric)
+ u32 defrtr_usr_metric,
+ int lifetime)
{
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
@@ -4368,6 +4378,7 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = net,
+ .fc_expires = jiffies_to_clock_t(lifetime * HZ),
};
cfg.fc_gateway = *gwaddr;
@@ -4574,8 +4585,8 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
f6i->dst_nocount = true;
if (!anycast &&
- (net->ipv6.devconf_all->disable_policy ||
- idev->cnf.disable_policy))
+ (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
+ READ_ONCE(idev->cnf.disable_policy)))
f6i->dst_nopolicy = true;
}
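
With lifetime plumbed into rt6_add_dflt_router(), a new default router gets its expiry at creation time through the normal fib6_config path, and ndisc only needs the tb6_lock refresh dance for an already-known router. fc_expires travels in clock_t units, hence the round trip visible in the two hunks; the dropped "else fib6_clean_expires(rt)" was presumably a no-op on a freshly created entry:

	/* caller (rt6_add_dflt_router): seconds -> clock_t */
	.fc_expires = jiffies_to_clock_t(lifetime * HZ),

	/* ip6_route_info_create(): clock_t -> a jiffies deadline */
	if (cfg->fc_flags & RTF_EXPIRES)
		fib6_set_expires(rt, jiffies +
				 clock_t_to_jiffies(cfg->fc_expires));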
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index d43c50a7310d..861e0366f549 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -241,6 +241,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
struct sr6_tlv_hmac *tlv;
struct ipv6_sr_hdr *srh;
struct inet6_dev *idev;
+ int require_hmac;
idev = __in6_dev_get(skb->dev);
@@ -248,16 +249,17 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
tlv = seg6_get_tlv_hmac(srh);
+ require_hmac = READ_ONCE(idev->cnf.seg6_require_hmac);
/* mandatory check but no tlv */
- if (idev->cnf.seg6_require_hmac > 0 && !tlv)
+ if (require_hmac > 0 && !tlv)
return false;
/* no check */
- if (idev->cnf.seg6_require_hmac < 0)
+ if (require_hmac < 0)
return true;
/* check only if present */
- if (idev->cnf.seg6_require_hmac == 0 && !tlv)
+ if (require_hmac == 0 && !tlv)
return true;
/* now, seg6_require_hmac >= 0 && tlv */
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 5e9f625b76e3..655c9b1a19b8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1398,7 +1398,6 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_uninit = ipip6_tunnel_uninit,
.ndo_start_xmit = sit_tunnel_xmit,
.ndo_siocdevprivate = ipip6_tunnel_siocdevprivate,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipip6_tunnel_ctl,
};
@@ -1408,7 +1407,6 @@ static void ipip6_dev_free(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
}
#define SIT_FEATURES (NETIF_F_SG | \
@@ -1437,6 +1435,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= SIT_FEATURES;
dev->hw_features |= SIT_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
}
static int ipip6_tunnel_init(struct net_device *dev)
@@ -1449,17 +1449,13 @@ static int ipip6_tunnel_init(struct net_device *dev)
strcpy(tunnel->parms.name, dev->name);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
- if (err) {
- free_percpu(dev->tstats);
- dev->tstats = NULL;
+ if (err)
return err;
- }
+
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1875,22 +1871,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit sit_exit_batch_net(struct list_head *net_list)
+static void __net_exit sit_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- LIST_HEAD(list);
struct net *net;
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- sit_destroy_tunnels(net, &list);
-
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ sit_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
- .exit_batch = sit_exit_batch_net,
+ .exit_batch_rtnl = sit_exit_batch_rtnl,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
};
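
The sit conversion completes the tstats pattern started in ip6_tunnel above: once dev->pcpu_stat_type is set, the registration path allocates dev->tstats and the unregister path frees it, and the core falls back to the tstats aggregation when no .ndo_get_stats64 is provided, which is why the manual alloc/free and the explicit stats hook are deleted. The selector enum, as added by the core-side support this builds on:

enum netdev_stat_type {
	NETDEV_PCPU_STAT_NONE,
	NETDEV_PCPU_STAT_LSTATS,	/* struct pcpu_lstats */
	NETDEV_PCPU_STAT_TSTATS,	/* struct pcpu_sw_netstats */
	NETDEV_PCPU_STAT_DSTATS,	/* struct pcpu_dstats */
};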
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index c8d2ca27220c..8bad0a44a0a6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -177,24 +177,33 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct sock *ret = sk;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -231,8 +240,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
- if (IS_ERR(dst))
+ if (IS_ERR(dst)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
}
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
@@ -247,14 +258,20 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, dst);
ret = tcp_get_cookie_sock(sk, skb, req, dst);
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ kfree_skb_reason(skb, reason);
return NULL;
}
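
cookie_v6_check() now reports precise drop reasons instead of freeing skbs silently. SKB_DR()/SKB_DR_SET() are thin wrappers over enum skb_drop_reason; roughly, from the dropreason headers:

#define SKB_DR_INIT(name, reason)				\
	enum skb_drop_reason name = SKB_DROP_REASON_##reason
#define SKB_DR(name)						\
	SKB_DR_INIT(name, NOT_SPECIFIED)
#define SKB_DR_SET(name, reason)				\
	(name = SKB_DROP_REASON_##reason)

kfree_skb_reason() then forwards the value to the kfree_skb tracepoint and the drop monitor, so each failure exit above becomes individually observable.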
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57b25b1fc9d9..3f4cba49e9ee 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -58,6 +58,7 @@
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
@@ -1623,7 +1624,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (np->rxopt.all)
opt_skb = skb_clone_and_charge_r(skb, sk);
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst;
@@ -1653,12 +1653,12 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_cookie_check(sk, skb);
- if (!nsk)
- goto discard;
-
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
- goto reset;
+ if (nsk) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason)
+ goto reset;
+ }
if (opt_skb)
__kfree_skb(opt_skb);
return 0;
@@ -1666,7 +1666,8 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb))
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason)
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
@@ -1856,10 +1857,12 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v6_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ tcp_v6_send_reset(nsk, skb);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
@@ -2365,11 +2368,6 @@ struct proto tcpv6_prot = {
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
-static const struct inet6_protocol tcpv6_protocol = {
- .handler = tcp_v6_rcv,
- .err_handler = tcp_v6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
static struct inet_protosw tcpv6_protosw = {
.type = SOCK_STREAM,
@@ -2406,7 +2404,12 @@ int __init tcpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
+ .handler = tcp_v6_rcv,
+ .err_handler = tcp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
if (ret)
goto out;
@@ -2431,7 +2434,7 @@ out_tcpv6_pernet_subsys:
out_tcpv6_protosw:
inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
goto out;
}
@@ -2439,5 +2442,5 @@ void tcpv6_exit(void)
{
unregister_pernet_subsys(&tcpv6_net_ops);
inet6_unregister_protosw(&tcpv6_protosw);
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index bf0c957e4b5e..4b07d1e6c952 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -66,15 +66,15 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
return tcp_gso_segment(skb, features);
}
-static const struct net_offload tcpv6_offload = {
- .callbacks = {
- .gso_segment = tcp6_gso_segment,
- .gro_receive = tcp6_gro_receive,
- .gro_complete = tcp6_gro_complete,
- },
-};
int __init tcpv6_offload_init(void)
{
- return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
+ net_hotdata.tcpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp6_gso_segment,
+ .gro_receive = tcp6_gro_receive,
+ .gro_complete = tcp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f2249b4cd5f..7c1e6469d091 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -79,9 +79,6 @@ u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *faddr,
const __be16 fport)
{
- static u32 udp6_ehash_secret __read_mostly;
- static u32 udp_ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
@@ -1101,11 +1098,12 @@ void udp_v6_early_demux(struct sk_buff *skb)
else
return;
- if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+ if (!sk)
return;
skb->sk = sk;
- skb->destructor = sock_efree;
+ DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
+ skb->destructor = sock_pfree;
dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
@@ -1702,11 +1700,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
-static const struct inet6_protocol udpv6_protocol = {
- .handler = udpv6_rcv,
- .err_handler = udpv6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
@@ -1803,7 +1796,12 @@ int __init udpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
+ net_hotdata.udpv6_protocol = (struct inet6_protocol) {
+ .handler = udpv6_rcv,
+ .err_handler = udpv6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
@@ -1814,12 +1812,12 @@ out:
return ret;
out_udpv6_protocol:
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}
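
The early-demux change relies on UDP sockets being RCU-freed: inside the RCU read section the pointer stays valid without bumping sk_refcnt, so the destructor switches from sock_efree(), which unconditionally drops a reference, to sock_pfree(), which drops one only if it was actually taken. Roughly, from net/core/sock.c:

void sock_pfree(struct sk_buff *skb)
{
	if (sk_is_refcounted(skb->sk))
		sock_gen_put(skb->sk);
}

The DEBUG_NET_WARN_ON_ONCE() documents the expectation that no refcounted socket reaches this path.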
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6b95ba241ebe..312bcaeea96f 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -192,20 +192,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
-static const struct net_offload udpv6_offload = {
- .callbacks = {
- .gso_segment = udp6_ufo_fragment,
- .gro_receive = udp6_gro_receive,
- .gro_complete = udp6_gro_complete,
- },
-};
-
-int udpv6_offload_init(void)
+int __init udpv6_offload_init(void)
{
- return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
+ net_hotdata.udpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp6_ufo_fragment,
+ .gro_receive = udp6_gro_receive,
+ .gro_complete = udp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
int udpv6_offload_exit(void)
{
- return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+ return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 6e36e5047fba..a17d783dc7c0 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,7 +43,7 @@ static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- int nhlen = skb->data - skb_network_header(skb);
+ int nhlen = -skb_network_offset(skb);
skb_network_header(skb)[IP6CB(skb)->nhoff] =
XFRM_MODE_SKB_CB(skb)->protocol;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f6cb94f82cc3..bf140ef781c1 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -355,10 +355,7 @@ static int __init xfrm6_tunnel_init(void)
{
int rv;
- xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
- sizeof(struct xfrm6_tunnel_spi),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ xfrm6_tunnel_spi_kmem = KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 498a0c35b7bb..4aa1c72e6c49 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1060,13 +1060,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
int i;
/* skip iucv_array lying in the headroom */
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = (u32)virt_to_phys(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
@@ -1162,13 +1161,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = (u32)virt_to_phys(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index b0b3e9c5af44..5b56ae6612dd 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -286,6 +286,7 @@ static union iucv_param *iucv_param_irq[NR_CPUS];
*/
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
+ unsigned long reg1 = virt_to_phys(parm);
int cc;
asm volatile(
@@ -296,7 +297,7 @@ static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
" srl %[cc],28\n"
: [cc] "=&d" (cc), "+m" (*parm)
: [reg0] "d" ((unsigned long)command),
- [reg1] "d" ((unsigned long)parm)
+ [reg1] "d" (reg1)
: "cc", "0", "1");
return cc;
}
@@ -1123,7 +1124,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ipmsgid = msg->id;
parm->db.ippathid = path->pathid;
@@ -1241,7 +1242,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) reply;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(reply);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags;
@@ -1293,7 +1294,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.ipmsgtag = msg->tag;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags | IUCV_IPNORPY;
@@ -1378,7 +1379,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
- parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
+ parm->dpl.ipbfadr2 = (u32)virt_to_phys(answer);
parm->dpl.ipbfln2f = (u32) asize;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
@@ -1387,9 +1388,9 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
- parm->db.ipbfadr2 = (u32)(addr_t) answer;
+ parm->db.ipbfadr2 = (u32)virt_to_phys(answer);
parm->db.ipbfln2f = (u32) asize;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
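
The iucv changes are mechanical but meaningful on s390: the b2f0 parameter blocks and buffers are handed to the machine by address, and the old (u32)(addr_t) casts silently assumed virtual == physical, which only holds while the kernel runs identity-mapped. Making the translation explicit (and computing reg1 before the inline asm) keeps the code correct if that ever changes:

	/* before: implicit identity-map assumption */
	parm->db.ipbfadr1 = (u32)(addr_t)buffer;
	/* after: explicit virtual-to-physical translation */
	parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);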
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 1184d40167b8..2f191e50d4fc 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -627,7 +627,8 @@ retry:
skb = txm->frag_skb;
}
- if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
+ if (WARN_ON(!skb_shinfo(skb)->nr_frags) ||
+ WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
ret = -EINVAL;
goto out;
}
@@ -637,8 +638,8 @@ retry:
msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
- skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
- msize);
+ (const struct bio_vec *)skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->nr_frags, msize);
iov_iter_advance(&msg.msg_iter, txm->frag_offset);
do {
@@ -1152,10 +1153,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case KCM_RECV_DISABLE:
val = kcm->rx_disabled;
@@ -1877,15 +1879,11 @@ static int __init kcm_init(void)
{
int err = -ENOMEM;
- kcm_muxp = kmem_cache_create("kcm_mux_cache",
- sizeof(struct kcm_mux), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_muxp = KMEM_CACHE(kcm_mux, SLAB_HWCACHE_ALIGN);
if (!kcm_muxp)
goto fail;
- kcm_psockp = kmem_cache_create("kcm_psock_cache",
- sizeof(struct kcm_psock), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_psockp = KMEM_CACHE(kcm_psock, SLAB_HWCACHE_ALIGN);
if (!kcm_psockp)
goto fail;
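
kcm_getsockopt() above (and pppol2tp_getsockopt() just below, which gets the identical fix) clamped the user-supplied length before checking its sign, and min_t's unsigned cast makes the sign check unreachable. A standalone demonstration; the demo macro mirrors the kernel's min_t only in spirit (the real one also type-checks and avoids double evaluation):

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	int len = -1;

	/* buggy order: the unsigned cast turns -1 into UINT_MAX, the min
	 * therefore picks sizeof(int), and len silently becomes 4 */
	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)			/* unreachable after the clamp */
		return 1;
	printf("len = %d\n", len);	/* prints 4 instead of failing */
	return 0;
}

Checking len < 0 first, as the patch does, preserves the -EINVAL return for negative lengths.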
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 25ca89f80414..39e487ccc468 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -100,7 +100,7 @@ static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct device_type l2tpeth_type = {
+static const struct device_type l2tpeth_type = {
.name = "l2tpeth",
};
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 9a2a9ed3ba47..970af3983d11 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -478,7 +478,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rt = ip_route_output_ports(sock_net(sk), fl4, sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_protocol, ip_sock_rt_tos(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f011af6601c9..6146e4e67bbb 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1356,11 +1356,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
err = -ENOTCONN;
if (!sk->sk_user_data)
goto end;
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4406b4f8f3b9..a33884967f21 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -29,7 +29,7 @@ mac80211-y := \
spectmgmt.o \
tx.o \
key.o \
- util.o \
+ util.o parse.o \
wme.o \
chan.o \
trace.o mlme.o \
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b8a278355e18..21d55dc539f6 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -616,7 +616,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported &&
- sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
+ sta->sdata->vif.bss_conf.chanreq.oper.chan->band != NL80211_BAND_6GHZ)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
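
From here on, the mac80211 hunks convert bare cfg80211_chan_def usage to the new channel-request type: callers wrap their chandef on the stack and the configured channel is read back via bss_conf.chanreq.oper. A sketch of the shape these hunks exercise; only the .oper member appears in this section, and the real struct may carry additional requirement fields:

struct ieee80211_chan_req {
	struct cfg80211_chan_def oper;	/* requested operating chandef */
	/* ... possibly further constraint fields ... */
};

/* caller pattern, as in ieee80211_set_monitor_channel() below */
struct ieee80211_chan_req chanreq = { .oper = *chandef };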
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 327682995c92..f03452dc716d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -886,33 +886,32 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
- int ret = 0;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
+ int ret;
lockdep_assert_wiphy(local->hw.wiphy);
- if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
+ if (cfg80211_chandef_identical(&local->monitor_chanreq.oper,
+ &chanreq.oper))
return 0;
- if (local->use_chanctx) {
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata) {
- ieee80211_link_release_channel(&sdata->deflink);
- ret = ieee80211_link_use_channel(&sdata->deflink,
- chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
- }
- } else {
- if (local->open_count == local->monitors) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
- }
+ sdata = wiphy_dereference(local->hw.wiphy,
+ local->monitor_sdata);
+ if (!sdata)
+ goto done;
- if (ret == 0)
- local->monitor_chandef = *chandef;
+ if (cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper,
+ &chanreq.oper))
+ return 0;
- return ret;
+ ieee80211_link_release_channel(&sdata->deflink);
+ ret = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
+ IEEE80211_CHANCTX_EXCLUSIVE);
+ if (ret)
+ return ret;
+done:
+ local->monitor_chanreq = chanreq;
+ return 0;
}
static int
@@ -953,7 +952,8 @@ ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
struct cfg80211_fils_discovery *params,
struct ieee80211_link_data *link,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf,
+ u64 *changed)
{
struct fils_discovery_data *new, *old = NULL;
struct ieee80211_fils_discovery *fd;
@@ -980,7 +980,8 @@ static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
}
- return BSS_CHANGED_FILS_DISCOVERY;
+ *changed |= BSS_CHANGED_FILS_DISCOVERY;
+ return 0;
}
static int
@@ -1240,6 +1241,30 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static u8 ieee80211_num_beaconing_links(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_link_data *link;
+ u8 link_id, num = 0;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_P2P_GO)
+ return num;
+
+ if (!sdata->vif.valid_links)
+ return num;
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
+ if (sdata_dereference(link->u.ap.beacon, sdata))
+ num++;
+ }
+
+ return num;
+}
+
static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *params)
{
@@ -1258,6 +1283,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id = params->beacon.link_id;
struct ieee80211_link_data *link;
struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1341,8 +1367,6 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
link_conf->eht_support = true;
- link_conf->eht_puncturing = params->punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
link_conf->eht_su_beamformer =
params->eht_cap->fixed.phy_cap_info[0] &
@@ -1370,7 +1394,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return err;
}
- err = ieee80211_link_use_channel(link, &params->chandef,
+ err = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (!err)
ieee80211_link_copy_chanctx_to_vlans(link, false);
@@ -1445,10 +1469,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
goto error;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
goto error;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
@@ -1471,7 +1494,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID);
ieee80211_link_info_change_notify(sdata, link, changed);
- netif_carrier_on(dev);
+ if (ieee80211_num_beaconing_links(sdata) <= 1)
+ netif_carrier_on(dev);
+
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_on(vlan->dev);
@@ -1519,10 +1544,9 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
return err;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
return err;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
@@ -1565,6 +1589,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_link_data *link =
sdata_dereference(sdata->link[link_id], sdata);
struct ieee80211_bss_conf *link_conf = link->conf;
+ LIST_HEAD(keys);
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1582,10 +1607,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* abort any running channel switch or color change */
link_conf->csa_active = false;
link_conf->color_change_active = false;
- if (link->csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- link->csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
ieee80211_free_next_beacon(link);
@@ -1593,7 +1618,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
- netif_carrier_off(dev);
+
+ if (ieee80211_num_beaconing_links(sdata) <= 1)
+ netif_carrier_off(dev);
/* remove beacon and probe response */
sdata->u.ap.active = false;
@@ -1618,8 +1645,13 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->ema_ap = false;
link_conf->bssid_indicator = 0;
- __sta_info_flush(sdata, true);
- ieee80211_free_keys(sdata, true);
+ __sta_info_flush(sdata, true, link_id);
+
+ ieee80211_remove_link_keys(link, &keys);
+ if (!list_empty(&keys)) {
+ synchronize_net();
+ ieee80211_free_key_list(local, &keys);
+ }
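(Unlinking the per-link keys first and only calling synchronize_net() before the actual free follows the usual RCU unpublish-then-wait idiom: after the grace period, no concurrent RX/TX path can still hold a reference to the keys being freed.)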
link_conf->enable_beacon = false;
sdata->beacon_rate_set = false;
@@ -1629,7 +1661,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_BEACON_ENABLED);
if (sdata->wdev.cac_started) {
- chandef = link_conf->chandef;
+ chandef = link_conf->chanreq.oper;
wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
@@ -1829,7 +1861,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
if (params->supported_rates &&
params->supported_rates_len) {
- ieee80211_parse_bitrates(link->conf->chandef.width,
+ ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
sband, params->supported_rates,
params->supported_rates_len,
&link_sta->pub->supp_rates[sband->band]);
@@ -1869,7 +1901,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}
- ieee80211_sta_set_rx_nss(link_sta);
+ ieee80211_sta_init_nss(link_sta);
return ret;
}
@@ -1944,6 +1976,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
}
+ if (mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ sta->sta.spp_amsdu = set & BIT(NL80211_STA_FLAG_SPP_AMSDU);
+
/* mark TDLS channel switch support, if the AP allows it */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
!sdata->deflink.u.mgd.tdls_chan_switch_prohibited &&
@@ -2095,7 +2130,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (params->mac)
return sta_info_destroy_addr_bss(sdata, params->mac);
- sta_info_flush(sdata);
+ sta_info_flush(sdata, params->link_id);
return 0;
}
@@ -2602,6 +2637,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
const struct mesh_setup *setup)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
int err;
@@ -2618,7 +2654,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -2661,7 +2697,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
return -EINVAL;
if (params->basic_rates) {
- if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+ if (!ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
wiphy->bands[sband->band],
params->basic_rates,
params->basic_rates_len,
@@ -3083,7 +3119,7 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
if (local->ops->get_txpower)
return drv_get_txpower(local, sdata, dbm);
- if (!local->use_chanctx)
+ if (local->emulate_chanctx)
*dbm = local->hw.conf.power_level;
else
*dbm = sdata->vif.bss_conf.txpower;
@@ -3153,8 +3189,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return -EINVAL;
- if (ieee80211_vif_is_mld(&sdata->vif) &&
- !(sdata->vif.active_links & BIT(link->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
return 0;
old_req = link->u.mgd.req_smps;
@@ -3176,7 +3211,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
* the new value until we associate.
*/
if (!sdata->u.mgd.associated ||
- link->conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ link->conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return 0;
ap = sdata->vif.cfg.ap_addr;
@@ -3207,7 +3242,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
if (err)
link->u.mgd.req_smps = old_req;
else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
- ieee80211_teardown_tdls_peers(sdata);
+ ieee80211_teardown_tdls_peers(link);
return err;
}
@@ -3254,33 +3289,57 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static void ieee80211_set_cqm_rssi_link(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ s32 rssi_thold, u32 rssi_hyst,
+ s32 rssi_low, s32 rssi_high)
+{
+ struct ieee80211_bss_conf *conf;
+
+ if (!link || !link->conf)
+ return;
+
+ conf = link->conf;
+
+ if (rssi_thold && rssi_hyst &&
+ rssi_thold == conf->cqm_rssi_thold &&
+ rssi_hyst == conf->cqm_rssi_hyst)
+ return;
+
+ conf->cqm_rssi_thold = rssi_thold;
+ conf->cqm_rssi_hyst = rssi_hyst;
+ conf->cqm_rssi_low = rssi_low;
+ conf->cqm_rssi_high = rssi_high;
+ link->u.mgd.last_cqm_event_signal = 0;
+
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
+ return;
+
+ if (sdata->u.mgd.associated &&
+ (sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_CQM);
+}
+
static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_vif *vif = &sdata->vif;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-
- if (rssi_thold == bss_conf->cqm_rssi_thold &&
- rssi_hyst == bss_conf->cqm_rssi_hyst)
- return 0;
+ int link_id;
- if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
- !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+ !(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return -EOPNOTSUPP;
- bss_conf->cqm_rssi_thold = rssi_thold;
- bss_conf->cqm_rssi_hyst = rssi_hyst;
- bss_conf->cqm_rssi_low = 0;
- bss_conf->cqm_rssi_high = 0;
- sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ /* For MLD, handle CQM change on all the active links */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
- /* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated &&
- sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_CQM);
+ ieee80211_set_cqm_rssi_link(sdata, link, rssi_thold, rssi_hyst,
+ 0, 0);
+ }
return 0;
}
@@ -3291,22 +3350,19 @@ static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_vif *vif = &sdata->vif;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ int link_id;
- if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
+ if (vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)
return -EOPNOTSUPP;
- bss_conf->cqm_rssi_low = rssi_low;
- bss_conf->cqm_rssi_high = rssi_high;
- bss_conf->cqm_rssi_thold = 0;
- bss_conf->cqm_rssi_hyst = 0;
- sdata->deflink.u.mgd.last_cqm_event_signal = 0;
+ /* For MLD, handle CQM change on all the active links */
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct ieee80211_link_data *link =
+ sdata_dereference(sdata->link[link_id], sdata);
- /* tell the driver upon association, unless already associated */
- if (sdata->u.mgd.associated &&
- sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- BSS_CHANGED_CQM);
+ ieee80211_set_cqm_rssi_link(sdata, link, 0, 0,
+ rssi_low, rssi_high);
+ }
return 0;
}
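Both CQM entry points above now fan out to the shared per-link helper across all IEEE80211_MLD_MAX_NUM_LINKS slots, with complementary arguments selecting the mode (call shapes exactly as in the hunks above):

	/* threshold/hysteresis mode, range parameters zeroed: */
	ieee80211_set_cqm_rssi_link(sdata, link, rssi_thold, rssi_hyst, 0, 0);
	/* explicit low/high range mode, threshold parameters zeroed: */
	ieee80211_set_cqm_rssi_link(sdata, link, 0, 0, rssi_low, rssi_high);

Since the helper bails out on NULL link slots, the callers can walk the whole link array without per-slot checks.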
@@ -3331,9 +3387,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
* so at a basic rate so that all clients can receive it.
*/
if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) &&
- sdata->vif.bss_conf.chandef.chan) {
+ sdata->vif.bss_conf.chanreq.oper.chan) {
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
+ enum nl80211_band band;
+
+ band = sdata->vif.bss_conf.chanreq.oper.chan->band;
if (!(mask->control[band].legacy & basic_rates))
return -EINVAL;
@@ -3385,6 +3443,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
u32 cac_time_ms)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
struct ieee80211_local *local = sdata->local;
int err;
@@ -3399,7 +3458,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
goto out_unlock;
@@ -3542,13 +3601,24 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
return new_beacon;
}
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
rcu_read_lock();
+ link_data = rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(!link_data)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* TODO: MBSSID with MLO changes */
if (vif->mbssid_tx_vif == vif) {
/* Trigger ieee80211_csa_finish() on the non-transmitting
* interfaces when channel switch is received on
@@ -3567,7 +3637,7 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
&iter->deflink.csa_finalize_work);
}
}
- wiphy_work_queue(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+ wiphy_work_queue(local->hw.wiphy, &link_data->csa_finalize_work);
rcu_read_unlock();
}
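With the link ID now part of the exported API, a driver finishing a channel switch has to say which link completed. An illustrative driver-side call, assuming the driver kept the struct ieee80211_channel_switch it was handed (which now carries link_id, see the __ieee80211_channel_switch() hunk further below):

	/* was: ieee80211_csa_finish(vif); */
	ieee80211_csa_finish(vif, ch_switch->link_id);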
@@ -3579,26 +3649,27 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- sdata->deflink.csa_block_tx = block_tx;
+ sdata->csa_blocked_tx = block_tx;
sdata_info(sdata, "channel switch failed, disconnecting\n");
wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
}
EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
-static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_after_csa_beacon(struct ieee80211_link_data *link_data,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -EINVAL;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
- sdata->deflink.u.ap.next_beacon,
+ err = ieee80211_assign_beacon(sdata, link_data,
+ link_data->u.ap.next_beacon,
NULL, NULL, changed);
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
if (err < 0)
return err;
@@ -3627,6 +3698,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
{
struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *link_conf = link_data->conf;
u64 changed = 0;
int err;
@@ -3648,40 +3720,33 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
if (link_data->reserved_ready)
return 0;
- return ieee80211_link_use_reserved_context(&sdata->deflink);
+ return ieee80211_link_use_reserved_context(link_data);
}
- if (!cfg80211_chandef_identical(&link_data->conf->chandef,
- &link_data->csa_chandef))
+ if (!cfg80211_chandef_identical(&link_conf->chanreq.oper,
+ &link_data->csa_chanreq.oper))
return -EINVAL;
- sdata->vif.bss_conf.csa_active = false;
+ link_conf->csa_active = false;
- err = ieee80211_set_after_csa_beacon(sdata, &changed);
+ err = ieee80211_set_after_csa_beacon(link_data, &changed);
if (err)
return err;
- if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
- sdata->vif.bss_conf.eht_puncturing =
- sdata->vif.bss_conf.csa_punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-
ieee80211_link_info_change_notify(sdata, link_data, changed);
- if (link_data->csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- link_data->csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
err = drv_post_channel_switch(link_data);
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chandef,
- link_data->link_id,
- link_data->conf->eht_puncturing);
+ cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chanreq.oper,
+ link_data->link_id);
return 0;
}
@@ -3691,7 +3756,8 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
struct ieee80211_sub_if_data *sdata = link_data->sdata;
if (__ieee80211_csa_finalize(link_data)) {
- sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
+ sdata_info(sdata, "failed to finalize CSA on link %d, disconnecting\n",
+ link_data->link_id);
cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
GFP_KERNEL);
}
@@ -3716,18 +3782,19 @@ void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
ieee80211_csa_finalize(link);
}
-static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_csa_beacon(struct ieee80211_link_data *link_data,
struct cfg80211_csa_settings *params,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_csa_settings csa = {};
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->deflink.u.ap.next_beacon =
+ link_data->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -ENOMEM;
/*
@@ -3753,7 +3820,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return -EINVAL;
}
@@ -3763,11 +3830,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
csa.count = params->count;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
+ err = ieee80211_assign_beacon(sdata, link_data,
&params->beacon_csa, &csa,
NULL, changed);
if (err < 0) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return err;
}
@@ -3814,7 +3881,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
/* changes into another band are not supported */
- if (sdata->vif.bss_conf.chandef.chan->band !=
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band !=
params->chandef.chan->band)
return -EINVAL;
@@ -3862,11 +3929,17 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
struct ieee80211_local *local = sdata->local;
- struct ieee80211_channel_switch ch_switch;
+ struct ieee80211_channel_switch ch_switch = {
+ .link_id = params->link_id,
+ };
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_data *link_data;
u64 changed = 0;
+ u8 link_id = params->link_id;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3877,16 +3950,23 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (sdata->wdev.cac_started)
return -EBUSY;
- if (cfg80211_chandef_identical(&params->chandef,
- &sdata->vif.bss_conf.chandef))
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return -EINVAL;
+
+ link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
+ if (!link_data)
+ return -ENOLINK;
+
+ link_conf = link_data->conf;
+
+ if (chanreq.oper.punctured && !link_conf->eht_support)
return -EINVAL;
/* don't allow another channel switch if one is already active. */
- if (sdata->vif.bss_conf.csa_active)
+ if (link_conf->csa_active)
return -EBUSY;
- conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ conf = wiphy_dereference(wiphy, link_conf->chanctx_conf);
if (!conf) {
err = -EBUSY;
goto out;
@@ -3903,14 +3983,14 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
ch_switch.timestamp = 0;
ch_switch.device_timestamp = 0;
ch_switch.block_tx = params->block_tx;
- ch_switch.chandef = params->chandef;
+ ch_switch.chandef = chanreq.oper;
ch_switch.count = params->count;
err = drv_pre_channel_switch(sdata, &ch_switch);
if (err)
goto out;
- err = ieee80211_link_reserve_chanctx(&sdata->deflink, &params->chandef,
+ err = ieee80211_link_reserve_chanctx(link_data, &chanreq,
chanctx->mode,
params->radar_required);
if (err)
@@ -3919,44 +3999,40 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
/* if reservation is invalid then this will fail */
err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
/* if there is a color change in progress, abort it */
- if (sdata->vif.bss_conf.color_change_active)
+ if (link_conf->color_change_active)
ieee80211_color_change_abort(sdata);
- err = ieee80211_set_csa_beacon(sdata, params, &changed);
+ err = ieee80211_set_csa_beacon(link_data, params, &changed);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
- if (params->punct_bitmap && !sdata->vif.bss_conf.eht_support)
- goto out;
+ link_data->csa_chanreq = chanreq;
+ link_conf->csa_active = true;
- sdata->deflink.csa_chandef = params->chandef;
- sdata->deflink.csa_block_tx = params->block_tx;
- sdata->vif.bss_conf.csa_active = true;
- sdata->vif.bss_conf.csa_punct_bitmap = params->punct_bitmap;
-
- if (sdata->deflink.csa_block_tx)
+ if (params->block_tx &&
+ !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_tx = true;
+ }
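Note that the queues are now only stopped in software when the hardware doesn't handle quiet periods around the switch itself (HANDLES_QUIET_CSA), and the blocked-TX flag moved from the link to the sdata, plausibly because vif queues are per interface rather than per link.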
cfg80211_ch_switch_started_notify(sdata->dev,
- &sdata->deflink.csa_chandef, 0,
- params->count, params->block_tx,
- sdata->vif.bss_conf.csa_punct_bitmap);
+ &link_data->csa_chanreq.oper, 0,
+ params->count, params->block_tx);
if (changed) {
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- changed);
- drv_channel_switch_beacon(sdata, &params->chandef);
+ ieee80211_link_info_change_notify(sdata, link_data, changed);
+ drv_channel_switch_beacon(sdata, &link_data->csa_chanreq.oper);
} else {
/* if the beacon didn't change, we can finalize immediately */
- ieee80211_csa_finalize(&sdata->deflink);
+ ieee80211_csa_finalize(link_data);
}
out:
@@ -4206,15 +4282,12 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (chanctx_conf) {
- *chandef = link->conf->chandef;
+ *chandef = link->conf->chanreq.oper;
ret = 0;
} else if (local->open_count > 0 &&
local->open_count == local->monitors &&
sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (local->use_chanctx)
- *chandef = local->monitor_chandef;
- else
- *chandef = local->_oper_chandef;
+ *chandef = local->monitor_chanreq.oper;
ret = 0;
}
out:
@@ -4262,12 +4335,13 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_link_data *link;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
int ret;
u64 changed = 0;
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_change_bandwidth(link, chandef, &changed);
+ ret = ieee80211_link_change_chanreq(link, &chanreq, &changed);
if (ret == 0)
ieee80211_link_info_change_notify(sdata, link, changed);
@@ -4749,7 +4823,7 @@ EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp)
+ u64 color_bitmap)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_link_data *link = &sdata->deflink;
@@ -4968,6 +5042,17 @@ static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
return local->ops->set_hw_timestamp(&local->hw, &sdata->vif, hwts);
}
+static int
+ieee80211_set_ttlm(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+ return ieee80211_req_neg_ttlm(sdata, params);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -5080,4 +5165,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.mod_link_station = ieee80211_mod_link_station,
.del_link_station = ieee80211_del_link_station,
.set_hw_timestamp = ieee80211_set_hw_timestamp,
+ .set_ttlm = ieee80211_set_ttlm,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ef4c2cebc080..80e4b9784131 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 - channel management
- * Copyright 2020 - 2022 Intel Corporation
+ * Copyright 2020 - 2024 Intel Corporation
*/
#include <linux/nl80211.h>
@@ -81,87 +81,122 @@ ieee80211_link_get_chanctx(struct ieee80211_link_data *link)
return container_of(conf, struct ieee80211_chanctx, conf);
}
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b)
+{
+ if (!cfg80211_chandef_identical(&a->oper, &b->oper))
+ return false;
+ if (!a->ap.chan && !b->ap.chan)
+ return true;
+ return cfg80211_chandef_identical(&a->ap, &b->ap);
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanreq_compatible(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct cfg80211_chan_def *compat;
+
+ if (a->ap.chan && b->ap.chan &&
+ !cfg80211_chandef_identical(&a->ap, &b->ap))
+ return NULL;
+
+ compat = cfg80211_chandef_compatible(&a->oper, &b->oper);
+ if (!compat)
+ return NULL;
+
+ /* Note: later code assumes this always fills & returns tmp if compat */
+ tmp->oper = *compat;
+ tmp->ap = a->ap.chan ? a->ap : b->ap;
+ return tmp;
+}
+
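A chan_req pairs the operating chandef with an optional AP chandef (used for EHT wider-bandwidth operation); the helper above merges two requests only if their AP parts agree and their operating parts are chandef-compatible, filling the caller's tmp on success. A standalone toy model of that merge rule, with a deliberately simplified notion of "compatible" (the real cfg80211_chandef_compatible() also checks control channel, secondary center frequencies, puncturing and so on):

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_chandef { int center_mhz; int width_mhz; };	/* width 0 == unset */
	struct toy_chanreq { struct toy_chandef oper; struct toy_chandef ap; };

	static bool toy_identical(const struct toy_chandef *a,
				  const struct toy_chandef *b)
	{
		return a->center_mhz == b->center_mhz &&
		       a->width_mhz == b->width_mhz;
	}

	/* toy rule: compatible when centered the same; the wider one wins */
	static const struct toy_chandef *
	toy_compatible(const struct toy_chandef *a, const struct toy_chandef *b)
	{
		if (a->center_mhz != b->center_mhz)
			return NULL;
		return a->width_mhz >= b->width_mhz ? a : b;
	}

	static const struct toy_chanreq *
	toy_chanreq_compatible(const struct toy_chanreq *a,
			       const struct toy_chanreq *b,
			       struct toy_chanreq *tmp)
	{
		const struct toy_chandef *compat;

		/* both carry an AP chandef: they must be identical */
		if (a->ap.width_mhz && b->ap.width_mhz &&
		    !toy_identical(&a->ap, &b->ap))
			return NULL;

		compat = toy_compatible(&a->oper, &b->oper);
		if (!compat)
			return NULL;

		/* like the kernel helper: fill and return tmp on success */
		tmp->oper = *compat;
		tmp->ap = a->ap.width_mhz ? a->ap : b->ap;	/* keep whichever is set */
		return tmp;
	}

	int main(void)
	{
		struct toy_chanreq a = { .oper = { 5250, 80 } };	/* no AP part */
		struct toy_chanreq b = { .oper = { 5250, 160 },
					 .ap   = { 5250, 320 } };
		struct toy_chanreq tmp;

		if (toy_chanreq_compatible(&a, &b, &tmp))
			printf("merged: oper %d MHz, ap %d MHz\n",
			       tmp.oper.width_mhz, tmp.ap.width_mhz);
		return 0;
	}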
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_compatible(struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret;
+ struct ieee80211_chan_req tmp2;
+
+ *tmp = (struct ieee80211_chan_req){
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+
+ ret = ieee80211_chanreq_compatible(tmp, req, &tmp2);
+ if (!ret)
+ return NULL;
+ *tmp = *ret;
+ return tmp;
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_reserved_chanreq(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->reserved_links,
- reserved_chanctx_list) {
- if (!compat)
- compat = &link->reserved_chandef;
+ if (WARN_ON(!req))
+ return NULL;
- compat = cfg80211_chandef_compatible(&link->reserved_chandef,
- compat);
- if (!compat)
+ list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) {
+ req = ieee80211_chanreq_compatible(&link->reserved, req, tmp);
+ if (!req)
break;
}
- return compat;
+ return req;
}
-static const struct cfg80211_chan_def *
+static const struct ieee80211_chan_req *
ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *compat,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
+ const struct ieee80211_chan_req *comp_def = compat;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->assigned_links,
- assigned_chanctx_list) {
+ list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) {
struct ieee80211_bss_conf *link_conf = link->conf;
if (link->reserved_chanctx)
continue;
- if (!compat)
- compat = &link_conf->chandef;
-
- compat = cfg80211_chandef_compatible(
- &link_conf->chandef, compat);
- if (!compat)
+ comp_def = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ comp_def, tmp);
+ if (!comp_def)
break;
}
- return compat;
-}
-
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
-{
- lockdep_assert_wiphy(local->hw.wiphy);
-
- compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- return compat;
+ return comp_def;
}
static bool
-ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *def)
+ieee80211_chanctx_can_reserve(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req)
{
+ struct ieee80211_chan_req tmp;
+
lockdep_assert_wiphy(local->hw.wiphy);
- if (ieee80211_chanctx_combined_chandef(local, ctx, def))
- return true;
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
+ return false;
+
+ if (!ieee80211_chanctx_non_reserved_chandef(local, ctx, req, &tmp))
+ return false;
if (!list_empty(&ctx->reserved_links) &&
- ieee80211_chanctx_reserved_chandef(local, ctx, def))
+ ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
return true;
return false;
@@ -169,7 +204,7 @@ ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -186,8 +221,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
- chandef))
+ if (!ieee80211_chanctx_can_reserve(local, ctx, chanreq))
continue;
return ctx;
@@ -202,7 +236,7 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
enum ieee80211_sta_rx_bandwidth width;
struct link_sta_info *link_sta;
- link_sta = rcu_dereference(sta->link[link_id]);
+ link_sta = wiphy_dereference(sta->local->hw.wiphy, sta->link[link_id]);
/* no effect if this STA has no presence on this link */
if (!link_sta)
@@ -240,9 +274,10 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
}
static enum nl80211_chan_width
-ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
- unsigned int link_id)
+ieee80211_get_max_required_bw(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
struct sta_info *sta;
@@ -258,31 +293,25 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
}
static enum nl80211_chan_width
-ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ struct ieee80211_link_data *rsvd_for)
{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_vif *vif = &sdata->vif;
- int link_id;
- rcu_read_lock();
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ for_each_sdata_link(local, link) {
enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_link_data *link =
- rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
if (link != rsvd_for &&
rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
continue;
- switch (vif->type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
- width = ieee80211_get_max_required_bw(sdata, link_id);
+ width = ieee80211_get_max_required_bw(link);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -290,8 +319,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
* point, so take the width from the chandef, but
* account also for TDLS peers
*/
- width = max(link->conf->chandef.width,
- ieee80211_get_max_required_bw(sdata, link_id));
+ width = max(link->conf->chanreq.oper.width,
+ ieee80211_get_max_required_bw(link));
break;
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
@@ -299,7 +328,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
- width = link->conf->chandef.width;
+ width = link->conf->chanreq.oper.width;
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
@@ -312,40 +341,13 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
max_bw = max(max_bw, width);
}
- rcu_read_unlock();
-
- return max_bw;
-}
-
-static enum nl80211_chan_width
-ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
-{
- struct ieee80211_sub_if_data *sdata;
- enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- enum nl80211_chan_width width;
-
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
- rsvd_for);
-
- max_bw = max(max_bw, width);
- }
/* use the configured bandwidth in case of monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
max_bw = max(max_bw, ctx->conf.def.width);
- rcu_read_unlock();
-
return max_bw;
}
@@ -382,7 +384,7 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
/* downgrade chandef up to max_bw */
min_def = ctx->conf.def;
while (min_def.width > max_bw)
- ieee80211_chandef_downgrade(&min_def);
+ ieee80211_chandef_downgrade(&min_def, NULL);
if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
return 0;
@@ -395,7 +397,7 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
}
 /* calling this function assumes that the station vif is updated to
- * lates changes by calling ieee80211_link_update_chandef
+ * latest changes by calling ieee80211_link_update_chanreq
*/
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
@@ -475,10 +477,15 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
static void _ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
struct ieee80211_link_data *rsvd_for)
{
- u32 changed;
+ const struct cfg80211_chan_def *chandef = &chanreq->oper;
+ struct ieee80211_chan_req ctx_req = {
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+ u32 changed = 0;
/* expected to handle only 20/40/80/160/320 channel widths */
switch (chandef->width) {
@@ -500,47 +507,52 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local,
*/
ieee80211_chan_bw_change(local, old_ctx, true);
- if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+ if (ieee80211_chanreq_identical(&ctx_req, chanreq)) {
ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
return;
}
- WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+ WARN_ON(ieee80211_chanctx_refcount(local, ctx) > 1 &&
+ !cfg80211_chandef_compatible(&ctx->conf.def, &chanreq->oper));
ieee80211_remove_wbrf(local, &ctx->conf.def);
+ if (!cfg80211_chandef_identical(&ctx->conf.def, &chanreq->oper)) {
+ if (ctx->conf.def.width != chanreq->oper.width)
+ changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
+ if (ctx->conf.def.punctured != chanreq->oper.punctured)
+ changed |= IEEE80211_CHANCTX_CHANGE_PUNCTURING;
+ }
+ if (!cfg80211_chandef_identical(&ctx->conf.ap, &chanreq->ap))
+ changed |= IEEE80211_CHANCTX_CHANGE_AP;
ctx->conf.def = *chandef;
+ ctx->conf.ap = chanreq->ap;
/* check if min chanctx also changed */
- changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
- _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
ieee80211_add_wbrf(local, &ctx->conf.def);
drv_change_chanctx(local, ctx, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
-
- /* check is BW wider */
+ /* check if BW is wider */
ieee80211_chan_bw_change(local, old_ctx, false);
}
static void ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef)
+ const struct ieee80211_chan_req *chanreq)
{
- _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
+ _ieee80211_change_chanctx(local, ctx, old_ctx, chanreq, NULL);
}
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
+ struct ieee80211_chan_req tmp;
struct ieee80211_chanctx *ctx;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -549,7 +561,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
return NULL;
list_for_each_entry(ctx, &local->chanctx_list, list) {
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE)
continue;
@@ -557,12 +569,12 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
+ compat = ieee80211_chanctx_compatible(ctx, chanreq, &tmp);
if (!compat)
continue;
- compat = ieee80211_chanctx_reserved_chandef(local, ctx,
- compat);
+ compat = ieee80211_chanctx_reserved_chanreq(local, ctx,
+ compat, &tmp);
if (!compat)
continue;
@@ -576,26 +588,14 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (link && link->radar_required) {
- rcu_read_unlock();
- return true;
- }
- }
+ for_each_sdata_link(local, link) {
+ if (link->radar_required)
+ return true;
}
- rcu_read_unlock();
return false;
}
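Several hunks in this file replace this open-coded two-level walk with for_each_sdata_link(); the removed lines above show the pattern it stands for, roughly (a sketch of the iteration, not the macro's literal expansion; the converted call sites hold the wiphy mutex, which is why plain sdata_dereference() works):

	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_link_data *link;

	list_for_each_entry(sdata, &local->interfaces, list) {
		unsigned int link_id;

		if (!ieee80211_sdata_running(sdata))
			continue;

		for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
			link = sdata_dereference(sdata->link[link_id], sdata);
			if (!link)
				continue;

			/* body runs once per (interface, link) pair */
		}
	}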
@@ -605,43 +605,24 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- bool required = false;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- if (!ieee80211_sdata_running(sdata))
+ for_each_sdata_link(local, link) {
+ if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
continue;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
- continue;
- if (!link->radar_required)
- continue;
- required = true;
- break;
- }
-
- if (required)
- break;
+ if (!link->radar_required)
+ continue;
+ return true;
}
- rcu_read_unlock();
- return required;
+ return false;
}
static struct ieee80211_chanctx *
ieee80211_alloc_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -654,7 +635,8 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
INIT_LIST_HEAD(&ctx->assigned_links);
INIT_LIST_HEAD(&ctx->reserved_links);
- ctx->conf.def = *chandef;
+ ctx->conf.def = chanreq->oper;
+ ctx->conf.ap = chanreq->ap;
ctx->conf.rx_chains_static = 1;
ctx->conf.rx_chains_dynamic = 1;
ctx->mode = mode;
@@ -674,23 +656,15 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
ieee80211_add_wbrf(local, &ctx->conf.def);
- if (!local->use_chanctx)
- local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
-
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
ieee80211_hw_config(local, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = ctx->conf.def;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- err = drv_add_chanctx(local, ctx);
- if (err) {
- ieee80211_recalc_idle(local);
- return err;
- }
+ err = drv_add_chanctx(local, ctx);
+ if (err) {
+ ieee80211_recalc_idle(local);
+ return err;
}
return 0;
@@ -698,7 +672,7 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -706,7 +680,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -725,32 +699,7 @@ static void ieee80211_del_chanctx(struct ieee80211_local *local,
{
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx) {
- struct cfg80211_chan_def *chandef = &local->_oper_chandef;
- /* S1G doesn't have 20MHz, so get the correct width for the
- * current channel.
- */
- if (chandef->chan->band == NL80211_BAND_S1GHZ)
- chandef->width =
- ieee80211_s1g_channel_width(chandef->chan);
- else
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = chandef->chan->center_freq;
- chandef->freq1_offset = chandef->chan->freq_offset;
- chandef->center_freq2 = 0;
-
- /* NOTE: Disabling radar is only valid here for
- * single channel context. To be sure, check it ...
- */
- WARN_ON(local->hw.conf.radar_enabled &&
- !list_empty(&local->chanctx_list));
-
- local->hw.conf.radar_enabled = false;
-
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- drv_remove_chanctx(local, ctx);
- }
+ drv_remove_chanctx(local, ctx);
ieee80211_recalc_idle(local);
@@ -773,64 +722,53 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- const struct cfg80211_chan_def *compat = NULL;
+ const struct ieee80211_chan_req *compat = NULL;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chan_req tmp;
struct sta_info *sta;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- int link_id;
+ for_each_sdata_link(local, link) {
+ struct ieee80211_bss_conf *link_conf;
- if (!ieee80211_sdata_running(sdata))
+ if (link->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
+ link_conf = link->conf;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(sdata->vif.link_conf[link_id]);
-
- if (!link_conf)
- continue;
-
- if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
- continue;
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+ continue;
- if (!compat)
- compat = &link_conf->chandef;
+ if (!compat)
+ compat = &link_conf->chanreq;
- compat = cfg80211_chandef_compatible(&link_conf->chandef,
- compat);
- if (WARN_ON_ONCE(!compat))
- break;
- }
+ compat = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ compat, &tmp);
+ if (WARN_ON_ONCE(!compat))
+ return;
}
- if (WARN_ON_ONCE(!compat)) {
- rcu_read_unlock();
+ if (WARN_ON_ONCE(!compat))
return;
- }
/* TDLS peers can sometimes affect the chandef width */
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
+ struct ieee80211_chan_req tdls_chanreq = {};
if (!sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!sta->tdls_chandef.chan)
continue;
- compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
- compat);
+ tdls_chanreq.oper = sta->tdls_chandef;
+
+ /* note this always fills and returns &tmp if compat */
+ compat = ieee80211_chanreq_compatible(&tdls_chanreq,
+ compat, &tmp);
if (WARN_ON_ONCE(!compat))
- break;
+ return;
}
- rcu_read_unlock();
-
- if (!compat)
- return;
ieee80211_change_chanctx(local, ctx, ctx, compat);
}
@@ -849,11 +787,6 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
chanctx->conf.radar_enabled = radar_enabled;
- if (!local->use_chanctx) {
- local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- }
-
drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}
@@ -924,23 +857,19 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata;
u8 rx_chains_static, rx_chains_dynamic;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
rx_chains_static = 1;
rx_chains_dynamic = 1;
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ for_each_sdata_link(local, link) {
u8 needed_static, needed_dynamic;
- unsigned int link_id;
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- switch (sdata->vif.type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- if (!sdata->u.mgd.associated)
+ if (!link->sdata->u.mgd.associated)
continue;
break;
case NL80211_IFTYPE_AP:
@@ -952,59 +881,38 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
continue;
}
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
- continue;
-
- switch (link->smps_mode) {
- default:
- WARN_ONCE(1, "Invalid SMPS mode %d\n",
- link->smps_mode);
- fallthrough;
- case IEEE80211_SMPS_OFF:
- needed_static = link->needed_rx_chains;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_DYNAMIC:
- needed_static = 1;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_STATIC:
- needed_static = 1;
- needed_dynamic = 1;
- break;
- }
+ if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
+ continue;
- rx_chains_static = max(rx_chains_static, needed_static);
- rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+ switch (link->smps_mode) {
+ default:
+ WARN_ONCE(1, "Invalid SMPS mode %d\n",
+ link->smps_mode);
+ fallthrough;
+ case IEEE80211_SMPS_OFF:
+ needed_static = link->needed_rx_chains;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ needed_static = 1;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ needed_static = 1;
+ needed_dynamic = 1;
+ break;
}
+
+ rx_chains_static = max(rx_chains_static, needed_static);
+ rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
}
/* Disable SMPS for the monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &chanctx->conf)
rx_chains_dynamic = rx_chains_static = local->rx_chains;
- rcu_read_unlock();
-
- if (!local->use_chanctx) {
- if (rx_chains_static > 1)
- local->smps_mode = IEEE80211_SMPS_OFF;
- else if (rx_chains_dynamic > 1)
- local->smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
- local->smps_mode = IEEE80211_SMPS_STATIC;
- ieee80211_hw_config(local, 0);
- }
-
if (rx_chains_static == chanctx->conf.rx_chains_static &&
rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
return;
@@ -1043,17 +951,16 @@ __ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
if (clear)
conf = NULL;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
rcu_assign_pointer(vlan_conf->chanctx_conf, conf);
}
- rcu_read_unlock();
}
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -1103,7 +1010,7 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
}
int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode,
bool radar_required)
{
@@ -1114,13 +1021,13 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
lockdep_assert_wiphy(local->hw.wiphy);
curr_ctx = ieee80211_link_get_chanctx(link);
- if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
+ if (curr_ctx && !local->ops->switch_vif_chanctx)
return -EOPNOTSUPP;
- new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode);
if (!new_ctx) {
if (ieee80211_can_create_new_chanctx(local)) {
- new_ctx = ieee80211_new_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_new_chanctx(local, chanreq, mode);
if (IS_ERR(new_ctx))
return PTR_ERR(new_ctx);
} else {
@@ -1174,7 +1081,7 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
!list_empty(&curr_ctx->reserved_links))
return -EBUSY;
- new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
if (!new_ctx)
return -ENOMEM;
@@ -1192,7 +1099,7 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links);
link->reserved_chanctx = new_ctx;
- link->reserved_chandef = *chandef;
+ link->reserved = *chanreq;
link->reserved_radar_required = radar_required;
link->reserved_ready = false;
@@ -1231,29 +1138,28 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
}
static void
-ieee80211_link_update_chandef(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef)
+ieee80211_link_update_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
unsigned int link_id = link->link_id;
struct ieee80211_sub_if_data *vlan;
- link->conf->chandef = *chandef;
+ link->conf->chanreq = *chanreq;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(sdata->local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
- vlan_conf->chandef = *chandef;
+ vlan_conf->chanreq = *chanreq;
}
- rcu_read_unlock();
}
static int
@@ -1264,7 +1170,8 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
struct ieee80211_local *local = sdata->local;
struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
u64 changed = 0;
int err;
@@ -1286,17 +1193,18 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
- _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
+ _ieee80211_change_chanctx(local, new_ctx, old_ctx, chanreq, link);
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1344,7 +1252,8 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
int err;
old_ctx = ieee80211_link_get_chanctx(link);
@@ -1363,12 +1272,13 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
+ ieee80211_change_chanctx(local, new_ctx, new_ctx, chanreq);
list_del(&link->reserved_chanctx_list);
link->reserved_chanctx = NULL;
@@ -1412,24 +1322,6 @@ ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link)
return true;
}
-static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local,
- struct ieee80211_chanctx *new_ctx)
-{
- const struct cfg80211_chan_def *chandef;
-
- lockdep_assert_wiphy(local->hw.wiphy);
-
- chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL);
- if (WARN_ON(!chandef))
- return -EINVAL;
-
- local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled;
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
-
- return 0;
-}
-
static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
int n_vifs)
{
@@ -1518,7 +1410,6 @@ err:
static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
{
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
- struct ieee80211_chanctx *new_ctx = NULL;
int err, n_assigned, n_reserved, n_ready;
int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
@@ -1551,9 +1442,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
goto err;
}
- if (!local->use_chanctx)
- new_ctx = ctx;
-
n_ctx++;
n_assigned = 0;
@@ -1607,9 +1495,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
if (WARN_ON(n_ctx == 0) ||
WARN_ON(n_vifs_switch == 0 &&
n_vifs_assign == 0 &&
- n_vifs_ctxless == 0) ||
- WARN_ON(n_ctx > 1 && !local->use_chanctx) ||
- WARN_ON(!new_ctx && !local->use_chanctx)) {
+ n_vifs_ctxless == 0)) {
err = -EINVAL;
goto err;
}
@@ -1619,20 +1505,14 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
* reservations and driver capabilities.
*/
- if (local->use_chanctx) {
- if (n_vifs_switch > 0) {
- err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
- if (err)
- goto err;
- }
+ if (n_vifs_switch > 0) {
+ err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
+ if (err)
+ goto err;
+ }
- if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
- err = ieee80211_chsw_switch_ctxs(local);
- if (err)
- goto err;
- }
- } else {
- err = ieee80211_chsw_switch_hwconf(local, new_ctx);
+ if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
+ err = ieee80211_chsw_switch_ctxs(local);
if (err)
goto err;
}
@@ -1672,10 +1552,10 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
link->radar_required = link->reserved_radar_required;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
if (changed)
ieee80211_link_info_change_notify(sdata,
link,
@@ -1810,7 +1690,7 @@ static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
}
int ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
@@ -1821,38 +1701,37 @@ int ieee80211_link_use_channel(struct ieee80211_link_data *link,
lockdep_assert_wiphy(local->hw.wiphy);
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link->link_id))) {
- ieee80211_link_update_chandef(link, chandef);
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) {
+ ieee80211_link_update_chanreq(link, chanreq);
return 0;
}
ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
- chandef,
+ &chanreq->oper,
sdata->wdev.iftype);
if (ret < 0)
goto out;
if (ret > 0)
- radar_detect_width = BIT(chandef->width);
+ radar_detect_width = BIT(chanreq->oper.width);
link->radar_required = ret;
- ret = ieee80211_check_combinations(sdata, chandef, mode,
+ ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode,
radar_detect_width);
if (ret < 0)
goto out;
__ieee80211_link_release_channel(link);
- ctx = ieee80211_find_chanctx(local, chandef, mode);
+ ctx = ieee80211_find_chanctx(local, chanreq, mode);
if (!ctx)
- ctx = ieee80211_new_chanctx(local, chandef, mode);
+ ctx = ieee80211_new_chanctx(local, chanreq, mode);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ret = ieee80211_assign_link_chanctx(link, ctx);
if (ret) {
@@ -1932,28 +1811,79 @@ int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link)
return 0;
}
-int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed)
+/*
+ * This is similar to ieee80211_chanctx_compatible(), but rechecks
+ * against all the links actually using it (except the one that's
+ * passed, since that one is changing).
+ * This is done in order to allow changes to the AP's bandwidth for
+ * wider bandwidth OFDMA purposes, which wouldn't be treated as
+ * compatible by ieee80211_chanctx_compatible() but is OK if the link
+ * requesting the update is the only one using it.
+ */
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_recheck(struct ieee80211_local *local,
+ struct ieee80211_link_data *skip_link,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret = req;
+ struct ieee80211_link_data *link;
+
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ for_each_sdata_link(local, link) {
+ if (link == skip_link)
+ continue;
+
+ if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->conf->chanreq,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+
+ if (link->reserved_chanctx == ctx) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->reserved,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+ }
+
+ *tmp = *ret;
+ return tmp;
+}
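
Note how ieee80211_chanreq_compatible() is used as a fold here: ret narrows toward the intersection of all in-use and reserved requests, with tmp as caller-provided scratch so the final *tmp = *ret copy-out is safe even when ret aliases a live link's request. Schematically (illustrative only; link_a_req/link_b_req are placeholders):

const struct ieee80211_chan_req *cur = req;
struct ieee80211_chan_req scratch;

cur = ieee80211_chanreq_compatible(cur, &link_a_req, &scratch);
if (cur)
	cur = ieee80211_chanreq_compatible(cur, &link_b_req, &scratch);
/* NULL means incompatible; otherwise cur may point into scratch */
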
+
+int ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq,
+ u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *link_conf = link->conf;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
+ struct ieee80211_chan_req tmp;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ &chanreq->oper,
IEEE80211_CHAN_DISABLED))
return -EINVAL;
- if (cfg80211_chandef_identical(chandef, &link_conf->chandef))
+ /* for non-HT 20 MHz the rest doesn't matter */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT &&
+ cfg80211_chandef_identical(&chanreq->oper, &link_conf->chanreq.oper))
return 0;
- if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
- link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ /* but you cannot switch to/from it */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return -EINVAL;
conf = rcu_dereference_protected(link_conf->chanctx_conf,
@@ -1963,13 +1893,14 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
ctx = container_of(conf, struct ieee80211_chanctx, conf);
- compat = cfg80211_chandef_compatible(&conf->def, chandef);
+ compat = ieee80211_chanctx_recheck(local, link, ctx, chanreq, &tmp);
if (!compat)
return -EINVAL;
switch (ctx->replace_state) {
case IEEE80211_CHANCTX_REPLACE_NONE:
- if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat))
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, compat,
+ &tmp))
return -EBUSY;
break;
case IEEE80211_CHANCTX_WILL_BE_REPLACED:
@@ -1984,7 +1915,7 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
break;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ieee80211_recalc_chanctx_chantype(local, ctx);
@@ -2019,12 +1950,11 @@ void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
- rcu_read_lock();
- ap_conf = rcu_dereference(ap->vif.link_conf[link_id]);
- conf = rcu_dereference_protected(ap_conf->chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ ap_conf = wiphy_dereference(local->hw.wiphy,
+ ap->vif.link_conf[link_id]);
+ conf = wiphy_dereference(local->hw.wiphy,
+ ap_conf->chanctx_conf);
rcu_assign_pointer(link_conf->chanctx_conf, conf);
- rcu_read_unlock();
}
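
Both pointers change only under the wiphy mutex, which this path is expected to already hold, so the RCU read-side critical section was redundant; wiphy_dereference() documents the locking and adds a lockdep check. The general shape of the conversion (illustrative sketch; obj/ptr are placeholders):

rcu_read_lock();				/* old: RCU read side */
p = rcu_dereference(obj->ptr);
rcu_read_unlock();

lockdep_assert_wiphy(local->hw.wiphy);		/* new: wiphy mutex held */
p = wiphy_dereference(local->hw.wiphy, obj->ptr);
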
void ieee80211_iter_chan_contexts_atomic(
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index d49894df2351..49da401c5340 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -152,16 +152,17 @@ do { \
else \
_sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \
} while (0)
-#define link_dbg(link, fmt, ...) \
+#define _link_id_dbg(print, sdata, link_id, fmt, ...) \
do { \
- if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \
- _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, \
- (link)->link_id, \
- ##__VA_ARGS__); \
+ if (ieee80211_vif_is_mld(&(sdata)->vif)) \
+ _sdata_dbg(print, sdata, "[link %d] " fmt, \
+ link_id, ##__VA_ARGS__); \
else \
- _sdata_dbg(1, (link)->sdata, fmt, \
- ##__VA_ARGS__); \
+ _sdata_dbg(print, sdata, fmt, ##__VA_ARGS__); \
} while (0)
+#define link_dbg(link, fmt, ...) \
+ _link_id_dbg(1, (link)->sdata, (link)->link_id, \
+ fmt, ##__VA_ARGS__)
#define ht_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_HT_DEBUG, \
@@ -226,6 +227,9 @@ do { \
#define mlme_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG, \
sdata, fmt, ##__VA_ARGS__)
+#define mlme_link_id_dbg(sdata, link_id, fmt, ...) \
+ _link_id_dbg(MAC80211_MLME_DEBUG, sdata, link_id, \
+ fmt, ##__VA_ARGS__)
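
The generalized _link_id_dbg() lets code that has only an sdata and a link ID (rather than a struct ieee80211_link_data) print per-link debug messages, gated on the same condition as the non-link variant. A hedged usage sketch:

/* prints "[link 2] assoc: status 0" on an MLD, plain text otherwise,
 * and only when MAC80211_MLME_DEBUG is enabled, like mlme_dbg()
 */
mlme_link_id_dbg(sdata, 2, "assoc: status %d\n", 0);
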
#define mlme_dbg_ratelimited(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 74be49191e70..2f68e92a7404 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021-2024 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -498,6 +498,7 @@ static const char *hw_flag_names[] = {
FLAG(DETECTS_COLOR_COLLISION),
FLAG(MLO_MCAST_MULTI_LINK_TX),
FLAG(DISALLOW_PUNCTURING),
+ FLAG(HANDLES_QUIET_CSA),
#undef FLAG
};
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 3b7f70073fc3..dce37ba8ebe3 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Intel Deutschland GmbH
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -214,8 +214,7 @@ int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link->link_id))
return 0;
if (params->cw_min == 0 || params->cw_min > params->cw_max) {
@@ -315,8 +314,7 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_conf->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_conf->link_id))
return 0;
trace_drv_assign_vif_chanctx(local, sdata, link_conf, ctx);
@@ -343,8 +341,7 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_conf->link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_conf->link_id))
return;
trace_drv_unassign_vif_chanctx(local, sdata, link_conf, ctx);
@@ -461,8 +458,7 @@ void drv_link_info_changed(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
- if (sdata->vif.active_links &&
- !(sdata->vif.active_links & BIT(link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
return;
trace_drv_link_info_changed(local, sdata, info, changed);
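
These driver-ops hunks all replace the same open-coded test with ieee80211_vif_link_active(), added elsewhere in this series in include/net/mac80211.h. Judging strictly by the pattern it replaces, the helper's semantics are roughly (sketch, not the real definition):

static inline bool vif_link_active_sketch(const struct ieee80211_vif *vif,
					  unsigned int link_id)
{
	/* active_links == 0 means non-MLD operation: always active */
	return !vif->active_links || (vif->active_links & BIT(link_id));
}
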
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index eb482fb8c3af..5d078c0a2323 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019, 2021 - 2023 Intel Corporation
+* Copyright (C) 2018-2019, 2021-2024 Intel Corporation
*/
#ifndef __MAC80211_DRIVER_OPS
@@ -1180,8 +1180,9 @@ drv_post_channel_switch(struct ieee80211_link_data *link)
}
static inline void
-drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
+drv_abort_channel_switch(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
might_sleep();
@@ -1193,7 +1194,8 @@ drv_abort_channel_switch(struct ieee80211_sub_if_data *sdata)
trace_drv_abort_channel_switch(local, sdata);
if (local->ops->abort_channel_switch)
- local->ops->abort_channel_switch(&local->hw, &sdata->vif);
+ local->ops->abort_channel_switch(&local->hw, &sdata->vif,
+ link->conf);
}
static inline void
@@ -1695,4 +1697,23 @@ int drv_change_sta_links(struct ieee80211_local *local,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links);
+static inline enum ieee80211_neg_ttlm_res
+drv_can_neg_ttlm(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ enum ieee80211_neg_ttlm_res res = NEG_TTLM_RES_REJECT;
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_can_neg_ttlm(local, sdata, neg_ttlm);
+ if (local->ops->can_neg_ttlm)
+ res = local->ops->can_neg_ttlm(&local->hw, &sdata->vif,
+ neg_ttlm);
+ trace_drv_neg_ttlm_res(local, sdata, res, neg_ttlm);
+
+ return res;
+}
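
The op is optional: if a driver does not implement can_neg_ttlm, the negotiated TID-to-link mapping request is rejected (res stays NEG_TTLM_RES_REJECT). A hedged sketch of a driver that accepts everything (my_can_neg_ttlm is a placeholder name, and the ACCEPT value is assumed from the same series):

static enum ieee80211_neg_ttlm_res
my_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		struct ieee80211_neg_ttlm *neg_ttlm)
{
	return NEG_TTLM_RES_ACCEPT;	/* take the peer's mapping as-is */
}
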
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 749f4ecab990..c3330aea4da3 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -257,7 +257,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!link_conf))
width = NL80211_CHAN_WIDTH_20_NOHT;
else
- width = link_conf->chandef.width;
+ width = link_conf->chanreq.oper.width;
switch (width) {
default:
@@ -603,6 +603,8 @@ void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
if (WARN_ON(!link))
goto out;
+ trace_api_request_smps(sdata->local, sdata, link, smps_mode);
+
if (link->u.mgd.driver_smps_mode == smps_mode)
goto out;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 8f2b445a5ec3..7ace5cdc6c26 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -223,7 +223,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
struct cfg80211_bss *bss;
u64 bss_change;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq = {};
struct ieee80211_channel *chan;
struct beacon_data *presp;
struct cfg80211_inform_bss bss_meta = {};
@@ -237,7 +237,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
drv_reset_tsf(local, sdata);
if (!ether_addr_equal(ifibss->bssid, bssid))
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.cfg.ibss_joined) {
@@ -257,22 +257,22 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
kfree_rcu(presp, rcu_head);
/* make a copy of the chandef, it could be modified below. */
- chandef = *req_chandef;
- chan = chandef.chan;
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ chanreq.oper = *req_chandef;
+ chan = chanreq.oper.chan;
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10 ||
- chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- chandef.width == NL80211_CHAN_WIDTH_20) {
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
return;
}
- chandef.width = NL80211_CHAN_WIDTH_20;
- chandef.center_freq1 = chan->center_freq;
+ chanreq.oper.width = NL80211_CHAN_WIDTH_20;
+ chanreq.oper.center_freq1 = chan->center_freq;
/* check again for downgraded chandef */
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
@@ -281,7 +281,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &chandef, NL80211_IFTYPE_ADHOC);
+ &chanreq.oper, NL80211_IFTYPE_ADHOC);
if (err < 0) {
sdata_info(sdata,
"Failed to join IBSS, invalid chandef\n");
@@ -295,7 +295,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
radar_required = err;
- if (ieee80211_link_use_channel(&sdata->deflink, &chandef,
+ if (ieee80211_link_use_channel(&sdata->deflink, &chanreq,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
IEEE80211_CHANCTX_EXCLUSIVE)) {
@@ -307,7 +307,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(ifibss->bssid, bssid, ETH_ALEN);
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
- capability, tsf, &chandef,
+ capability, tsf, &chanreq.oper,
&have_higher_than_11mbit, NULL);
if (!presp)
return;
@@ -533,12 +533,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
- cbss->channel = sdata->deflink.csa_chandef.chan;
+ cbss->channel = sdata->deflink.csa_chanreq.oper.chan;
cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
}
}
- ifibss->chandef = sdata->deflink.csa_chandef;
+ ifibss->chandef = sdata->deflink.csa_chanreq.oper;
/* generate the beacon */
return ieee80211_ibss_csa_beacon(sdata, NULL, changed);
@@ -682,7 +682,7 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifibss->incomplete_lock);
while (!list_empty(&ifibss->incomplete_stations)) {
@@ -757,21 +757,22 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
enum nl80211_channel_type ch_type;
int err;
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn = {
+ .mode = IEEE80211_CONN_MODE_HT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_40,
+ };
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- conn_flags = IEEE80211_CONN_DISABLE_VHT;
-
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
fallthrough;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
default:
break;
@@ -783,8 +784,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems,
ifibss->chandef.chan->band,
- vht_cap_info,
- conn_flags, ifibss->bssid, &csa_ie);
+ vht_cap_info, &conn,
+ ifibss->bssid, &csa_ie);
/* can't switch to destination channel, fail */
if (err < 0)
goto disconnect;
@@ -798,7 +799,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
goto disconnect;
params.count = csa_ie.count;
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -857,7 +858,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
ibss_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0b2b53550bd9..b6fead612b66 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -370,19 +370,32 @@ enum ieee80211_sta_flags {
IEEE80211_STA_ENABLE_RRM = BIT(15),
};
-typedef u32 __bitwise ieee80211_conn_flags_t;
-
-enum ieee80211_conn_flags {
- IEEE80211_CONN_DISABLE_HT = (__force ieee80211_conn_flags_t)BIT(0),
- IEEE80211_CONN_DISABLE_40MHZ = (__force ieee80211_conn_flags_t)BIT(1),
- IEEE80211_CONN_DISABLE_VHT = (__force ieee80211_conn_flags_t)BIT(2),
- IEEE80211_CONN_DISABLE_80P80MHZ = (__force ieee80211_conn_flags_t)BIT(3),
- IEEE80211_CONN_DISABLE_160MHZ = (__force ieee80211_conn_flags_t)BIT(4),
- IEEE80211_CONN_DISABLE_HE = (__force ieee80211_conn_flags_t)BIT(5),
- IEEE80211_CONN_DISABLE_EHT = (__force ieee80211_conn_flags_t)BIT(6),
- IEEE80211_CONN_DISABLE_320MHZ = (__force ieee80211_conn_flags_t)BIT(7),
+enum ieee80211_conn_mode {
+ IEEE80211_CONN_MODE_S1G,
+ IEEE80211_CONN_MODE_LEGACY,
+ IEEE80211_CONN_MODE_HT,
+ IEEE80211_CONN_MODE_VHT,
+ IEEE80211_CONN_MODE_HE,
+ IEEE80211_CONN_MODE_EHT,
};
+#define IEEE80211_CONN_MODE_HIGHEST IEEE80211_CONN_MODE_EHT
+
+enum ieee80211_conn_bw_limit {
+ IEEE80211_CONN_BW_LIMIT_20,
+ IEEE80211_CONN_BW_LIMIT_40,
+ IEEE80211_CONN_BW_LIMIT_80,
+ IEEE80211_CONN_BW_LIMIT_160, /* also 80+80 */
+ IEEE80211_CONN_BW_LIMIT_320,
+};
+
+struct ieee80211_conn_settings {
+ enum ieee80211_conn_mode mode;
+ enum ieee80211_conn_bw_limit bw_limit;
+};
+
+extern const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited;
+
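
Where the old code OR'ed DISABLE_* bits together, the new representation is two independent caps: the highest allowed connection mode and a bandwidth limit. For instance, the old IEEE80211_CONN_DISABLE_VHT | IEEE80211_CONN_DISABLE_40MHZ pair maps to (sketch, consistent with the IBSS conversion later in this patch):

struct ieee80211_conn_settings conn = {
	.mode = IEEE80211_CONN_MODE_HT,		/* HT ok, VHT and up not */
	.bw_limit = IEEE80211_CONN_BW_LIMIT_20,	/* no 40 MHz */
};
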
struct ieee80211_mgd_auth_data {
struct cfg80211_bss *bss;
unsigned long timeout;
@@ -416,7 +429,7 @@ struct ieee80211_mgd_assoc_data {
size_t elems_len;
u8 *elems; /* pointing to inside ie[] below */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
u16 status;
@@ -441,6 +454,7 @@ struct ieee80211_mgd_assoc_data {
bool timeout_started;
bool comeback; /* whether the AP has requested association comeback */
bool s1g;
+ bool spp_amsdu;
unsigned int assoc_link_id;
@@ -509,6 +523,8 @@ struct ieee80211_if_managed {
unsigned int flags;
+ u16 mcast_seq_last;
+
bool status_acked;
bool status_received;
__le16 status_fc;
@@ -579,6 +595,10 @@ struct ieee80211_if_managed {
/* TID-to-link mapping support */
struct wiphy_delayed_work ttlm_work;
struct ieee80211_adv_ttlm_info ttlm_info;
+
+ /* dialog token enumerator for neg TTLM request */
+ u8 dialog_token_alloc;
+ struct wiphy_delayed_work neg_ttlm_timeout_work;
};
struct ieee80211_if_ibss {
@@ -866,6 +886,9 @@ struct ieee80211_chanctx {
enum ieee80211_chanctx_mode mode;
bool driver_present;
+ /* temporary data for search algorithm etc. */
+ struct ieee80211_chan_req req;
+
struct ieee80211_chanctx_conf conf;
};
@@ -938,7 +961,7 @@ struct ieee80211_link_data_managed {
enum ieee80211_smps_mode req_smps, /* requested smps mode */
driver_smps_mode; /* smps mode request */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
s16 p2p_noa_index;
@@ -983,8 +1006,6 @@ struct ieee80211_link_data_managed {
int mu_edca_last_param_set;
u8 bss_param_ch_cnt;
-
- struct cfg80211_bss *bss;
};
struct ieee80211_link_data_ap {
@@ -1013,11 +1034,10 @@ struct ieee80211_link_data {
struct ieee80211_key __rcu *default_beacon_key;
struct wiphy_work csa_finalize_work;
- bool csa_block_tx;
bool operating_11g_mode;
- struct cfg80211_chan_def csa_chandef;
+ struct ieee80211_chan_req csa_chanreq;
struct wiphy_work color_change_finalize_work;
struct delayed_work color_collision_detect_work;
@@ -1025,7 +1045,7 @@ struct ieee80211_link_data {
/* context reservation -- protected with wiphy mutex */
struct ieee80211_chanctx *reserved_chanctx;
- struct cfg80211_chan_def reserved_chandef;
+ struct ieee80211_chan_req reserved;
bool reserved_radar_required;
bool reserved_ready;
@@ -1072,6 +1092,8 @@ struct ieee80211_sub_if_data {
unsigned long state;
+ bool csa_blocked_tx;
+
char name[IFNAMSIZ];
struct ieee80211_fragment_cache frags;
@@ -1160,6 +1182,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
#define sdata_dereference(p, sdata) \
wiphy_dereference(sdata->local->hw.wiphy, p)
+#define for_each_sdata_link(_local, _link) \
+ /* outer loop just to define the variables ... */ \
+ for (struct ieee80211_sub_if_data *___sdata = NULL; \
+ !___sdata; \
+ ___sdata = (void *)~0 /* always stop */) \
+ list_for_each_entry(___sdata, &(_local)->interfaces, list) \
+ if (ieee80211_sdata_running(___sdata)) \
+ for (int ___link_id = 0; \
+ ___link_id < ARRAY_SIZE(___sdata->link); \
+ ___link_id++) \
if ((_link = wiphy_dereference((_local)->hw.wiphy, \
+ ___sdata->link[___link_id])))
+
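
The outer for loop exists only so the macro can declare ___sdata and still be a single statement; it executes once, and the (void *)~0 assignment guarantees termination. A hedged usage sketch (process_link() is a placeholder; ieee80211_chanctx_recheck() above is a real in-tree user):

struct ieee80211_link_data *link;

lockdep_assert_wiphy(local->hw.wiphy);	/* needed by wiphy_dereference */
for_each_sdata_link(local, link)
	process_link(link);
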
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,
@@ -1330,7 +1365,8 @@ struct ieee80211_local {
bool wiphy_ciphers_allocated;
- bool use_chanctx;
+ struct cfg80211_chan_def dflt_chandef;
+ bool emulate_chanctx;
/* protects the aggregated multicast list and filter calls */
spinlock_t filter_lock;
@@ -1456,8 +1492,6 @@ struct ieee80211_local {
enum mac80211_scan_state next_scan_state;
struct wiphy_delayed_work scan_work;
struct ieee80211_sub_if_data __rcu *scan_sdata;
- /* For backward compatibility only -- do not use */
- struct cfg80211_chan_def _oper_chandef;
/* Temporary remain-on-channel for off-channel operations */
struct ieee80211_channel *tmp_channel;
@@ -1531,8 +1565,6 @@ struct ieee80211_local {
int user_power_level; /* in dBm, for all interfaces */
- enum ieee80211_smps_mode smps_mode;
-
struct work_struct restart_work;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1559,7 +1591,7 @@ struct ieee80211_local {
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
- struct cfg80211_chan_def monitor_chandef;
+ struct ieee80211_chan_req monitor_chanreq;
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
@@ -1624,7 +1656,7 @@ ieee80211_get_link_sband(struct ieee80211_link_data *link)
/* this struct holds the value parsing from channel switch IE */
struct ieee80211_csa_ie {
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
u8 mode;
u8 count;
u8 ttl;
@@ -1633,6 +1665,14 @@ struct ieee80211_csa_ie {
u32 max_switch_time;
};
+enum ieee80211_elems_parse_error {
+ IEEE80211_PARSE_ERR_INVALID_END = BIT(0),
+ IEEE80211_PARSE_ERR_DUP_ELEM = BIT(1),
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE = BIT(2),
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM = BIT(3),
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC = BIT(4),
+};
+
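
parse_error thus changes from a bool to a bitmask of the reasons above, letting callers distinguish fatal damage from ignorable quirks. A hedged sketch of a new-style check (the policy shown is illustrative):

if (elems->parse_error & IEEE80211_PARSE_ERR_BAD_ELEM_SIZE)
	goto free;	/* malformed element sizes: treat as fatal */
/* the old code could only do: if (elems->parse_error) ... */
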
/* Parsed Information Elements */
struct ieee802_11_elems {
const u8 *ie_start;
@@ -1727,12 +1767,6 @@ struct ieee802_11_elems {
size_t ml_basic_len;
size_t ml_reconf_len;
- /* The basic Multi-Link element in the original IEs */
- const struct element *ml_basic_elem;
-
- /* The reconfiguration Multi-Link element in the original IEs */
- const struct element *ml_reconf_elem;
-
u8 ttlm_num;
/*
@@ -1743,16 +1777,8 @@ struct ieee802_11_elems {
struct ieee80211_mle_per_sta_profile *prof;
size_t sta_prof_len;
- /* whether a parse error occurred while retrieving these elements */
- bool parse_error;
-
- /*
- * scratch buffer that can be used for various element parsing related
- * tasks, e.g., element de-fragmentation etc.
- */
- size_t scratch_len;
- u8 *scratch_pos;
- u8 scratch[] __counted_by(scratch_len);
+ /* whether/which parse error occurred while retrieving these elements */
+ u8 parse_error;
};
static inline struct ieee80211_local *hw_to_local(
@@ -1801,6 +1827,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
unsigned int mpdu_len,
unsigned int mpdu_offset);
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
+int ieee80211_hw_conf_chan(struct ieee80211_local *local);
+void ieee80211_hw_conf_init(struct ieee80211_local *local);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed);
@@ -2112,7 +2140,7 @@ enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
enum nl80211_chan_width
@@ -2166,9 +2194,8 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
* @elems: parsed 802.11 elements received with the frame
* @current_band: indicates the current band
* @vht_cap_info: VHT capabilities of the transmitter
- * @conn_flags: contains information about own capabilities and restrictions
- * to decide which channel switch announcements can be accepted, using
- * flags from &enum ieee80211_conn_flags.
+ * @conn: contains information about own capabilities and restrictions
+ * to decide which channel switch announcements can be accepted
* @bssid: the currently connected bssid (for reporting)
* @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl.
* All of them will be filled in only on success.
@@ -2178,7 +2205,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid,
struct ieee80211_csa_ie *csa_ie);
/* Suspend/resume and hw reconfiguration */
@@ -2202,6 +2230,9 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode);
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef);
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
int rate, int erp, int short_preamble);
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -2243,6 +2274,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
/**
* struct ieee80211_elems_parse_params - element parsing parameters
+ * @mode: connection mode for parsing
* @start: pointer to the elements
* @len: length of the elements
* @action: %true if the elements came from an action frame
@@ -2260,6 +2292,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
* for EHT capabilities parsing)
*/
struct ieee80211_elems_parse_params {
+ enum ieee80211_conn_mode mode;
const u8 *start;
size_t len;
bool action;
@@ -2279,6 +2312,7 @@ ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
struct cfg80211_bss *bss)
{
struct ieee80211_elems_parse_params params = {
+ .mode = IEEE80211_CONN_MODE_HIGHEST,
.start = start,
.len = len,
.action = action,
@@ -2408,7 +2442,6 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *da, const u8 *bssid,
u16 stype, u16 reason,
bool send_frame, u8 *frame_buf);
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end);
enum {
IEEE80211_PROBE_FLAG_DIRECTED = BIT(0),
@@ -2453,32 +2486,36 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef);
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- u8 *end);
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb);
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata);
u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef);
u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef,
const struct ieee80211_sta_eht_cap *eht_cap);
int ieee80211_parse_bitrates(enum nl80211_chan_width width,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates);
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_s1g_cap *caps,
struct sk_buff *skb);
void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap);
+
+/* element building in SKBs */
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id);
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode);
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
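
The raw pos/end builders are replaced by skb-based put helpers, so tailroom checks and skb_put() live in one place instead of in every caller. A hedged before/after sketch, matching the mesh.c conversion later in this patch:

/* old style: caller sized and reserved the buffer */
pos = skb_put(skb, ie_len);
ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);

/* new style: helper validates tailroom and fills the skb itself */
ret = ieee80211_put_he_cap(skb, sdata, sband, NULL);
if (ret)
	return ret;
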
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
@@ -2488,23 +2525,36 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef);
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef);
bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
struct cfg80211_chan_def *chandef);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *chandef,
+ struct ieee80211_conn_settings *conn);
+static inline void
+ieee80211_chanreq_downgrade(struct ieee80211_chan_req *chanreq,
+ struct ieee80211_conn_settings *conn)
+{
+ ieee80211_chandef_downgrade(&chanreq->oper, conn);
+ if (WARN_ON(!conn))
+ return;
+ if (conn->mode < IEEE80211_CONN_MODE_EHT)
+ chanreq->ap.chan = NULL;
+}
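
Clearing chanreq->ap once the mode drops below EHT matches the rule that only EHT connections carry a separate AP bandwidth. A hedged sketch of a typical caller, by analogy with existing chandef-downgrade loops (termination relies on the downgrade eventually reaching a usable width):

while (!cfg80211_chandef_usable(local->hw.wiphy, &chanreq.oper,
				IEEE80211_CHAN_DISABLED))
	ieee80211_chanreq_downgrade(&chanreq, &conn);
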
+
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b);
int __must_check
ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode);
int __must_check
ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode,
bool radar_required);
int __must_check
@@ -2512,9 +2562,9 @@ ieee80211_link_use_reserved_context(struct ieee80211_link_data *link);
int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
-ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed);
+ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *req,
+ u64 *changed);
void ieee80211_link_release_channel(struct ieee80211_link_data *link);
void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link);
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -2561,7 +2611,7 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
-void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link);
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason);
void
@@ -2589,12 +2639,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap);
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata);
void
ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
@@ -2603,6 +2648,12 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
u8 eht_cap_len,
struct link_sta_info *link_sta);
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params);
void ieee80211_check_wbrf_support(struct ieee80211_local *local);
void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 11c4caa4748e..395de62d9cb2 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -511,7 +511,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
* would have removed them, but in other modes there shouldn't
* be any stations.
*/
- flushed = sta_info_flush(sdata);
+ flushed = sta_info_flush(sdata, -1);
WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0);
/* don't count this interface for allmulti while it is down */
@@ -544,10 +544,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
sdata->vif.bss_conf.csa_active = false;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->deflink.csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
@@ -557,7 +557,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ chandef = sdata->vif.bss_conf.chanreq.oper;
WARN_ON(local->suspended);
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
@@ -1164,7 +1164,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
rcu_assign_pointer(local->monitor_sdata, sdata);
mutex_unlock(&local->iflist_mtx);
- ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef,
+ ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chanreq,
IEEE80211_CHANCTX_EXCLUSIVE);
if (ret) {
mutex_lock(&local->iflist_mtx);
@@ -1252,7 +1252,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
- sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+ sdata->vif.bss_conf.chanreq = master->vif.bss_conf.chanreq;
sdata->crypto_tx_tailroom_needed_cnt +=
master->crypto_tx_tailroom_needed_cnt;
@@ -1288,8 +1288,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
res = drv_start(local);
if (res)
goto err_del_bss;
- /* we're brought up, everything changes */
- hw_reconf_flags = ~0;
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
@@ -1436,7 +1434,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (coming_up)
local->open_count++;
- if (hw_reconf_flags)
+ if (local->open_count == 1)
+ ieee80211_hw_conf_init(local);
+ else if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
ieee80211_recalc_ps(local);
@@ -1546,6 +1546,22 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
default:
break;
}
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_EHT) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ ieee80211_process_neg_ttlm_req(sdata, mgmt,
+ skb->len);
+ break;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ ieee80211_process_neg_ttlm_res(sdata, mgmt,
+ skb->len);
+ break;
+ default:
+ break;
+ }
+ }
} else if (ieee80211_is_ext(mgmt->frame_control)) {
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_queued_ext(sdata, skb);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af74d7f9d94d..eecdd2265eaa 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -6,7 +6,7 @@
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2020, 2022-2023 Intel Corporation
+ * Copyright 2018-2020, 2022-2024 Intel Corporation
*/
#include <crypto/utils.h>
@@ -925,6 +925,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
key->color = atomic_inc_return(&key_color);
+ /* keep this flag for easier access later */
+ if (sta && sta->sta.spp_amsdu)
+ key->conf.flags |= IEEE80211_KEY_FLAG_SPP_AMSDU;
+
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key);
@@ -1368,12 +1372,19 @@ EXPORT_SYMBOL_GPL(ieee80211_remove_key);
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf)
+ struct ieee80211_key_conf *keyconf,
+ int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *key;
int err;
+ struct ieee80211_link_data *link_data =
+ link_id < 0 ? &sdata->deflink :
+ sdata_dereference(sdata->link[link_id], sdata);
+
+ if (WARN_ON(!link_data))
+ return ERR_PTR(-EINVAL);
if (WARN_ON(!local->wowlan))
return ERR_PTR(-EINVAL);
@@ -1390,8 +1401,9 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
- /* FIXME: this function needs to get a link ID */
- err = ieee80211_key_link(key, &sdata->deflink, NULL);
+ key->conf.link_id = link_id;
+
+ err = ieee80211_key_link(key, link_data, NULL);
if (err)
return ERR_PTR(err);
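
Callers now tell the rekey path which link the GTK belongs to; passing a negative link_id preserves the old single-link behaviour via deflink. A hedged caller sketch (new_gtk_conf is a placeholder):

keyconf = ieee80211_gtk_rekey_add(vif, &new_gtk_conf, -1);
if (IS_ERR(keyconf))
	return PTR_ERR(keyconf);
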
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index d4f86955afa6..685ec66b4264 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -2,7 +2,7 @@
/*
* MLO link handling
*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -73,6 +73,8 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
ieee80211_mgd_stop_link(link);
cancel_delayed_work_sync(&link->color_collision_detect_work);
+ wiphy_work_cancel(link->sdata->local->hw.wiphy,
+ &link->csa_finalize_work);
ieee80211_link_release_channel(link);
}
@@ -354,7 +356,7 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- /* FIXME: kill TDLS connections on the link */
+ ieee80211_teardown_tdls_peers(link);
ieee80211_link_release_channel(link);
}
@@ -402,7 +404,8 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_use_channel(link, &link->conf->chandef,
+ ret = ieee80211_link_use_channel(link,
+ &link->conf->chanreq,
IEEE80211_CHANCTX_SHARED);
WARN_ON_ONCE(ret);
@@ -444,6 +447,9 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links)
lockdep_assert_wiphy(local->hw.wiphy);
+ if (WARN_ON(!active_links))
+ return -EINVAL;
+
if (!drv_can_activate_links(local, sdata, active_links))
return -EINVAL;
@@ -472,6 +478,9 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ if (WARN_ON(!active_links))
+ return;
+
if (!ieee80211_sdata_running(sdata))
return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f2ece7793573..4eaea0a9975b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -93,16 +93,32 @@ static void ieee80211_reconfig_filter(struct wiphy *wiphy,
ieee80211_configure_filter(local);
}
-static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
+static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef = {};
+ struct cfg80211_chan_def *oper = NULL;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC;
u32 changed = 0;
int power;
u32 offchannel_flag;
+ if (!local->emulate_chanctx)
+ return 0;
+
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ if (ctx && !WARN_ON(!ctx->def.chan)) {
+ oper = &ctx->def;
+ if (ctx->rx_chains_static > 1)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else if (ctx->rx_chains_dynamic > 1)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_STATIC;
+ }
+
if (local->scan_chandef.chan) {
chandef = local->scan_chandef;
} else if (local->tmp_channel) {
@@ -110,25 +126,30 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
chandef.center_freq1 = chandef.chan->center_freq;
chandef.freq1_offset = chandef.chan->freq_offset;
- } else
- chandef = local->_oper_chandef;
+ } else if (oper) {
+ chandef = *oper;
+ } else {
+ chandef = local->dflt_chandef;
+ }
- WARN(!cfg80211_chandef_valid(&chandef),
- "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
- chandef.chan->center_freq, chandef.chan->freq_offset,
- chandef.width, chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
+ if (WARN(!cfg80211_chandef_valid(&chandef),
+ "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
+ chandef.chan ? chandef.chan->center_freq : -1,
+ chandef.chan ? chandef.chan->freq_offset : 0,
+ chandef.width, chandef.center_freq1, chandef.freq1_offset,
+ chandef.center_freq2))
+ return 0;
- if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef))
+ if (!oper || !cfg80211_chandef_identical(&chandef, oper))
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
else
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (offchannel_flag ||
- !cfg80211_chandef_identical(&local->hw.conf.chandef,
- &local->_oper_chandef)) {
+ /* force it also for scanning, since drivers might config differently */
+ if (offchannel_flag || local->scanning ||
+ !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) {
local->hw.conf.chandef = chandef;
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
@@ -140,8 +161,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
* that otherwise STATIC is used.
*/
local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
- } else if (local->hw.conf.smps_mode != local->smps_mode) {
- local->hw.conf.smps_mode = local->smps_mode;
+ } else if (local->hw.conf.smps_mode != smps_mode) {
+ local->hw.conf.smps_mode = smps_mode;
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
@@ -173,12 +194,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- if (!local->use_chanctx)
- changed |= ieee80211_hw_conf_chan(local);
- else
- changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER |
- IEEE80211_CONF_CHANGE_SMPS);
+ WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS));
if (changed && local->open_count) {
ret = drv_config(local, changed);
@@ -202,13 +220,115 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
return ret;
}
+/* for scanning, offchannel and chanctx emulation only */
+static int _ieee80211_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u32 changed;
+
+ if (!local->open_count)
+ return 0;
+
+ changed = ieee80211_calc_hw_conf_chan(local, ctx);
+ if (!changed)
+ return 0;
+
+ return drv_config(local, changed);
+}
+
+int ieee80211_hw_conf_chan(struct ieee80211_local *local)
+{
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ return _ieee80211_hw_conf_chan(local, ctx ? &ctx->conf : NULL);
+}
+
+void ieee80211_hw_conf_init(struct ieee80211_local *local)
+{
+ u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS);
+
+ if (WARN_ON(!local->open_count))
+ return;
+
+ if (local->emulate_chanctx) {
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ changed |= ieee80211_calc_hw_conf_chan(local,
+ ctx ? &ctx->conf : NULL);
+ }
+
+ WARN_ON(drv_config(local, changed));
+}
+
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ return _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_add_chanctx);
+
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = false;
+
+ _ieee80211_hw_conf_chan(local, NULL);
+}
+EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx);
+
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_change_chanctx);
+
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled;
+ _ieee80211_hw_conf_chan(local, vifs[0].new_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx);
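
Rather than leaving the chanctx ops NULL, a driver without real channel-context support now opts into emulation explicitly by plugging in these exported helpers, and must then leave assign_vif_chanctx/unassign_vif_chanctx unset (see the ieee80211_alloc_hw_nm() check below). Sketch of such a driver's ops (the my_* names are placeholders):

static const struct ieee80211_ops my_ops = {
	.config			= my_config,
	/* ... */
	.add_chanctx		= ieee80211_emulate_add_chanctx,
	.remove_chanctx		= ieee80211_emulate_remove_chanctx,
	.change_chanctx		= ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx	= ieee80211_emulate_switch_vif_chanctx,
};
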
+
#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\
BSS_CHANGED_IDLE |\
BSS_CHANGED_PS |\
BSS_CHANGED_IBSS |\
BSS_CHANGED_ARP_FILTER |\
BSS_CHANGED_SSID |\
- BSS_CHANGED_MLD_VALID_LINKS)
+ BSS_CHANGED_MLD_VALID_LINKS |\
+ BSS_CHANGED_MLD_TTLM)
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
@@ -644,7 +764,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
struct ieee80211_local *local;
int priv_size, i;
struct wiphy *wiphy;
- bool use_chanctx;
+ bool emulate_chanctx;
if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
!ops->add_interface || !ops->remove_interface ||
@@ -659,12 +779,26 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
return NULL;
/* check all or no channel context operations exist */
- i = !!ops->add_chanctx + !!ops->remove_chanctx +
- !!ops->change_chanctx + !!ops->assign_vif_chanctx +
- !!ops->unassign_vif_chanctx;
- if (WARN_ON(i != 0 && i != 5))
- return NULL;
- use_chanctx = i == 5;
+ if (ops->add_chanctx == ieee80211_emulate_add_chanctx &&
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx &&
+ ops->change_chanctx == ieee80211_emulate_change_chanctx) {
+ if (WARN_ON(ops->assign_vif_chanctx ||
+ ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = true;
+ } else {
+ if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx ||
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx ||
+ ops->change_chanctx == ieee80211_emulate_change_chanctx))
+ return NULL;
+ if (WARN_ON(!ops->add_chanctx ||
+ !ops->remove_chanctx ||
+ !ops->change_chanctx ||
+ !ops->assign_vif_chanctx ||
+ !ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = false;
+ }
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wiphy priv data for both our ieee80211_local and for
@@ -698,7 +832,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
- if (!use_chanctx || ops->remain_on_channel)
+ if (emulate_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
@@ -734,8 +868,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
}
- if (!ops->set_key)
+ if (!ops->set_key) {
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+ }
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
@@ -752,7 +889,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
local->ops = ops;
- local->use_chanctx = use_chanctx;
+ local->emulate_chanctx = emulate_chanctx;
+
+ if (emulate_chanctx)
+ ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA);
/*
* We need a bit of data queued to build aggregates properly, so
@@ -829,7 +969,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_dfs_radar_detected_work);
wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter);
- local->smps_mode = IEEE80211_SMPS_OFF;
wiphy_work_init(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
@@ -980,7 +1119,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* as much, e.g. monitoring beacons would be hard if we
* might not even know which link is active at which time.
*/
- if (WARN_ON(!local->use_chanctx))
+ if (WARN_ON(local->emulate_chanctx))
return -EINVAL;
if (WARN_ON(!local->ops->link_info_changed))
@@ -1024,7 +1163,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
return -EINVAL;
#endif
- if (!local->use_chanctx) {
+ if (local->emulate_chanctx) {
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
@@ -1090,11 +1229,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
&sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
- if (!local->use_chanctx && !local->_oper_chandef.chan) {
+ local->monitor_chanreq.oper = dflt_chandef;
+ if (local->emulate_chanctx) {
+ local->dflt_chandef = dflt_chandef;
local->hw.conf.chandef = dflt_chandef;
- local->_oper_chandef = dflt_chandef;
}
- local->monitor_chandef = dflt_chandef;
}
channels += sband->n_channels;
@@ -1115,8 +1254,26 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_vht = supp_vht || sband->vht_cap.vht_supported;
for_each_sband_iftype_data(sband, i, iftd) {
+ u8 he_40_mhz_cap;
+
supp_he = supp_he || iftd->he_cap.has_he;
supp_eht = supp_eht || iftd->eht_cap.has_eht;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+
+ /* currently no support for HE client where HT has 40 MHz but HE does not */
+ if (iftd->he_cap.has_he &&
+ iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT)) &&
+ sband->ht_cap.ht_supported &&
+ sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap))
+ return -EINVAL;
}
/* HT, VHT, HE require QoS, thus >= 4 queues */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fccbcde3359a..32475da98d73 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -97,7 +97,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.bss_conf.basic_rates != basic_rates)
return false;
- cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+ cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chanreq.oper.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
@@ -107,10 +107,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
ie->vht_operation, ie->ht_operation,
&sta_chan_def);
- ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, ie->eht_operation,
+ ieee80211_chandef_he_6ghz_oper(sdata->local, ie->he_operation,
+ ie->eht_operation,
&sta_chan_def);
- if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
+ if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chanreq.oper,
&sta_chan_def))
return false;
@@ -435,9 +436,9 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->ht_cap.ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -476,16 +477,16 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!ht_cap->ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
- ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
+ ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chanreq.oper,
sdata->vif.bss_conf.ht_operation_mode,
false);
@@ -507,9 +508,9 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->vht_cap.vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
@@ -548,9 +549,9 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!vht_cap->vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
@@ -558,7 +559,7 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, vht_cap,
- &sdata->vif.bss_conf.chandef);
+ &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -566,29 +567,18 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
-
- if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
-
- return 0;
+ return ieee80211_put_he_cap(skb, sdata, sband, NULL);
}
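
The hunk above collapses the open-coded tailroom check, skb_put() and element build into a single ieee80211_put_he_cap() call. As a minimal sketch of that put-element pattern (put_example_elem() is hypothetical, not the actual mac80211 helper; assumes the usual skbuff/ieee80211 headers):

static int put_example_elem(struct sk_buff *skb, const u8 *data, u8 len)
{
	u8 *pos;

	/* the helper owns the tailroom check and the skb_put(), so
	 * callers like mesh_add_he_cap_ie() shrink to a width check
	 * plus one call
	 */
	if (skb_tailroom(skb) < 2 + len)	/* Element ID + Length + body */
		return -ENOMEM;

	pos = skb_put(skb, 2 + len);
	*pos++ = WLAN_EID_VENDOR_SPECIFIC;	/* example element ID only */
	*pos++ = len;
	memcpy(pos, data, len);
	return 0;
}
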
int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
@@ -605,20 +595,20 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + sizeof(struct ieee80211_he_operation);
- if (sdata->vif.bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
len += sizeof(struct ieee80211_he_6ghz_oper);
if (skb_tailroom(skb) < len)
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chandef);
+ ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -639,37 +629,25 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
if (!iftd)
return 0;
- ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb);
+ ieee80211_put_he_6ghz_cap(skb, sdata, sdata->deflink.smps_mode);
return 0;
}
int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- if (!he_cap || !eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + ie_len, false);
-
- return 0;
+ return ieee80211_put_eht_cap(skb, sdata, sband, NULL);
}
int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
@@ -685,9 +663,9 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
@@ -697,7 +675,7 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chandef, eht_cap);
+ ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chanreq.oper, eht_cap);
return 0;
}
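
The length sum in the hunk above (its continuation is cut off by the hunk boundary) follows the standard extended-element layout; decomposed, with the tail left symbolic since it is not shown here:

	size_t len = 2	/* Element ID + Length octets */
		   + 1	/* Element ID Extension (EHT Operation) */
		   + offsetof(struct ieee80211_eht_operation, optional)
			/* fixed params incl. the basic MCS/NSS set */
		   + oper_info_len;	/* hypothetical name for the
					 * operation info portion */
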
@@ -745,9 +723,9 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
return;
if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return;
sdata->vif.bss_conf.he_support = true;
@@ -966,24 +944,22 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
int head_len, tail_len;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- struct ieee80211_chanctx_conf *chanctx_conf;
struct mesh_csa_settings *csa;
- enum nl80211_band band;
+ const struct ieee80211_supported_band *sband;
u8 ie_len_he_cap, ie_len_eht_cap;
u8 *pos;
struct ieee80211_sub_if_data *sdata;
int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon);
+ u32 rate_flags;
sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
- band = chanctx_conf->def.chan->band;
- rcu_read_unlock();
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ sband = ieee80211_get_sband(sdata);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
head_len = hdr_len +
2 + /* NULL SSID */
/* Channel Switch Announcement */
@@ -1107,7 +1083,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
}
rcu_read_unlock();
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_SUPP_RATES) ||
mesh_add_ds_params_ie(sdata, skb))
goto out_free;
@@ -1118,7 +1096,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
skb_trim(skb, 0);
bcn->tail = bcn->head + bcn->head_len;
- if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_ht_cap_ie(sdata, skb) ||
mesh_add_ht_oper_ie(sdata, skb) ||
@@ -1234,7 +1214,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
netif_carrier_off(sdata->dev);
/* flush STAs and mpaths on this iface */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
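
A note on the sta_info_flush() change above: the function gained a link ID argument in this series, and -1 at this call site reads as "not limited to any specific link, flush all stations", keeping the pre-MLO behaviour for mesh interfaces (an inference from this call site, not a quote of the new kerneldoc):

	/* -1: no specific link ID, flush every station on the iface */
	sta_info_flush(sdata, -1);
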
@@ -1276,11 +1256,12 @@ static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
* unavailable.
*/
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef,
+ &sdata->vif.bss_conf.chanreq.oper,
NL80211_IFTYPE_MESH_POINT);
if (err > 0)
cfg80211_radar_event(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef, GFP_ATOMIC);
+ &sdata->vif.bss_conf.chanreq.oper,
+ GFP_ATOMIC);
}
static bool
@@ -1292,7 +1273,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_supported_band *sband;
int err;
- ieee80211_conn_flags_t conn_flags = 0;
+ struct ieee80211_conn_settings conn = ieee80211_conn_settings_unlimited;
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -1301,15 +1282,18 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (!sband)
return false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_40:
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
default:
break;
@@ -1321,8 +1305,8 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
- vht_cap_info,
- conn_flags, sdata->vif.addr,
+ vht_cap_info, &conn,
+ sdata->vif.addr,
&csa_ie);
if (err < 0)
return false;
@@ -1335,7 +1319,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
ieee80211_mesh_csa_mark_radar(sdata);
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
params.count = csa_ie.count;
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
@@ -1371,7 +1355,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
mcsa_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
@@ -1551,7 +1535,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
*changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
- sdata->vif.bss_conf.chandef.chan->center_freq);
+ sdata->vif.bss_conf.chanreq.oper.chan->center_freq);
return 0;
}
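
All the mesh.c hunks above follow one substitution: bss_conf.chandef becomes bss_conf.chanreq.oper. A simplified sketch of the request type behind that (field roles inferred from the call sites in this diff; see struct ieee80211_chan_req in the tree for the real definition):

struct chan_req_sketch {
	struct cfg80211_chan_def oper;	/* channel we actually operate on */
	struct cfg80211_chan_def ap;	/* AP's wider definition, kept only
					 * for EHT wider-bandwidth OFDMA;
					 * .chan is NULL otherwise */
};
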
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ad8469293d71..d913ce7ba72e 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -94,6 +94,7 @@ enum mesh_deferred_task_flags {
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
* @path_change_count: the number of path changes to destination
+ * @fast_tx_check: timestamp of last fast-xmit enable attempt
*
*
* The dst address is unique in the mesh path table. Since the mesh_path is
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 735edde1bd81..91b55d6a68b9 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -600,11 +600,10 @@ unlock_sta:
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
- struct mesh_tx_cache *cache;
+ struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
return;
@@ -622,7 +621,6 @@ void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (entry->mpath == mpath)
@@ -637,7 +635,6 @@ void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (rcu_access_pointer(entry->mpath->next_hop) == sta)
@@ -651,7 +648,6 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
if (entry)
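
The mesh_pathtbl.c hunks are pure cleanup: the cache pointer is initialized at its declaration and the redundant re-assignments go away. The flush helpers all share one shape, roughly as below (should_drop() is a hypothetical stand-in for the per-helper predicate; the free call names the existing teardown helper as an assumption):

static void flush_matching(struct mesh_tx_cache *cache)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (should_drop(entry))	/* mpath, next-hop STA or addr match */
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}
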
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 28bf794f67f8..8f2b492a9fe9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*/
#include <linux/gfp.h>
@@ -163,7 +163,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
u16 ht_opmode;
bool non_ht_sta = false, ht20_sta = false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
@@ -196,7 +196,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
if (non_ht_sta)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
else if (ht20_sta &&
- sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ sdata->vif.bss_conf.chanreq.oper.width > NL80211_CHAN_WIDTH_20)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
else
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -226,10 +226,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot);
int err = -ENOMEM;
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* capability info */
@@ -266,14 +264,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
struct ieee80211_supported_band *sband;
- enum nl80211_band band;
+ u32 rate_flags, basic_rates;
sband = ieee80211_get_sband(sdata);
if (!sband) {
err = -EINVAL;
goto free;
}
- band = sband->band;
/* capability info */
pos = skb_put_zero(skb, 2);
@@ -282,8 +279,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
put_unaligned_le16(sta->sta.aid, pos);
}
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
- ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+ basic_rates = sdata->vif.bss_conf.basic_rates;
+
+ if (ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_SUPP_RATES) ||
+ ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_meshid_ie(sdata, skb) ||
mesh_add_meshconf_ie(sdata, skb))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2022a26eb881..47a2cba8313f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -46,6 +46,8 @@
#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS msecs_to_jiffies(100)
#define IEEE80211_ADV_TTLM_ST_UNDERFLOW 0xff00
+#define IEEE80211_NEG_TTLM_REQ_TIMEOUT (HZ / 5)
+
static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
@@ -92,84 +94,6 @@ MODULE_PARM_DESC(probe_wait_ms,
#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/*
- * Extract from the given disabled subchannel bitmap (raw format
- * from the EHT Operation Element) the bits for the subchannel
- * we're using right now.
- */
-static u16
-ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper,
- struct cfg80211_chan_def *chandef, u16 bitmap)
-{
- struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
- struct cfg80211_chan_def ap_chandef = *chandef;
- u32 ap_center_freq, local_center_freq;
- u32 ap_bw, local_bw;
- int ap_start_freq, local_start_freq;
- u16 shift, mask;
-
- if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) ||
- !(eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
- return 0;
-
- /* set 160/320 supported to get the full AP definition */
- ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- true, true, &ap_chandef);
- ap_center_freq = ap_chandef.center_freq1;
- ap_bw = 20 * BIT(u8_get_bits(info->control,
- IEEE80211_EHT_OPER_CHAN_WIDTH));
- ap_start_freq = ap_center_freq - ap_bw / 2;
- local_center_freq = chandef->center_freq1;
- local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width));
- local_start_freq = local_center_freq - local_bw / 2;
- shift = (local_start_freq - ap_start_freq) / 20;
- mask = BIT(local_bw / 20) - 1;
-
- return (bitmap >> shift) & mask;
-}
-
-/*
- * Handle the puncturing bitmap, possibly downgrading bandwidth to get a
- * valid bitmap.
- */
-static void
-ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u16 bitmap, u64 *changed)
-{
- struct cfg80211_chan_def *chandef = &link->conf->chandef;
- struct ieee80211_local *local = link->sdata->local;
- u16 extracted;
- u64 _changed = 0;
-
- if (!changed)
- changed = &_changed;
-
- while (chandef->width > NL80211_CHAN_WIDTH_40) {
- extracted =
- ieee80211_extract_dis_subch_bmap(eht_oper, chandef,
- bitmap);
-
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw,
- DISALLOW_PUNCTURING)))
- break;
- link->u.mgd.conn_flags |=
- ieee80211_chandef_downgrade(chandef);
- *changed |= BSS_CHANGED_BANDWIDTH;
- }
-
- if (chandef->width <= NL80211_CHAN_WIDTH_40)
- extracted = 0;
-
- if (link->conf->eht_puncturing != extracted) {
- link->conf->eht_puncturing = extracted;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-}
-
-/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
@@ -223,77 +147,84 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
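
The ecw2cw() context above is worth a worked value or two: it expands the 802.11 exponent form of the contention window, cw = 2^ecw - 1, so:

	/* ecw2cw(): cw = 2^ecw - 1, e.g. ecw 0 -> 0, 4 -> 15, 10 -> 1023 */
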
-static ieee80211_conn_flags_t
-ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_link_data *link,
- ieee80211_conn_flags_t conn_flags,
- struct ieee80211_supported_band *sband,
- struct ieee80211_channel *channel,
- u32 vht_cap_info,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- struct cfg80211_chan_def *chandef, bool tracking)
+static enum ieee80211_conn_mode
+ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ u32 vht_cap_info,
+ const struct ieee802_11_elems *elems,
+ bool ignore_ht_channel_mismatch,
+ const struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
{
+ const struct ieee80211_ht_operation *ht_oper = elems->ht_operation;
+ const struct ieee80211_vht_operation *vht_oper = elems->vht_operation;
+ const struct ieee80211_he_operation *he_oper = elems->he_operation;
+ const struct ieee80211_eht_operation *eht_oper = elems->eht_operation;
+ struct ieee80211_supported_band *sband =
+ sdata->local->hw.wiphy->bands[channel->band];
struct cfg80211_chan_def vht_chandef;
- struct ieee80211_sta_ht_cap sta_ht_cap;
- ieee80211_conn_flags_t ret;
+ bool no_vht = false;
u32 ht_cfreq;
- memset(chandef, 0, sizeof(struct cfg80211_chan_def));
- chandef->chan = channel;
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = channel->center_freq;
- chandef->freq1_offset = channel->freq_offset;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = channel,
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .center_freq1 = channel->center_freq,
+ .freq1_offset = channel->freq_offset,
+ };
- if (channel->band == NL80211_BAND_6GHZ) {
- if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper,
- chandef)) {
- mlme_dbg(sdata,
- "bad 6 GHz operation, disabling HT/VHT/HE/EHT\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- } else {
- ret = 0;
- }
- vht_chandef = *chandef;
- goto out;
- } else if (sband->band == NL80211_BAND_S1GHZ) {
- if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) {
+ /* get special S1G case out of the way */
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ if (!ieee80211_chandef_s1g_oper(elems->s1g_oper, chandef)) {
sdata_info(sdata,
"Missing S1G Operation Element? Trying operating == primary\n");
chandef->width = ieee80211_s1g_channel_width(channel);
}
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
- goto out;
+ return IEEE80211_CONN_MODE_S1G;
}
- memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
- ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+ /* get special 6 GHz case out of the way */
+ if (sband->band == NL80211_BAND_6GHZ) {
+ enum ieee80211_conn_mode mode = IEEE80211_CONN_MODE_EHT;
- if (!ht_oper || !sta_ht_cap.ht_supported) {
- mlme_dbg(sdata, "HT operation missing / HT not supported\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ /* this is an error */
+ if (conn->mode < IEEE80211_CONN_MODE_HE)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!elems->he_6ghz_capa || !elems->he_cap) {
+ sdata_info(sdata,
+ "HE 6 GHz AP is missing HE/HE 6 GHz band capability\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ if (!eht_oper || !elems->eht_cap) {
+ eht_oper = NULL;
+ mode = IEEE80211_CONN_MODE_HE;
+ }
+
+ if (!ieee80211_chandef_he_6ghz_oper(sdata->local, he_oper,
+ eht_oper, chandef)) {
+ sdata_info(sdata, "bad HE/EHT 6 GHz operation\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ return mode;
}
+ /* now we have the progression HT, VHT, ... */
+ if (conn->mode < IEEE80211_CONN_MODE_HT)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!ht_oper || !elems->ht_cap_elem)
+ return IEEE80211_CONN_MODE_LEGACY;
+
chandef->width = NL80211_CHAN_WIDTH_20;
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (!tracking && channel->center_freq != ht_cfreq) {
+ if (!ignore_ht_channel_mismatch && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -305,36 +236,22 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
"Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
channel->center_freq, ht_cfreq,
ht_oper->primary_chan, channel->band);
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ return IEEE80211_CONN_MODE_LEGACY;
}
- /* check 40 MHz support, if we have it */
- if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- ieee80211_chandef_ht_oper(ht_oper, chandef);
- } else {
- mlme_dbg(sdata, "40 MHz not supported\n");
- /* 40 MHz (and 80 MHz) must be supported for VHT */
- ret = IEEE80211_CONN_DISABLE_VHT;
- /* also mark 40 MHz disabled */
- ret |= IEEE80211_CONN_DISABLE_40MHZ;
- goto out;
- }
+ ieee80211_chandef_ht_oper(ht_oper, chandef);
- if (!vht_oper || !sband->vht_cap.vht_supported) {
- mlme_dbg(sdata, "VHT operation missing / VHT not supported\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
- }
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return IEEE80211_CONN_MODE_HT;
vht_chandef = *chandef;
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- he_oper &&
- (le32_to_cpu(he_oper->he_oper_params) &
- IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
+
+ /*
+ * having he_cap/he_oper parsed out implies we're at
+ * least operating as HE STA
+ */
+ if (elems->he_cap && he_oper &&
+ he_oper->he_oper_params & cpu_to_le32(IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
struct ieee80211_vht_operation he_oper_vht_cap;
/*
@@ -347,253 +264,614 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
&he_oper_vht_cap, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE))
- sdata_info(sdata,
- "HE AP VHT information is invalid, disabling HE\n");
- ret = IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "HE AP VHT information is invalid, disabling HE\n");
+ /* this will cause us to re-parse as VHT STA */
+ return IEEE80211_CONN_MODE_VHT;
}
+ } else if (!vht_oper || !elems->vht_cap_elem) {
+ if (sband->band == NL80211_BAND_5GHZ) {
+ sdata_info(sdata,
+ "VHT information is missing, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
+ }
+ no_vht = true;
+ } else if (sband->band == NL80211_BAND_2GHZ) {
+ no_vht = true;
} else if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
vht_cap_info,
vht_oper, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ sdata_info(sdata,
+ "AP VHT information is invalid, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
+ sdata_info(sdata,
+ "AP VHT information doesn't match HT, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
- ret = 0;
- goto out;
- }
+ *chandef = vht_chandef;
- if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information doesn't match HT, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ /* stick to current max mode if we or the AP don't have HE */
+ if (conn->mode < IEEE80211_CONN_MODE_HE ||
+ !elems->he_operation || !elems->he_cap) {
+ if (no_vht)
+ return IEEE80211_CONN_MODE_HT;
+ return IEEE80211_CONN_MODE_VHT;
}
- *chandef = vht_chandef;
+ /* stick to HE if we or the AP don't have EHT */
+ if (conn->mode < IEEE80211_CONN_MODE_EHT ||
+ !eht_oper || !elems->eht_cap)
+ return IEEE80211_CONN_MODE_HE;
/*
* handle the case that the EHT operation indicates that it holds EHT
* operation information (in case that the channel width differs from
* the channel width reported in HT/VHT/HE).
*/
- if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
+ if (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
struct cfg80211_chan_def eht_chandef = *chandef;
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- eht_chandef.width ==
- NL80211_CHAN_WIDTH_160,
- false, &eht_chandef);
+ &eht_chandef);
+
+ eht_chandef.punctured =
+ ieee80211_eht_oper_dis_subchan_bitmap(eht_oper);
if (!cfg80211_chandef_valid(&eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is invalid, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information is invalid, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is incompatible, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information doesn't match HT/VHT/HE, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
*chandef = eht_chandef;
}
- ret = 0;
+ return IEEE80211_CONN_MODE_EHT;
+}
+
+static bool
+ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_he_operation *he_op)
+{
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
+ u16 mcs_80_map_tx, mcs_80_map_rx;
+ u16 ap_min_req_set;
+ int nss;
+
+ if (!he_cap)
+ return false;
+
+ /* mcs_nss is right after he_cap info */
+ he_mcs_nss_supp = (void *)(he_cap + 1);
+
+ mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
+ mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+
+ /* P802.11-REVme/D0.3
+ * 27.1.1 Introduction to the HE PHY
+ * ...
+ * An HE STA shall support the following features:
+ * ...
+ * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
+ * supported channel widths for HE SU PPDUs
+ */
+ if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ sdata_info(sdata,
+ "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
+ mcs_80_map_tx, mcs_80_map_rx);
+ return false;
+ }
+
+ if (!he_op)
+ return true;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
-out:
/*
- * When tracking the current AP, don't do any further checks if the
- * new chandef is identical to the one we're currently using for the
- * connection. This keeps us from playing ping-pong with regulatory,
- * without it the following can happen (for example):
- * - connect to an AP with 80 MHz, world regdom allows 80 MHz
- * - AP advertises regdom US
- * - CRDA loads regdom US with 80 MHz prohibited (old database)
- * - the code below detects an unsupported channel, downgrades, and
- * we disconnect from the AP in the caller
- * - disconnect causes CRDA to reload world regdomain and the game
- * starts anew.
- * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
+ */
+ if (!ap_min_req_set)
+ return true;
+
+ /* make sure the AP is consistent with itself
*
- * It seems possible that there are still scenarios with CSA or real
- * bandwidth changes where a this could happen, but those cases are
- * less common and wouldn't completely prevent using the AP.
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * A STA that is operating in an HE BSS shall be able to receive and
+ * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
+ * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
+ * MLME-START.request primitive and shall be able to receive at each of
+ * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
+ * NSS Set field in the HE Capabilities parameter of the MLME-START.request
+ * primitive
*/
- if (tracking &&
- cfg80211_chandef_identical(chandef, &link->conf->chandef))
- return ret;
+ for (nss = 8; nss > 0; nss--) {
+ u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ u8 ap_rx_val;
+ u8 ap_tx_val;
+
+ if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
- /* don't print the message below for VHT mismatch if VHT is disabled */
- if (ret & IEEE80211_CONN_DISABLE_VHT)
- vht_chandef = *chandef;
+ ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
+ ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+
+ if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
+ sdata_info(sdata,
+ "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
+ nss, ap_rx_val, ap_tx_val, ap_op_val);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_he_operation *he_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ u16 ap_min_req_set;
+ int i;
+
+ if (!sta_he_cap || !he_op)
+ return false;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
/*
- * Ignore the DISABLED flag when we're already connected and only
- * tracking the APs beacon for bandwidth changes - otherwise we
- * might get disconnected here if we connect to an AP, update our
- * regulatory information based on the AP's country IE and the
- * information we have is wrong/outdated and disables the channel
- * that we're actually using for the connection to the AP.
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
- while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- tracking ? 0 :
- IEEE80211_CHAN_DISABLED)) {
- if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- break;
+ if (!ap_min_req_set)
+ return true;
+
+ /* Need to go over for 80MHz, 160MHz and for 80+80 */
+ for (i = 0; i < 3; i++) {
+ const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
+ &sta_he_cap->he_mcs_nss_supp;
+ u16 sta_mcs_map_rx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
+ u16 sta_mcs_map_tx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
+ u8 nss;
+ bool verified = true;
+
+ /*
+ * For each bandwidth there is a maximum of 8 spatial streams
+ * possible. Each of the sta_mcs_map_* is a 16-bit struct built
+ * of 2 bits per NSS (1-8), with the values defined in enum
+ * ieee80211_he_mcs_support. Need to make sure STA TX and RX
+ * capabilities aren't less than the AP's minimum requirements
+ * for this HE BSS per SS.
+ * It is enough to find one such bandwidth that meets the requirements.
+ */
+ for (nss = 8; nss > 0; nss--) {
+ u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
+ u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
+ u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+
+ if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ /*
+ * Make sure the HE AP doesn't require MCSs that aren't
+ * supported by the client as required by spec
+ *
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
+ * a BSS, unless it supports (i.e., is able to both transmit and
+ * receive using) all of the <HE-MCS, NSS> tuples in the basic
+ * HE-MCS and NSS set.
+ */
+ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
+ verified = false;
+ break;
+ }
}
- ret |= ieee80211_chandef_downgrade(chandef);
+ if (verified)
+ return true;
+ }
+
+ /* If here, STA doesn't meet AP's HE min requirements */
+ return false;
+}
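
Both HE MCS checks above decode the same packed layout: a 16-bit map carrying one 2-bit ieee80211_he_mcs_support value per spatial stream, NSS 1 through 8, lowest stream in the lowest bits. A standalone sketch of the decode (assuming only linux/types.h):

static inline u8 he_mcs_for_nss(u16 mcs_map, int nss)
{
	/* nss in 1..8; two bits per stream */
	return (mcs_map >> (2 * (nss - 1))) & 3;
}
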
+
+static u8
+ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
+ const struct ieee80211_sta_eht_cap *sta_eht_cap,
+ unsigned int idx, int bw)
+{
+ u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
+ u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+
+ /* handle us being a 20 MHz-only EHT STA - with four values
+ * for MCS 0-7, 8-9, 10-11, 12-13.
+ */
+ if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
+ return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+
+ /* the others have MCS 0-9 together, rather than separately from 0-7 */
+ if (idx > 0)
+ idx--;
+
+ switch (bw) {
+ case 0:
+ return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
+ case 1:
+ if (!(he_phy_cap0 &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
+ case 2:
+ if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
}
- if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_HE))
- ret |= IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
+ WARN_ON(1);
+ return 0;
+}
- if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_EHT))
- ret |= IEEE80211_CONN_DISABLE_EHT;
+static bool
+ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_eht_operation *eht_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *sta_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
+ unsigned int i;
- if (chandef->width != vht_chandef.width && !tracking)
+ if (!sta_he_cap || !sta_eht_cap || !eht_op)
+ return false;
+
+ req = &eht_op->basic_mcs_nss;
+
+ for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
+ u8 req_rx_nss, req_tx_nss;
+ unsigned int bw;
+
+ req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_RX);
+ req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ for (bw = 0; bw < 3; bw++) {
+ u8 have, have_rx_nss, have_tx_nss;
+
+ have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
+ sta_eht_cap,
+ i, bw);
+ have_rx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_RX);
+ have_tx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ if (req_rx_nss > have_rx_nss ||
+ req_tx_nss > have_tx_nss)
+ return false;
+ }
+ }
+
+ return true;
+}
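
A subtle point in ieee80211_get_eht_cap_mcs_nss() above: returning 0xff for an unsupported bandwidth really does "pass check", because under the nibble masks it decodes to 15 RX and 15 TX streams, more than any requirement can ask for. Illustrated with the kernel's existing field masks (u8_get_bits() is from linux/bitfield.h):

	u8 have_rx_nss = u8_get_bits(0xff, IEEE80211_EHT_MCS_NSS_RX);	/* 15 */
	u8 have_tx_nss = u8_get_bits(0xff, IEEE80211_EHT_MCS_NSS_TX);	/* 15 */
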
+
+static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ chandef, prohibited_flags))
+ return false;
+
+ if (chandef->punctured &&
+ ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING))
+ return false;
+
+ return true;
+}
+
+static struct ieee802_11_elems *
+ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_bss *cbss, int link_id,
+ struct ieee80211_chan_req *chanreq)
+{
+ const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies);
+ struct ieee80211_bss *bss = (void *)cbss->priv;
+ struct ieee80211_channel *channel = cbss->channel;
+ struct ieee80211_elems_parse_params parse_params = {
+ .link_id = -1,
+ .from_ap = true,
+ .start = ies->data,
+ .len = ies->len,
+ .mode = conn->mode,
+ };
+ struct ieee802_11_elems *elems;
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_chan_def ap_chandef;
+ enum ieee80211_conn_mode ap_mode;
+ int ret;
+
+again:
+ elems = ieee802_11_parse_elems_full(&parse_params);
+ if (!elems)
+ return ERR_PTR(-ENOMEM);
+
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
+ elems, false, conn, &ap_chandef);
+
+ mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
+ cbss->bssid, ieee80211_conn_mode_str(ap_mode));
+
+ /* this should be impossible since parsing depends on our mode */
+ if (WARN_ON(ap_mode > conn->mode)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ sband = sdata->local->hw.wiphy->bands[channel->band];
+
+ switch (channel->band) {
+ case NL80211_BAND_S1GHZ:
+ if (WARN_ON(ap_mode != IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ return elems;
+ case NL80211_BAND_6GHZ:
+ if (ap_mode < IEEE80211_CONN_MODE_HE) {
+ sdata_info(sdata,
+ "Rejecting non-HE 6/7 GHz connection");
+ ret = -EINVAL;
+ goto free;
+ }
+ break;
+ default:
+ if (WARN_ON(ap_mode == IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+
+ switch (ap_mode) {
+ case IEEE80211_CONN_MODE_S1G:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto free;
+ case IEEE80211_CONN_MODE_LEGACY:
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
+ case IEEE80211_CONN_MODE_HT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_40);
+ break;
+ case IEEE80211_CONN_MODE_VHT:
+ case IEEE80211_CONN_MODE_HE:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ break;
+ case IEEE80211_CONN_MODE_EHT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_320);
+ break;
+ }
+
+ conn->mode = ap_mode;
+ chanreq->oper = ap_chandef;
+
+ /* wider-bandwidth OFDMA is only done in EHT */
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW))
+ chanreq->ap = ap_chandef;
+ else
+ chanreq->ap.chan = NULL;
+
+ while (!ieee80211_chandef_usable(sdata, &chanreq->oper,
+ IEEE80211_CHAN_DISABLED)) {
+ if (WARN_ON(chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ ieee80211_chanreq_downgrade(chanreq, conn);
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_HE)) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_EHT)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
+
+ if (chanreq->oper.width != ap_chandef.width || ap_mode != conn->mode)
sdata_info(sdata,
- "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
+ "regulatory prevented using AP config, downgraded\n");
- WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
- return ret;
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ (!ieee80211_verify_peer_he_mcs_support(sdata, (void *)elems->he_cap,
+ elems->he_operation) ||
+ !ieee80211_verify_sta_he_mcs_support(sdata, sband,
+ elems->he_operation))) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ sdata_info(sdata, "required MCSes not supported, disabling HE\n");
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !ieee80211_verify_sta_eht_mcs_support(sdata, sband,
+ elems->eht_operation)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ sdata_info(sdata, "required MCSes not supported, disabling EHT\n");
+ }
+
+ /* the mode can only decrease, so this must terminate */
+ if (ap_mode != conn->mode)
+ goto again;
+
+ mlme_link_id_dbg(sdata, link_id,
+ "connecting with %s mode, max bandwidth %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
+
+ if (WARN_ON_ONCE(!cfg80211_chandef_valid(&chanreq->oper))) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return elems;
+free:
+ kfree(elems);
+ return ERR_PTR(ret);
}
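
The again: loop in ieee80211_determine_chan_mode() above is a fixed-point iteration: parse the elements under the current mode limit, let the MCS and regulatory checks lower the mode, and re-parse until the parsed and checked modes agree; it terminates because the mode only ever decreases over a finite enum. In outline (parse_elems() and apply_checks() are hypothetical stand-ins for the parse and verification steps):

enum mode { LEGACY, HT, VHT, HE, EHT };

enum mode parse_elems(enum mode limit);	/* result never exceeds limit */
enum mode apply_checks(enum mode m);	/* result never exceeds m */

static enum mode determine(enum mode limit)
{
	enum mode ap_mode, mode = limit;

	do {
		ap_mode = parse_elems(mode);
		mode = apply_checks(ap_mode);	/* may lower it further */
	} while (mode != ap_mode);

	return mode;
}
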
static int ieee80211_config_bw(struct ieee80211_link_data *link,
- const struct ieee80211_ht_cap *ht_cap,
- const struct ieee80211_vht_cap *vht_cap,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- const u8 *bssid, u64 *changed)
+ struct ieee802_11_elems *elems,
+ bool update, u64 *changed)
{
+ struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_channel *chan = link->conf->chandef.chan;
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[chan->band];
- struct cfg80211_chan_def chandef;
- u16 ht_opmode;
- ieee80211_conn_flags_t flags;
+ struct ieee80211_chan_req chanreq = {};
+ enum ieee80211_conn_mode ap_mode;
u32 vht_cap_info = 0;
+ u16 ht_opmode;
int ret;
- /* if HT was/is disabled, don't track any bandwidth changes */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT || !ht_oper)
+ /* don't track any bandwidth changes in legacy/S1G modes */
+ if (link->u.mgd.conn.mode == IEEE80211_CONN_MODE_LEGACY ||
+ link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G)
return 0;
- /* don't check VHT if we associated as non-VHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
- vht_oper = NULL;
+ if (elems->vht_cap_elem)
+ vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
- /* don't check HE if we associated as non-HE station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE ||
- !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- he_oper = NULL;
- eht_oper = NULL;
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info,
+ elems, true, &link->u.mgd.conn,
+ &chanreq.ap);
+
+ if (ap_mode != link->u.mgd.conn.mode) {
+ link_info(link,
+ "AP appears to change mode (expected %s, found %s), disconnect\n",
+ ieee80211_conn_mode_str(link->u.mgd.conn.mode),
+ ieee80211_conn_mode_str(ap_mode));
+ return -EINVAL;
}
- /* don't check EHT if we associated as non-EHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT ||
- !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
- eht_oper = NULL;
+ chanreq.oper = chanreq.ap;
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT ||
+ sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)
+ chanreq.ap.chan = NULL;
/*
- * if bss configuration changed store the new one -
+ * if HT operation mode changed store the new one -
* this may be applicable even if channel is identical
*/
- ht_opmode = le16_to_cpu(ht_oper->operation_mode);
- if (link->conf->ht_operation_mode != ht_opmode) {
- *changed |= BSS_CHANGED_HT;
- link->conf->ht_operation_mode = ht_opmode;
+ if (elems->ht_operation) {
+ ht_opmode = le16_to_cpu(elems->ht_operation->operation_mode);
+ if (link->conf->ht_operation_mode != ht_opmode) {
+ *changed |= BSS_CHANGED_HT;
+ link->conf->ht_operation_mode = ht_opmode;
+ }
}
- if (vht_cap)
- vht_cap_info = le32_to_cpu(vht_cap->vht_cap_info);
-
- /* calculate new channel (type) based on HT/VHT/HE operation IEs */
- flags = ieee80211_determine_chantype(sdata, link,
- link->u.mgd.conn_flags,
- sband, chan, vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper, &chandef, true);
-
/*
* Downgrade the new channel if we associated with restricted
- * capabilities. For example, if we associated as a 20 MHz STA
- * to a 40 MHz AP (due to regulatory, capabilities or config
- * reasons) then switching to a 40 MHz channel now won't do us
- * any good -- we couldn't use it with the AP.
+ * bandwidth capabilities. For example, if we associated as a
+ * 20 MHz STA to a 40 MHz AP (due to regulatory, capabilities
+ * or config reasons) then switching to a 40 MHz channel now
+ * won't do us any good -- we couldn't use it with the AP.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_80P80)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_160)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_40MHZ &&
- chandef.width > NL80211_CHAN_WIDTH_20)
- flags |= ieee80211_chandef_downgrade(&chandef);
-
- if (cfg80211_chandef_identical(&chandef, &link->conf->chandef))
+ while (link->u.mgd.conn.bw_limit <
+ ieee80211_min_bw_limit_from_chandef(&chanreq.oper))
+ ieee80211_chandef_downgrade(&chanreq.oper, NULL);
+
+ if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq))
return 0;
link_info(link,
- "AP %pM changed bandwidth, new config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
- link->u.mgd.bssid, chandef.chan->center_freq,
- chandef.chan->freq_offset, chandef.width,
- chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
-
- if (flags != (link->u.mgd.conn_flags &
- (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT |
- IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ |
- IEEE80211_CONN_DISABLE_320MHZ)) ||
- !cfg80211_chandef_valid(&chandef)) {
+ "AP %pM changed bandwidth, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
+ link->u.mgd.bssid, chanreq.oper.chan->center_freq,
+ chanreq.oper.chan->freq_offset, chanreq.oper.width,
+ chanreq.oper.center_freq1, chanreq.oper.freq1_offset,
+ chanreq.oper.center_freq2);
+
+ if (!cfg80211_chandef_valid(&chanreq.oper)) {
sdata_info(sdata,
- "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
- link->u.mgd.bssid, flags, ifmgd->flags);
+ "AP %pM changed caps/bw in a way we can't support - disconnect\n",
+ link->u.mgd.bssid);
return -EINVAL;
}
- ret = ieee80211_link_change_bandwidth(link, &chandef, changed);
+ if (!update) {
+ link->conf->chanreq = chanreq;
+ return 0;
+ }
+ /*
+ * We're tracking the current AP here, so don't do any further checks
+ * here. This keeps us from playing ping-pong with regulatory, without
+ * it the following can happen (for example):
+ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
+ * - AP advertises regdom US
+ * - CRDA loads regdom US with 80 MHz prohibited (old database)
+ * - we detect an unsupported channel and disconnect
+ * - disconnect causes CRDA to reload world regdomain and the game
+ * starts anew.
+ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ *
+ * It seems possible that there are still scenarios with CSA or real
+ * bandwidth changes where this could happen, but those cases are
+ * less common and wouldn't completely prevent using the AP.
+ */
+
+ ret = ieee80211_link_change_chanreq(link, &chanreq, changed);
if (ret) {
sdata_info(sdata,
"AP %pM changed bandwidth to incompatible one - disconnect\n",
@@ -612,7 +890,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
u8 *pos;
u32 flags = channel->flags;
@@ -647,7 +925,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
* capable of 40 MHz -- some broken APs will never fall
* back to trying to transmit in 20 MHz.
*/
- if (conn_flags & IEEE80211_CONN_DISABLE_40MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_20) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
@@ -686,7 +964,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
u8 *pos;
@@ -703,16 +981,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
/* determine capability flags */
cap = vht_cap.cap;
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ) {
- u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
-
- cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
- bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
- cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- }
-
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_80) {
cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
}
@@ -769,79 +1038,12 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
return mu_mimo_owner;
}
-/* This function determines HE capability flags for the association
- * and builds the IE.
- */
-static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband,
- enum ieee80211_smps_mode smps_mode,
- ieee80211_conn_flags_t conn_flags)
-{
- u8 *pos, *pre_he_pos;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 he_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (WARN_ON(!he_cap))
- return;
-
- /* get a max size estimate */
- he_cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, he_cap_size);
- pre_he_pos = pos;
- pos = ieee80211_ie_build_he_cap(conn_flags,
- pos, he_cap, pos + he_cap_size);
- /* trim excess if any */
- skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
-
- ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb);
-}
-
-static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband)
-{
- u8 *pos;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 eht_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- /*
- * EHT capabilities element is only added if the HE capabilities element
- * was added so assume that 'he_cap' is valid and don't check it.
- */
- if (WARN_ON(!he_cap || !eht_cap))
- return;
-
- eht_cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, eht_cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size,
- false);
-}
-
static void ieee80211_assoc_add_rates(struct sk_buff *skb,
enum nl80211_chan_width width,
struct ieee80211_supported_band *sband,
struct ieee80211_mgd_assoc_data *assoc_data)
{
- unsigned int rates_len, supp_rates_len;
- u32 rates = 0;
- int i, count;
- u8 *pos;
+ u32 rates;
if (assoc_data->supp_rates_len) {
/*
@@ -850,53 +1052,23 @@ static void ieee80211_assoc_add_rates(struct sk_buff *skb,
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_parse_bitrates(width, sband,
- assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- &rates);
+ ieee80211_parse_bitrates(width, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case the AP did not provide any supported rates information
* before association, we send information element(s) with
* all rates that we support.
*/
- rates_len = sband->n_bitrates;
- for (i = 0; i < sband->n_bitrates; i++)
- rates |= BIT(i);
- }
-
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- if (++count == 8)
- break;
- }
+ rates = ~0;
}
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate;
-
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- }
- }
- }
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_EXT_SUPP_RATES);
}
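
The rewritten ieee80211_assoc_add_rates() above leans on a bitmask flip: rates collects the sband->bitrates[] indexes to advertise, and the helper's fifth argument reads as a skip mask at this call site, so passing ~rates emits exactly the selected entries, and rates = ~0 advertises the full set when the AP supplied no list. In short (the skip-mask reading is an inference from this diff, not the helper's kerneldoc):

	u32 rates = ~0;		/* default: every entry in sband->bitrates[] */
	u32 skip = ~rates;	/* == 0 here: skip nothing */
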
static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb,
@@ -1133,11 +1305,11 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HT) {
ieee80211_add_ht_ie(sdata, skb,
assoc_data->link[link_id].ap_ht_param,
sband, chan, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY);
}
@@ -1147,37 +1319,28 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_VHT &&
+ sband->vht_cap.vht_supported) {
bool mu_mimo_owner =
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->link[link_id].ap_vht_cap,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
if (link)
link->conf->mu_mimo_owner = mu_mimo_owner;
ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY);
}
- /*
- * If AP doesn't support HT, mark HE and EHT as disabled.
- * If on the 5GHz band, make sure it supports VHT.
- */
- if (assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT ||
- (sband->band == NL80211_BAND_5GHZ &&
- assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT))
- assoc_data->link[link_id].conn_flags |=
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
-
/* if present, add any custom IEs that go before HE */
offset = ieee80211_add_before_he_elems(skb, extra_elems,
extra_elems_len,
offset);
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- ieee80211_add_he_ie(sdata, skb, sband, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_put_he_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
+ ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
}
/*
@@ -1185,7 +1348,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
* calling ieee80211_assoc_add_ml_elem(), so add this one if
* we're going to put it after the ML element
*/
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY);
if (link_id == assoc_data->assoc_link_id)
@@ -1195,8 +1358,9 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
/* crash if somebody gets it wrong */
present_elems = NULL;
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
- ieee80211_add_eht_ie(sdata, skb, sband);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
+ ieee80211_put_eht_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
if (sband->band == NL80211_BAND_S1GHZ) {
ieee80211_add_aid_request_ie(sdata, skb);
@@ -1206,9 +1370,6 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
- if (link)
- link->u.mgd.conn_flags = assoc_data->link[link_id].conn_flags;
-
return offset;
}
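Throughout this hunk the old IEEE80211_CONN_DISABLE_* flag tests become ordered comparisons on conn.mode. A minimal sketch of the pattern, with stand-in enumerator names: because capability is a single ordered value, "is HT usable" becomes mode >= HT and "disable HE and above" becomes clamping the mode, with no way to reach an inconsistent flag combination.

    #include <stdio.h>

    enum conn_mode { MODE_LEGACY, MODE_HT, MODE_VHT, MODE_HE, MODE_EHT };

    static enum conn_mode clamp_mode(enum conn_mode cur, enum conn_mode max)
    {
        return cur < max ? cur : max;
    }

    int main(void)
    {
        enum conn_mode mode = MODE_EHT;

        /* e.g. AP lacks usable HE operation: limit to VHT and below */
        mode = clamp_mode(mode, MODE_VHT);

        printf("add HT elems: %d, add HE elems: %d\n",
               mode >= MODE_HT, mode >= MODE_HE);   /* prints 1, 0 */
        return 0;
    }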
@@ -1318,8 +1479,6 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA);
skb_put_data(skb, &eml_capa, sizeof(eml_capa));
}
- /* need indication from userspace to support this */
- mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP);
skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
@@ -1499,7 +1658,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
/* Set MBSSID support for HE AP if needed */
if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
ext_capa && ext_capa->datalen >= 3)
ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
@@ -1544,7 +1703,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* for some reason check it and want it to be set, set the bit for all
* pre-EHT connections as we used to do.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT)
capab |= WLAN_CAPABILITY_ESS;
/* add the elements for the assoc (main) link */
@@ -1741,8 +1900,8 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
return;
}
- if (!cfg80211_chandef_identical(&link->conf->chandef,
- &link->csa_chandef)) {
+ if (!ieee80211_chanreq_identical(&link->conf->chanreq,
+ &link->csa_chanreq)) {
sdata_info(sdata,
"failed to finalize channel switch, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
@@ -1767,19 +1926,14 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
WARN_ON(!link->conf->csa_active);
- if (link->csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- link->csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
link->conf->csa_active = false;
link->u.mgd.csa_waiting_bcn = false;
- /*
- * If the CSA IE is still present on the beacon after the switch,
- * we need to consider it as a new CSA (possibly to self).
- */
- link->u.mgd.beacon_crc_valid = false;
ret = drv_post_channel_switch(link);
if (ret) {
@@ -1790,8 +1944,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef,
- link->link_id, 0);
+ cfg80211_ch_switch_notify(sdata->dev, &link->reserved.oper,
+ link->link_id);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
@@ -1838,14 +1992,15 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
ieee80211_link_unreserve_chanctx(link);
- if (link->csa_block_tx)
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_tx = false;
+ }
- link->csa_block_tx = false;
link->conf->csa_active = false;
- drv_abort_channel_switch(sdata);
+ drv_abort_channel_switch(link);
}
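csa_blocked_tx now lives on the interface (sdata) rather than on the link, and is set and cleared strictly together with stopping and waking the queues. A tiny sketch of that invariant, with illustrative types and the queue calls elided:

    #include <stdbool.h>

    struct iface { bool csa_blocked_tx; };

    static void csa_block(struct iface *i, bool quiet_csa_in_hw)
    {
        /* only stop queues (and remember it) if hardware doesn't
         * handle quiescing itself */
        if (!quiet_csa_in_hw && !i->csa_blocked_tx) {
            /* stop_queues(i); */
            i->csa_blocked_tx = true;
        }
    }

    static void csa_unblock(struct iface *i)
    {
        /* waking and clearing happen in one place, so a wake can
         * never be missed or done twice */
        if (i->csa_blocked_tx) {
            /* wake_queues(i); */
            i->csa_blocked_tx = false;
        }
    }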
static void
@@ -1857,12 +2012,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = link->u.mgd.bss;
+ struct cfg80211_bss *cbss = link->conf->bss;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
enum nl80211_band current_band;
struct ieee80211_csa_ie csa_ie;
- struct ieee80211_channel_switch ch_switch;
+ struct ieee80211_channel_switch ch_switch = {
+ .link_id = link->link_id,
+ };
struct ieee80211_bss *bss;
unsigned long timeout;
int res;
@@ -1876,14 +2033,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
bss = (void *)cbss->priv;
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
bss->vht_cap_info,
- link->u.mgd.conn_flags,
+ &link->u.mgd.conn,
link->u.mgd.bssid, &csa_ie);
if (!res) {
ch_switch.timestamp = timestamp;
ch_switch.device_timestamp = device_timestamp;
ch_switch.block_tx = csa_ie.mode;
- ch_switch.chandef = csa_ie.chandef;
+ ch_switch.chandef = csa_ie.chanreq.oper;
ch_switch.count = csa_ie.count;
ch_switch.delay = csa_ie.max_switch_time;
}
@@ -1891,46 +2048,62 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
if (res < 0)
goto drop_connection;
- if (beacon && link->conf->csa_active &&
- !link->u.mgd.csa_waiting_bcn) {
- if (res)
+ if (link->conf->csa_active) {
+ /* already processing - disregard action frames */
+ if (!beacon)
+ return;
+
+ if (link->u.mgd.csa_waiting_bcn) {
+ ieee80211_chswitch_post_beacon(link);
+ /*
+ * If the CSA IE is still present in the beacon after
+ * the switch, we need to consider it as a new CSA
+ * (possibly to self) - this happens by not returning
+ * here so we'll get to the check below.
+ */
+ } else if (res) {
ieee80211_sta_abort_chanswitch(link);
- else
+ return;
+ } else {
drv_channel_switch_rx_beacon(sdata, &ch_switch);
- return;
- } else if (link->conf->csa_active || res) {
- /* disregard subsequent announcements if already processing */
- return;
+ return;
+ }
}
- if (link->conf->chandef.chan->band !=
- csa_ie.chandef.chan->band) {
+ /* nothing to do at all - no active CSA nor a new one */
+ if (res)
+ return;
+
+ if (link->conf->chanreq.oper.chan->band !=
+ csa_ie.chanreq.oper.chan->band) {
sdata_info(sdata,
"AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.center_freq2);
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
+ if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
"AP %pM switches to unsupported channel "
"(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
"disconnecting\n",
link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.chan->freq_offset,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.freq1_offset,
- csa_ie.chandef.center_freq2);
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.chan->freq_offset,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.freq1_offset,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (cfg80211_chandef_identical(&csa_ie.chandef,
- &link->conf->chandef) &&
+ if (cfg80211_chandef_identical(&csa_ie.chanreq.oper,
+ &link->conf->chanreq.oper) &&
(!csa_ie.mode || !beacon)) {
if (link->u.mgd.csa_ignored_same_chan)
return;
@@ -1942,12 +2115,13 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
}
/*
- * Drop all TDLS peers - either we disconnect or move to a different
- * channel from this point on. There's no telling what our peer will do.
+ * Drop all TDLS peers on the affected link - either we disconnect or
+ * move to a different channel from this point on. There's no telling
+ * what our peer will do.
* The TDLS WIDER_BW scenario is also problematic, as peers might now
* have an incompatible wider chandef.
*/
- ieee80211_teardown_tdls_peers(sdata);
+ ieee80211_teardown_tdls_peers(link);
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
@@ -1959,8 +2133,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (local->use_chanctx &&
- !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
+ if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
sdata_info(sdata,
"driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
@@ -1972,7 +2145,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
goto drop_connection;
}
- res = ieee80211_link_reserve_chanctx(link, &csa_ie.chandef,
+ res = ieee80211_link_reserve_chanctx(link, &csa_ie.chanreq,
chanctx->mode, false);
if (res) {
sdata_info(sdata,
@@ -1982,18 +2155,20 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
}
link->conf->csa_active = true;
- link->csa_chandef = csa_ie.chandef;
- link->csa_block_tx = csa_ie.mode;
+ link->csa_chanreq = csa_ie.chanreq;
link->u.mgd.csa_ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
- if (link->csa_block_tx)
+ if (csa_ie.mode &&
+ !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ sdata->csa_blocked_tx = true;
+ }
- cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
+ cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
link->link_id, csa_ie.count,
- csa_ie.mode, 0);
+ csa_ie.mode);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
@@ -2017,7 +2192,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
* reset when the disconnection worker runs.
*/
link->conf->csa_active = true;
- link->csa_block_tx = csa_ie.mode;
+ sdata->csa_blocked_tx =
+ csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
wiphy_work_queue(sdata->local->hw.wiphy,
&ifmgd->csa_connection_drop_work);
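The restructured receive path above can be summarized as a small decision function. This is only a sketch of the control flow, with csa_ie_absent standing in for a positive (no-CSA-element) return from ieee80211_parse_ch_switch_ie():

    #include <stdbool.h>

    enum csa_action { IGNORE, FINALIZE_THEN_RECHECK, ABORT, START_NEW };

    static enum csa_action csa_decide(bool csa_active, bool is_beacon,
                                      bool waiting_bcn, bool csa_ie_absent)
    {
        if (csa_active) {
            if (!is_beacon)
                return IGNORE;              /* action frames disregarded */
            if (waiting_bcn)
                return FINALIZE_THEN_RECHECK; /* post-switch beacon: finish,
                                               * then re-examine the same
                                               * beacon for a new CSA */
            if (csa_ie_absent)
                return ABORT;               /* AP dropped the announcement */
            return IGNORE;                  /* still counting down */
        }
        return csa_ie_absent ? IGNORE : START_NEW;
    }

The FINALIZE_THEN_RECHECK case is why the old "beacon_crc_valid = false" trick in ieee80211_chswitch_post_beacon() could be dropped: a CSA element still present after the switch is now seen directly by falling through to the normal checks.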
@@ -2414,7 +2590,7 @@ void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = link->conf->chandef;
+ struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
struct ieee80211_sub_if_data *sdata = link->sdata;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -2769,7 +2945,7 @@ static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link,
ieee80211_check_rate_mask(link);
- link->u.mgd.bss = cbss;
+ link->conf->bss = cbss;
memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN);
if (sdata->vif.p2p ||
@@ -2917,7 +3093,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ifmgd->associated = false;
/* other links will be destroyed */
- sdata->deflink.u.mgd.bss = NULL;
+ sdata->deflink.conf->bss = NULL;
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
netif_carrier_off(sdata->dev);
@@ -2992,7 +3168,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.cfg.ssid_len = 0;
/* remove AP and TDLS peers */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* finally reset all BSS / config parameters */
if (!ieee80211_vif_is_mld(&sdata->vif))
@@ -3058,7 +3234,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->deflink.u.mgd.disable_wmm_tracking = false;
ifmgd->flags = 0;
- sdata->deflink.u.mgd.conn_flags = 0;
for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
struct ieee80211_link_data *link;
@@ -3072,25 +3247,35 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
sdata->deflink.u.mgd.csa_ignored_same_chan = false;
- if (sdata->deflink.csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
/* existing TX TSPEC sessions no longer exist */
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
wiphy_delayed_work_cancel(local->hw.wiphy, &ifmgd->tx_tspec_wk);
+ sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP;
sdata->vif.bss_conf.pwr_reduction = 0;
sdata->vif.bss_conf.tx_pwr_env_num = 0;
memset(sdata->vif.bss_conf.tx_pwr_env, 0,
sizeof(sdata->vif.bss_conf.tx_pwr_env));
+ sdata->vif.cfg.eml_cap = 0;
+ sdata->vif.cfg.eml_med_sync_delay = 0;
+ sdata->vif.cfg.mld_capa_op = 0;
+
memset(&sdata->u.mgd.ttlm_info, 0,
sizeof(sdata->u.mgd.ttlm_info));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
ieee80211_vif_set_links(sdata, 0, 0);
+
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
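Among the state cleared above, mcast_seq_last is reset to IEEE80211_SN_MODULO, which works as a sentinel because real 802.11 sequence numbers are 12-bit and can never equal the modulo itself. A standalone sketch of that duplicate-detection idea (assumed names, not the mac80211 code):

    #include <stdint.h>
    #include <stdbool.h>

    #define SN_MODULO 0x1000                 /* 2^12 */

    /* caller initializes: uint16_t last = SN_MODULO; */
    static bool is_dup_mcast(uint16_t *last, uint16_t sn)
    {
        sn &= SN_MODULO - 1;                 /* real SNs are < 0x1000 */
        if (*last != SN_MODULO && *last == sn)
            return true;                     /* retransmission */
        *last = sn;
        return false;
    }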
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
@@ -3238,7 +3423,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
sdata->vif.cfg.ssid,
sdata->vif.cfg.ssid_len,
- sdata->deflink.u.mgd.bss->channel);
+ sdata->deflink.conf->bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -3321,7 +3506,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
return NULL;
if (ifmgd->associated)
- cbss = sdata->deflink.u.mgd.bss;
+ cbss = sdata->deflink.conf->bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss)
@@ -3378,9 +3563,12 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
if (!ifmgd->associated)
return;
- /* in MLO assume we have a link where we can TX the frame */
- tx = ieee80211_vif_is_mld(&sdata->vif) ||
- !sdata->deflink.csa_block_tx;
+ /*
+ * MLO drivers should have HANDLES_QUIET_CSA, so that csa_blocked_tx
+ * is always false; if they don't then this may try to transmit the
+ * frame but queues will be stopped.
+ */
+ tx = !sdata->csa_blocked_tx;
if (!ifmgd->driver_disconnect) {
unsigned int link_id;
@@ -3400,8 +3588,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
- cfg80211_unlink_bss(local->hw.wiphy, link->u.mgd.bss);
- link->u.mgd.bss = NULL;
+ cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss);
+ link->conf->bss = NULL;
}
}
@@ -3413,10 +3601,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
/* the other links will be destroyed */
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->deflink.csa_block_tx) {
+ if (sdata->csa_blocked_tx) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->deflink.csa_block_tx = false;
+ sdata->csa_blocked_tx = false;
}
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
@@ -3518,7 +3706,6 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
sta_info_destroy_addr(sdata, auth_data->ap_addr);
/* other links are destroyed */
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -3556,7 +3743,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->ap_addr);
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -4006,11 +4192,13 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
struct ieee80211_local *local = sdata->local;
unsigned int link_id = link->link_id;
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.start = elem_start,
.len = elem_len,
.link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id,
.from_ap = true,
};
+ bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
const struct cfg80211_bss_ies *bss_ies = NULL;
@@ -4034,15 +4222,17 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
if (elems->ml_basic) {
- if (!(elems->ml_basic->control &
- cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) {
+ int bss_param_ch_cnt =
+ ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic);
+
+ if (bss_param_ch_cnt < 0) {
ret = false;
goto out;
}
- link->u.mgd.bss_param_ch_cnt =
- ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic);
+ link->u.mgd.bss_param_ch_cnt = bss_param_ch_cnt;
}
- } else if (!elems->prof ||
+ } else if (elems->parse_error & IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC ||
+ !elems->prof ||
!(elems->prof->control & prof_bss_param_ch_present)) {
ret = false;
goto out;
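ieee80211_mle_get_bss_param_ch_cnt() now reports absence via a negative return instead of making every caller test the presence bit itself. A sketch of that accessor pattern; the struct layout and the control-bit value here are stand-ins, not the real multi-link element encoding:

    #include <stdint.h>

    struct mle { uint16_t control; uint8_t variable[8]; };
    #define MLE_PRES_BSS_PARAM_CH_CNT 0x0020   /* illustrative bit */

    static int mle_get_bss_param_ch_cnt(const struct mle *mle)
    {
        if (!(mle->control & MLE_PRES_BSS_PARAM_CH_CNT))
            return -1;                 /* field not present */
        /* the real offset depends on which earlier optional
         * fields are present in the element */
        return mle->variable[0];
    }

Callers then get both the presence check and the value in one call, as in the `bss_param_ch_cnt < 0` test above.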
@@ -4086,9 +4276,9 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
if (!is_6ghz &&
((assoc_data->wmm && !elems->wmm_param) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->ht_cap_elem || !elems->ht_operation)) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems *bss_elems;
@@ -4125,25 +4315,25 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
* have to include the IEs in the (re)association response.
*/
if (!elems->ht_cap_elem && bss_elems->ht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_cap_elem = bss_elems->ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
if (!elems->ht_operation && bss_elems->ht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_operation = bss_elems->ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
if (!elems->vht_cap_elem && bss_elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
elems->vht_cap_elem = bss_elems->vht_cap_elem;
sdata_info(sdata,
"AP bug: VHT capa missing from AssocResp\n");
}
if (!elems->vht_operation && bss_elems->vht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
elems->vht_operation = bss_elems->vht_operation;
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
@@ -4155,8 +4345,10 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
/*
* We previously checked these in the beacon/probe response, so
* they should be present here. This is just a safety net.
+ * Note that the ieee80211_config_bw() below would also check
+ * for this (and more), but this has better error reporting.
*/
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ if (!is_6ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
@@ -4164,7 +4356,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ if (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
@@ -4172,36 +4364,28 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- !elems->he_6ghz_capa) {
- sdata_info(sdata,
- "HE 6 GHz AP is missing HE 6 GHz band capability\n");
- ret = false;
- goto out;
- }
-
- if (WARN_ON(!link->conf->chandef.chan)) {
+ /* check/update if AP changed anything in assoc response vs. scan */
+ if (ieee80211_config_bw(link, elems,
+ link_id == assoc_data->assoc_link_id,
+ changed)) {
ret = false;
goto out;
}
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
- if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- (!elems->he_cap || !elems->he_operation)) {
- sdata_info(sdata,
- "HE AP is missing HE capability/operation\n");
+ if (WARN_ON(!link->conf->chanreq.oper.chan)) {
ret = false;
goto out;
}
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
/* Set up internal HT/VHT capabilities */
- if (elems->ht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ if (elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
link_sta);
if (elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
const struct ieee80211_vht_cap *bss_vht_cap = NULL;
const struct cfg80211_bss_ies *ies;
@@ -4228,14 +4412,41 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
rcu_read_unlock();
}
- if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ if (elems->he_operation &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
elems->he_cap) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
link_sta);
+ he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation);
+
+ if (is_6ghz && he_6ghz_oper) {
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ bss_conf->power_type = IEEE80211_REG_LPI_AP;
+ break;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ bss_conf->power_type = IEEE80211_REG_SP_AP;
+ break;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ bss_conf->power_type = IEEE80211_REG_VLP_AP;
+ break;
+ default:
+ bss_conf->power_type = IEEE80211_REG_UNSET_AP;
+ break;
+ }
+ } else if (is_6ghz) {
+ link_info(link,
+ "HE 6 GHz operation missing (on %d MHz), expect issues\n",
+ bss_conf->chanreq.oper.chan->center_freq);
+ }
+
bss_conf->he_support = link_sta->pub->he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
@@ -4249,7 +4460,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta, elems);
if (elems->eht_operation && elems->eht_cap &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_EHT) {
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
@@ -4258,7 +4469,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta);
bss_conf->eht_support = link_sta->pub->eht_cap.has_eht;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
} else {
bss_conf->eht_support = false;
}
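The 6 GHz power-type decode in the previous hunk pulls a small bit-field out of the HE 6 GHz Operation control byte with u8_get_bits(). A plain-C sketch, with the mask position and the LPI/SP/VLP numeric encoding assumed rather than quoted from the spec:

    #include <stdint.h>

    #define REG_INFO_MASK  0x38        /* assumed field position */
    #define REG_INFO_SHIFT 3

    enum ap_power { AP_UNSET, AP_LPI, AP_SP, AP_VLP };

    static enum ap_power decode_power_type(uint8_t control)
    {
        switch ((control & REG_INFO_MASK) >> REG_INFO_SHIFT) {
        case 0:  return AP_LPI;        /* low power indoor */
        case 1:  return AP_SP;         /* standard power */
        case 2:  return AP_VLP;        /* very low power */
        default: return AP_UNSET;      /* reserved values */
        }
    }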
@@ -4456,7 +4666,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
bool support_160;
u8 chains = 1;
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HT)
return chains;
ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY);
@@ -4469,7 +4679,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
*/
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_VHT)
return chains;
vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
@@ -4488,7 +4698,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
chains = max(chains, nss);
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HE)
return chains;
ies = rcu_dereference(cbss->ies);
@@ -4539,533 +4749,331 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
return chains;
}
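The HE MCS verification helpers removed just below all decode the same structure: a 16-bit map holding a 2-bit support value for each of eight spatial streams, where 3 means "not supported". A standalone sketch of the extraction and the per-stream check the removed code performed (assumed names):

    #include <stdint.h>

    #define HE_MCS_NOT_SUPPORTED 3

    static uint8_t he_mcs_for_nss(uint16_t mcs_map, int nss /* 1..8 */)
    {
        return (mcs_map >> (2 * (nss - 1))) & 0x3;
    }

    /* a STA meets the AP's basic requirement for one stream if its own
     * rx/tx values are supported and not below the AP's minimum */
    static int nss_ok(uint16_t rx_map, uint16_t tx_map,
                      uint8_t ap_min, int nss)
    {
        uint8_t rx = he_mcs_for_nss(rx_map, nss);
        uint8_t tx = he_mcs_for_nss(tx_map, nss);

        return rx != HE_MCS_NOT_SUPPORTED && tx != HE_MCS_NOT_SUPPORTED &&
               rx >= ap_min && tx >= ap_min;
    }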
-static bool
-ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_bss_ies *ies,
- const struct ieee80211_he_operation *he_op)
+static void
+ieee80211_determine_our_sta_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
{
- const struct element *he_cap_elem;
- const struct ieee80211_he_cap_elem *he_cap;
- struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
- u16 mcs_80_map_tx, mcs_80_map_rx;
- u16 ap_min_req_set;
- int mcs_nss_size;
- int nss;
-
- he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
- ies->data, ies->len);
-
- if (!he_cap_elem)
- return false;
+ struct ieee80211_sta_ht_cap sta_ht_cap = sband->ht_cap;
+ bool is_5ghz = sband->band == NL80211_BAND_5GHZ;
+ bool is_6ghz = sband->band == NL80211_BAND_6GHZ;
+ const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
- sdata_info(sdata,
- "Invalid HE elem, Disable HE\n");
- return false;
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_dbg(sdata, "operating as S1G STA\n");
+ return;
}
- /* skip one byte ext_tag_id */
- he_cap = (void *)(he_cap_elem->data + 1);
- mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) {
- sdata_info(sdata,
- "Invalid HE elem with nss size, Disable HE\n");
- return false;
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+ if (req && req->flags & ASSOC_REQ_DISABLE_HT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT disabled by flag, limiting to legacy\n");
+ goto out;
}
- /* mcs_nss is right after he_cap info */
- he_mcs_nss_supp = (void *)(he_cap + 1);
+ if (!wmm_used) {
+ mlme_link_id_dbg(sdata, link_id,
+ "WMM/QoS not supported, limiting to legacy\n");
+ goto out;
+ }
- mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
- mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+ if (req) {
+ unsigned int i;
- /* P802.11-REVme/D0.3
- * 27.1.1 Introduction to the HE PHY
- * ...
- * An HE STA shall support the following features:
- * ...
- * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
- * supported channel widths for HE SU PPDUs
- */
- if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
- sdata_info(sdata,
- "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
- mcs_80_map_tx, mcs_80_map_rx);
- return false;
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
+ if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
+ netdev_info(sdata->dev,
+ "WEP/TKIP use, limiting to legacy\n");
+ goto out;
+ }
+ }
}
- if (!he_op)
- return true;
+ if (!sta_ht_cap.ht_supported && !is_6ghz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT not supported (and not on 6 GHz), limiting to legacy\n");
+ goto out;
+ }
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+ /* HT is fine */
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ IEEE80211_CONN_BW_LIMIT_40 :
+ IEEE80211_CONN_BW_LIMIT_20;
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
-
- /* make sure the AP is consistent with itself
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * A STA that is operating in an HE BSS shall be able to receive and
- * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
- * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
- * MLME-START.request primitive and shall be able to receive at each of
- * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
- * NSS Set field in the HE Capabilities parameter of the MLME-START.request
- * primitive
- */
- for (nss = 8; nss > 0; nss--) {
- u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
- u8 ap_rx_val;
- u8 ap_tx_val;
+ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+ ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
- if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ if (req && req->flags & ASSOC_REQ_DISABLE_VHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "VHT disabled by flag, limiting to HT\n");
+ goto out;
+ }
- ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
- ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+ if (vht_cap.vht_supported && is_5ghz) {
+ bool have_80mhz = false;
+ unsigned int i;
- if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
- sdata_info(sdata,
- "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
- nss, ap_rx_val, ap_rx_val, ap_op_val);
- return false;
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz support on 5 GHz, limiting to HT\n");
+ goto out;
}
- }
- return true;
-}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
-static bool
-ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_he_operation *he_op)
-{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- u16 ap_min_req_set;
- int i;
+ have_80mhz = true;
+ break;
+ }
- if (!sta_he_cap || !he_op)
- return false;
+ if (!have_80mhz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 80 MHz channel support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
+ } else if (is_5ghz) { /* !vht_supported but on 5 GHz */
+ mlme_link_id_dbg(sdata, link_id,
+ "no VHT support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+ /* VHT - if we have it - is fine, including 80 MHz; 160 MHz is checked again below */
+ if (sband->band != NL80211_BAND_2GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
+ }
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
+ if (is_5ghz &&
+ !(vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
+ mlme_link_id_dbg(sdata, link_id,
+ "no VHT 160 MHz capability on 5 GHz, limiting to 80 MHz");
+ }
- /* Need to go over for 80MHz, 160MHz and for 80+80 */
- for (i = 0; i < 3; i++) {
- const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
- &sta_he_cap->he_mcs_nss_supp;
- u16 sta_mcs_map_rx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
- u16 sta_mcs_map_tx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
- u8 nss;
- bool verified = true;
+ if (req && req->flags & ASSOC_REQ_DISABLE_HE) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HE disabled by flag, limiting to HT/VHT\n");
+ goto out;
+ }
- /*
- * For each band there is a maximum of 8 spatial streams
- * possible. Each of the sta_mcs_map_* is a 16-bit struct built
- * of 2 bits per NSS (1-8), with the values defined in enum
- * ieee80211_he_mcs_support. Need to make sure STA TX and RX
- * capabilities aren't less than the AP's minimum requirements
- * for this HE BSS per SS.
- * It is enough to find one such band that meets the reqs.
- */
- for (nss = 8; nss > 0; nss--) {
- u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
- u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
- u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap) {
+ WARN_ON(is_6ghz);
+ mlme_link_id_dbg(sdata, link_id,
+ "no HE support, limiting to HT/VHT\n");
+ goto out;
+ }
- if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ /* so we have HE */
+ conn->mode = IEEE80211_CONN_MODE_HE;
- /*
- * Make sure the HE AP doesn't require MCSs that aren't
- * supported by the client as required by spec
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
- * a BSS, unless it supports (i.e., is able to both transmit and
- * receive using) all of the <HE-MCS, NSS> tuples in the basic
- * HE-MCS and NSS set.
- */
- if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
- verified = false;
- break;
- }
+ /* check bandwidth */
+ switch (sband->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ break;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz HE cap in 2.4 GHz, limiting to 20 MHz\n");
+ break;
+ case NL80211_BAND_5GHZ:
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40/80 MHz HE cap in 5 GHz, limiting to 20 MHz\n");
+ break;
}
-
- if (verified)
- return true;
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)) {
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 5 GHz, limiting to 80 MHz\n");
+ }
+ break;
+ case NL80211_BAND_6GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ break;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 6 GHz, limiting to 80 MHz\n");
+ break;
}
- /* If here, STA doesn't meet AP's HE min requirements */
- return false;
-}
+ if (req && req->flags & ASSOC_REQ_DISABLE_EHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "EHT disabled by flag, limiting to HE\n");
+ goto out;
+ }
-static u8
-ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
- const struct ieee80211_sta_eht_cap *sta_eht_cap,
- unsigned int idx, int bw)
-{
- u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
- u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ if (!eht_cap) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT support, limiting to HE\n");
+ goto out;
+ }
- /* handle us being a 20 MHz-only EHT STA - with four values
- * for MCS 0-7, 8-9, 10-11, 12-13.
- */
- if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
- return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+ /* we have EHT */
- /* the others have MCS 0-9 together, rather than separately from 0-7 */
- if (idx > 0)
- idx--;
+ conn->mode = IEEE80211_CONN_MODE_EHT;
- switch (bw) {
- case 0:
- return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
- case 1:
- if (!(he_phy_cap0 &
- (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
- case 2:
- if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
- }
+ /* check bandwidth */
+ if (is_6ghz &&
+ eht_cap->eht_cap_elem.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_320;
+ else if (is_6ghz)
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT 320 MHz cap in 6 GHz, limiting to 160 MHz\n");
- WARN_ON(1);
- return 0;
+out:
+ mlme_link_id_dbg(sdata, link_id,
+ "determined local STA to be %s, BW limited to %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
}
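The closing debug message prints the limit as 20 * (1 << bw_limit) MHz, which works because the bw_limit enumerators are, by all appearances, successive doublings of 20 MHz starting at 0. A compilable sketch with assumed enumerator values:

    #include <stdio.h>

    enum bw_limit { BW_20, BW_40, BW_80, BW_160, BW_320 };

    int main(void)
    {
        for (enum bw_limit b = BW_20; b <= BW_320; b++)
            printf("limit %d -> %d MHz\n", b, 20 * (1 << b));
        return 0;   /* prints 20, 40, 80, 160, 320 */
    }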
-static bool
-ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_eht_operation *eht_op)
+static void
+ieee80211_determine_our_sta_mode_auth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_auth_request *req,
+ bool wmm_used,
+ struct ieee80211_conn_settings *conn)
{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_sta_eht_cap *sta_eht_cap =
- ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
- unsigned int i;
-
- if (!sta_he_cap || !sta_eht_cap || !eht_op)
- return false;
-
- req = &eht_op->basic_mcs_nss;
-
- for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
- u8 req_rx_nss, req_tx_nss;
- unsigned int bw;
-
- req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_RX);
- req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_TX);
+ ieee80211_determine_our_sta_mode(sdata, sband, NULL, wmm_used,
+ req->link_id > 0 ? req->link_id : 0,
+ conn);
+}
- for (bw = 0; bw < 3; bw++) {
- u8 have, have_rx_nss, have_tx_nss;
+static void
+ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
+{
+ struct ieee80211_conn_settings tmp;
- have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
- sta_eht_cap,
- i, bw);
- have_rx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_RX);
- have_tx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_TX);
+ WARN_ON(!req);
- if (req_rx_nss > have_rx_nss ||
- req_tx_nss > have_tx_nss)
- return false;
- }
- }
+ ieee80211_determine_our_sta_mode(sdata, sband, req, wmm_used, link_id,
+ &tmp);
- return true;
+ conn->mode = min_t(enum ieee80211_conn_mode,
+ conn->mode, tmp.mode);
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit, tmp.bw_limit);
}
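The _assoc wrapper intersects the freshly determined per-link settings with what was already established, so association can only tighten the connection, never widen it. min_t() is the kernel's type-checked min(); a plain-C equivalent of the clamp, with illustrative types:

    struct conn_settings { int mode; int bw_limit; };

    static void conn_intersect(struct conn_settings *conn,
                               const struct conn_settings *tmp)
    {
        /* keep the lower (more restrictive) value of each field */
        if (tmp->mode < conn->mode)
            conn->mode = tmp->mode;
        if (tmp->bw_limit < conn->bw_limit)
            conn->bw_limit = tmp->bw_limit;
    }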
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
- struct cfg80211_bss *cbss,
- bool mlo,
- ieee80211_conn_flags_t *conn_flags)
+ int link_id,
+ struct cfg80211_bss *cbss, bool mlo,
+ struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
- const struct ieee80211_ht_cap *ht_cap = NULL;
- const struct ieee80211_ht_operation *ht_oper = NULL;
- const struct ieee80211_vht_operation *vht_oper = NULL;
- const struct ieee80211_he_operation *he_oper = NULL;
- const struct ieee80211_eht_operation *eht_oper = NULL;
- const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
- struct ieee80211_supported_band *sband;
- struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
- bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- bool supports_mlo = false;
- struct ieee80211_bss *bss = (void *)cbss->priv;
- struct ieee80211_elems_parse_params parse_params = {
- .link_id = -1,
- .from_ap = true,
- };
+ struct ieee80211_chan_req chanreq = {};
struct ieee802_11_elems *elems;
- const struct cfg80211_bss_ies *ies;
int ret;
u32 i;
- bool have_80mhz;
lockdep_assert_wiphy(local->hw.wiphy);
rcu_read_lock();
+ elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id,
+ &chanreq);
- ies = rcu_dereference(cbss->ies);
- parse_params.start = ies->data;
- parse_params.len = ies->len;
- elems = ieee802_11_parse_elems_full(&parse_params);
- if (!elems) {
+ if (IS_ERR(elems)) {
rcu_read_unlock();
- return -ENOMEM;
- }
-
- sband = local->hw.wiphy->bands[cbss->channel->band];
-
- *conn_flags &= ~(IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ);
-
- /* disable HT/VHT/HE if we don't support them */
- if (!sband->ht_cap.ht_supported && !is_6ghz) {
- mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ return PTR_ERR(elems);
}
- if (!sband->vht_cap.vht_supported && is_5ghz) {
- mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HT) && !is_6ghz) {
- ht_oper = elems->ht_operation;
- ht_cap = elems->ht_cap_elem;
-
- if (!ht_cap) {
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- ht_oper = NULL;
- }
- }
-
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_VHT) && !is_6ghz) {
- vht_oper = elems->vht_operation;
- if (vht_oper && !ht_oper) {
- vht_oper = NULL;
- sdata_info(sdata,
- "AP advertised VHT without HT, disabling HT/VHT/HE\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!elems->vht_cap_elem) {
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- vht_oper = NULL;
- }
+ if (mlo && !elems->ml_basic) {
+ sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
+ rcu_read_unlock();
+ kfree(elems);
+ return -EINVAL;
}
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- he_oper = elems->he_operation;
-
- if (link && is_6ghz) {
- struct ieee80211_bss_conf *bss_conf;
- u8 j = 0;
+ if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) {
+ struct ieee80211_bss_conf *bss_conf;
+ u8 j = 0;
- bss_conf = link->conf;
+ bss_conf = link->conf;
- if (elems->pwr_constr_elem)
- bss_conf->pwr_reduction = *elems->pwr_constr_elem;
+ if (elems->pwr_constr_elem)
+ bss_conf->pwr_reduction = *elems->pwr_constr_elem;
- BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
- ARRAY_SIZE(elems->tx_pwr_env));
-
- for (i = 0; i < elems->tx_pwr_env_num; i++) {
- if (elems->tx_pwr_env_len[i] >
- sizeof(bss_conf->tx_pwr_env[j]))
- continue;
+ BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
+ ARRAY_SIZE(elems->tx_pwr_env));
- bss_conf->tx_pwr_env_num++;
- memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
- elems->tx_pwr_env_len[i]);
- j++;
- }
- }
-
- if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
- !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
-
- /*
- * EHT requires HE to be supported as well. Specifically for 6 GHz
- * channels, the operation channel information can only be deduced from
- * both the 6 GHz operation information (from the HE operation IE) and
- * EHT operation.
- */
- if (!(*conn_flags &
- (IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT)) &&
- he_oper) {
- const struct cfg80211_bss_ies *cbss_ies;
- const struct element *eht_ml_elem;
- const u8 *eht_oper_ie;
-
- cbss_ies = rcu_dereference(cbss->ies);
- eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION,
- cbss_ies->data, cbss_ies->len);
- if (eht_oper_ie && eht_oper_ie[1] >=
- 1 + sizeof(struct ieee80211_eht_operation))
- eht_oper = (void *)(eht_oper_ie + 3);
- else
- eht_oper = NULL;
-
- if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
-
- eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
- cbss_ies->data, cbss_ies->len);
-
- /* data + 1 / datalen - 1 since it's an extended element */
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) &&
- eht_ml_elem &&
- ieee80211_mle_type_ok(eht_ml_elem->data + 1,
- IEEE80211_ML_CONTROL_TYPE_BASIC,
- eht_ml_elem->datalen - 1)) {
- supports_mlo = true;
+ for (i = 0; i < elems->tx_pwr_env_num; i++) {
+ if (elems->tx_pwr_env_len[i] > sizeof(bss_conf->tx_pwr_env[j]))
+ continue;
- sdata->vif.cfg.eml_cap =
- ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1);
- sdata->vif.cfg.eml_med_sync_delay =
- ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1);
+ bss_conf->tx_pwr_env_num++;
+ memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
+ elems->tx_pwr_env_len[i]);
+ j++;
}
}
-
- /* Allow VHT if at least one channel on the sband supports 80 MHz */
- have_80mhz = false;
- for (i = 0; i < sband->n_channels; i++) {
- if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
- IEEE80211_CHAN_NO_80MHZ))
- continue;
-
- have_80mhz = true;
- break;
- }
-
- if (!have_80mhz) {
- sdata_info(sdata, "80 MHz not supported, disabling VHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
-
- if (sband->band == NL80211_BAND_S1GHZ) {
- s1g_oper = elems->s1g_oper;
- if (!s1g_oper)
- sdata_info(sdata,
- "AP missing S1G operation element?\n");
- }
-
- *conn_flags |=
- ieee80211_determine_chantype(sdata, link, *conn_flags,
- sband,
- cbss->channel,
- bss->vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper,
- &chandef, false);
-
- if (link)
- link->needed_rx_chains =
- min(ieee80211_max_rx_chains(link, cbss),
- local->rx_chains);
-
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
kfree(elems);
elems = NULL;
- if (*conn_flags & IEEE80211_CONN_DISABLE_HE && is_6ghz) {
- sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection");
- return -EINVAL;
- }
-
- if (mlo && !supports_mlo) {
- sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
- return -EINVAL;
- }
-
if (!link)
return 0;
+ rcu_read_lock();
+ link->needed_rx_chains = min(ieee80211_max_rx_chains(link, cbss),
+ local->rx_chains);
+ rcu_read_unlock();
+
/*
* If this fails (possibly due to channel context sharing
* on incompatible channels, e.g. 80+80 and 160 sharing the
* same control channel) try to use a smaller bandwidth.
*/
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
/* don't downgrade for 5 and 10 MHz channels, though. */
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10)
- goto out;
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10)
+ return ret;
+
+ while (ret && chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ ieee80211_chanreq_downgrade(&chanreq, conn);
- while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
- *conn_flags |=
- ieee80211_chandef_downgrade(&chandef);
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
}
- out:
+
return ret;
}
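The tail of ieee80211_prep_channel() is a retry-with-downgrade loop, skipped for 5/10 MHz widths. A self-contained sketch, where use_channel() is a stand-in for ieee80211_link_use_channel() and, for illustration, pretends only widths up to 80 MHz can share a channel context:

    #include <stdbool.h>

    enum width { W20_NOHT, W20, W40, W80, W160, W320 };

    struct chanreq { enum width width; };

    static bool use_channel(const struct chanreq *r)
    {
        return r->width <= W80;    /* pretend wider can't be shared */
    }

    static void downgrade(struct chanreq *r)
    {
        if (r->width)
            r->width--;    /* mac80211 also lowers conn->bw_limit here */
    }

    static int pick_channel(struct chanreq *r)
    {
        bool ok = use_channel(r);

        while (!ok && r->width != W20_NOHT) {
            downgrade(r);
            ok = use_channel(r);
        }
        return ok ? 0 : -1;
    }

Starting from 320 MHz this steps down until 80 MHz succeeds, mirroring how ieee80211_chanreq_downgrade() narrows both the request and the connection settings together.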
@@ -5126,6 +5134,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sta))
goto out_err;
+ sta->sta.spp_amsdu = assoc_data->spp_amsdu;
+
if (ieee80211_vif_is_mld(&sdata->vif)) {
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!assoc_data->link[link_id].bss)
@@ -5189,8 +5199,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
if (link_id != assoc_data->assoc_link_id) {
- err = ieee80211_prep_channel(sdata, link, cbss, true,
- &link->u.mgd.conn_flags);
+ link->u.mgd.conn = assoc_data->link[link_id].conn;
+
+ err = ieee80211_prep_channel(sdata, link, link_id, cbss,
+ true, &link->u.mgd.conn);
if (err) {
link_info(link, "prep_channel failed\n");
goto out_err;
@@ -5308,6 +5320,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return;
+ parse_params.mode =
+ assoc_data->link[assoc_data->assoc_link_id].conn.mode;
+
if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) ||
!ether_addr_equal(assoc_data->ap_addr, mgmt->sa))
return;
@@ -5424,6 +5439,13 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
assoc_data->ap_addr);
goto abandon_assoc;
}
+
+ sdata->vif.cfg.eml_cap =
+ ieee80211_mle_get_eml_cap((const void *)elems->ml_basic);
+ sdata->vif.cfg.eml_med_sync_delay =
+ ieee80211_mle_get_eml_med_sync_delay((const void *)elems->ml_basic);
+ sdata->vif.cfg.mld_capa_op =
+ ieee80211_mle_get_mld_capa_op((const void *)elems->ml_basic);
}
sdata->vif.cfg.aid = aid;
@@ -5686,49 +5708,6 @@ static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
}
-static bool ieee80211_config_puncturing(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u64 *changed)
-{
- struct ieee80211_local *local = link->sdata->local;
- u16 bitmap = 0, extracted;
-
- if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- }
-
- extracted = ieee80211_extract_dis_subch_bmap(eht_oper,
- &link->conf->chandef,
- bitmap);
-
- /* accept if there are no changes */
- if (!(*changed & BSS_CHANGED_BANDWIDTH) &&
- extracted == link->conf->eht_puncturing)
- return true;
-
- if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef)) {
- link_info(link,
- "Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n",
- link->u.mgd.bssid,
- bitmap,
- link->conf->chandef.width);
- return false;
- }
-
- if (bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING))
- return false;
-
- ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
- return true;
-}
-
static void ieee80211_ml_reconf_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5792,9 +5771,7 @@ out:
static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems)
{
- const struct ieee80211_multi_link_elem *ml;
const struct element *sub;
- ssize_t ml_len;
unsigned long removed_links = 0;
u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
u8 link_id;
@@ -5803,24 +5780,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf)
return;
- ml_len = cfg80211_defragment_element(elems->ml_reconf_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch + elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
- if (ml_len < 0)
- return;
-
- elems->ml_reconf = (const void *)elems->scratch_pos;
- elems->ml_reconf_len = ml_len;
- ml = elems->ml_reconf;
-
/* Directly parse the sub elements as the common information doesn't
* hold any useful information.
*/
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ for_each_mle_subelement(sub, (const u8 *)elems->ml_reconf,
+ elems->ml_reconf_len) {
struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
u8 *pos = prof->variable;
u16 control;
@@ -5888,6 +5852,56 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
TU_TO_JIFFIES(delay));
}
+static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
+ u16 active_links, u16 dormant_links,
+ u16 suspended_links)
+{
+ u64 changed = 0;
+ int ret;
+
+ if (!active_links) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If there is an active negotiated TTLM, it should be discarded by
+ * the new negotiated/advertised TTLM.
+ */
+ if (sdata->vif.neg_ttlm.valid) {
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
+ sdata->vif.suspended_links = 0;
+ changed = BSS_CHANGED_MLD_TTLM;
+ }
+
+ if (sdata->vif.active_links != active_links) {
+ ret = ieee80211_set_active_links(&sdata->vif, active_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM active links\n");
+ goto out;
+ }
+ }
+
+ ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
+ dormant_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM dormant links\n");
+ goto out;
+ }
+
+ changed |= BSS_CHANGED_MLD_VALID_LINKS;
+ sdata->vif.suspended_links = suspended_links;
+ if (sdata->vif.suspended_links)
+ changed |= BSS_CHANGED_MLD_TTLM;
+
+ ieee80211_vif_cfg_change_notify(sdata, changed);
+
+out:
+ if (ret)
+ ieee80211_disconnect(&sdata->vif, false);
+
+ return ret;
+}
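The three link sets handled by ieee80211_ttlm_set_links() should partition the valid links, with suspended links a subset of the dormant ones. A quick invariant sketch (assuming the dormant set is always a subset of the valid set):

    #include <stdint.h>
    #include <assert.h>

    static void check_links(uint16_t valid, uint16_t active,
                            uint16_t dormant, uint16_t suspended)
    {
        assert(active);                       /* at least one usable link */
        assert((active & dormant) == 0);      /* disjoint sets */
        assert((active | dormant) == valid);  /* nothing unaccounted for */
        assert((suspended & ~dormant) == 0);  /* suspended subset of dormant */
    }

This also shows why active links must be set before dormant ones in the function above: the driver-visible active set has to be valid at every intermediate step.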
+
static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5895,30 +5909,19 @@ static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.ttlm_work.work);
- int ret;
new_active_links = sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
- if (!new_active_links) {
- ieee80211_disconnect(&sdata->vif, false);
- return;
- }
ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0);
- new_active_links = BIT(ffs(new_active_links) - 1);
- ieee80211_set_active_links(&sdata->vif, new_active_links);
-
- ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
- new_dormant_links);
+ if (ieee80211_ttlm_set_links(sdata, new_active_links, new_dormant_links,
+ 0))
+ return;
sdata->u.mgd.ttlm_info.active = true;
sdata->u.mgd.ttlm_info.switch_time = 0;
-
- if (!ret)
- ieee80211_vif_cfg_change_notify(sdata,
- BSS_CHANGED_MLD_VALID_LINKS);
}
static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
@@ -6128,6 +6131,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
u8 *bssid, *variable = mgmt->u.beacon.variable;
u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.link_id = -1,
.from_ap = true,
};
@@ -6210,7 +6214,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, link->u.mgd.bss))
+ !ieee80211_rx_our_beacon(bssid, link->conf->bss))
return;
bssid = link->u.mgd.bssid;
@@ -6237,7 +6241,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
- parse_params.bss = link->u.mgd.bss;
+ parse_params.bss = link->conf->bss;
parse_params.filter = care_about_ies;
parse_params.crc = ncrc;
elems = ieee802_11_parse_elems_full(&parse_params);
@@ -6299,9 +6303,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
}
- if (link->u.mgd.csa_waiting_bcn)
- ieee80211_chswitch_post_beacon(link);
-
/*
* Update beacon timing and dtim count on every beacon appearance. This
* will allow the driver to use the most updated values. Do it before
@@ -6375,21 +6376,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
goto free;
}
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
goto free;
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
- if (ieee80211_config_bw(link, elems->ht_cap_elem,
- elems->vht_cap_elem, elems->ht_operation,
- elems->vht_operation, elems->he_operation,
- elems->eht_operation,
- elems->s1g_oper, bssid, &changed)) {
- sdata_info(sdata,
- "failed to follow AP %pM bandwidth change, disconnect\n",
- bssid);
+ if (ieee80211_config_bw(link, elems, true, &changed)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
@@ -6411,21 +6405,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
- if (elems->eht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
- if (!ieee80211_config_puncturing(link, elems->eht_operation,
- &changed)) {
- ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DEAUTH_LEAVING,
- true, deauth_buf);
- ieee80211_report_disconnect(sdata, deauth_buf,
- sizeof(deauth_buf), true,
- WLAN_REASON_DEAUTH_LEAVING,
- false);
- goto free;
- }
- }
-
ieee80211_ml_reconfiguration(sdata, elems);
ieee80211_process_adv_ttlm(sdata, elems,
le64_to_cpu(mgmt->u.beacon.timestamp));
@@ -6435,6 +6414,376 @@ free:
kfree(elems);
}
+static void ieee80211_apply_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm neg_ttlm)
+{
+ u16 new_active_links, new_dormant_links, new_suspended_links, map = 0;
+ u8 i;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++)
+ map |= neg_ttlm.downlink[i] | neg_ttlm.uplink[i];
+
+ /* If there is an active TTLM, unset previously suspended links */
+ if (sdata->vif.neg_ttlm.valid)
+ sdata->vif.dormant_links &= ~sdata->vif.suspended_links;
+
+ /* exclude links that are already disabled by advertised TTLM */
+ new_active_links =
+ map & sdata->vif.valid_links & ~sdata->vif.dormant_links;
+ new_suspended_links =
+ (~map & sdata->vif.valid_links) & ~sdata->vif.dormant_links;
+ new_dormant_links = sdata->vif.dormant_links | new_suspended_links;
+ if (ieee80211_ttlm_set_links(sdata, new_active_links,
+ new_dormant_links, new_suspended_links))
+ return;
+
+ sdata->vif.neg_ttlm = neg_ttlm;
+ sdata->vif.neg_ttlm.valid = true;
+}
+
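As an aside, the link-set computation in ieee80211_apply_neg_ttlm() above is pure bitmask arithmetic. A standalone sketch (not part of the patch; the link bitmaps are invented for the demo) reproduces it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t valid_links = 0x0007;   /* links 0..2 exist */
	uint16_t dormant_links = 0x0004; /* link 2 disabled by the advertised TTLM */
	uint16_t map = 0x0001;           /* union of the per-TID uplink/downlink maps */

	/* exclude links that are already disabled by the advertised TTLM */
	uint16_t new_active = map & valid_links & ~dormant_links;
	/* valid links the negotiated map no longer uses become suspended */
	uint16_t new_suspended = ~map & valid_links & ~dormant_links;
	/* suspended links are additionally tracked as dormant */
	uint16_t new_dormant = dormant_links | new_suspended;

	printf("active=0x%04x suspended=0x%04x dormant=0x%04x\n",
	       new_active, new_suspended, new_dormant);
	return 0;
}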
+static void ieee80211_neg_ttlm_timeout_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.neg_ttlm_timeout_work.work);
+
+ sdata_info(sdata,
+ "No negotiated TTLM response from AP, disconnecting.\n");
+
+ __ieee80211_disconnect(sdata);
+}
+
+static void
+ieee80211_neg_ttlm_add_suggested_map(struct sk_buff *skb,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u8 i, direction[IEEE80211_TTLM_MAX_CNT];
+
+ if (memcmp(neg_ttlm->downlink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->downlink))) {
+ direction[0] = IEEE80211_TTLM_DIRECTION_DOWN;
+ direction[1] = IEEE80211_TTLM_DIRECTION_UP;
+ } else {
+ direction[0] = IEEE80211_TTLM_DIRECTION_BOTH;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(direction); i++) {
+ u8 tid, len, map_ind = 0, *len_pos, *map_ind_pos, *pos;
+ __le16 map;
+
+ len = sizeof(struct ieee80211_ttlm_elem) + 1 + 1;
+
+ pos = skb_put(skb, len + 2);
+ *pos++ = WLAN_EID_EXTENSION;
+ len_pos = pos++;
+ *pos++ = WLAN_EID_EXT_TID_TO_LINK_MAPPING;
+ *pos++ = direction[i];
+ map_ind_pos = pos++;
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ map = direction[i] == IEEE80211_TTLM_DIRECTION_UP ?
+ cpu_to_le16(neg_ttlm->uplink[tid]) :
+ cpu_to_le16(neg_ttlm->downlink[tid]);
+ if (!map)
+ continue;
+
+ len += 2;
+ map_ind |= BIT(tid);
+ skb_put_data(skb, &map, sizeof(map));
+ }
+
+ *map_ind_pos = map_ind;
+ *len_pos = len;
+
+ if (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH)
+ break;
+ }
+}
+
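For reference, the element built above follows the TID-to-link mapping wire layout: extension element header, a control octet carrying the direction, a link-mapping presence bitmap, then one little-endian 16-bit link map per TID flagged in that bitmap. A simplified userspace builder sketches the layout; the extension tag value and the "both directions" encoding are stand-ins for the demo, not the kernel definitions:

#include <stdio.h>
#include <stdint.h>

#define NUM_TIDS      8
#define EID_EXTENSION 255
#define EXT_TTLM      109 /* assumed tag value, demo only */
#define DIR_BOTH      2   /* assumed "both directions" encoding */

static size_t build_ttlm(uint8_t *buf, uint8_t direction,
			 const uint16_t maps[NUM_TIDS])
{
	uint8_t *len_pos, *presence_pos, *pos = buf;

	*pos++ = EID_EXTENSION;
	len_pos = pos++;       /* element length, filled in at the end */
	*pos++ = EXT_TTLM;     /* element ID extension */
	*pos++ = direction;    /* control octet: direction bits only here */
	presence_pos = pos++;  /* link mapping presence indicator */

	*presence_pos = 0;
	for (int tid = 0; tid < NUM_TIDS; tid++) {
		if (!maps[tid])
			continue;
		*presence_pos |= 1 << tid;
		/* per-TID link map, little endian */
		*pos++ = maps[tid] & 0xff;
		*pos++ = maps[tid] >> 8;
	}

	*len_pos = pos - buf - 2; /* length excludes the EID and length octets */
	return pos - buf;
}

int main(void)
{
	uint16_t maps[NUM_TIDS] = { [0] = 0x0003, [4] = 0x0001 };
	uint8_t buf[64];
	size_t n = build_ttlm(buf, DIR_BOTH, maps);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}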
+static void
+ieee80211_send_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 dialog_token)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_req);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_req.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ;
+ mgmt->u.action.u.ttlm_req.dialog_token = dialog_token;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ ieee80211_tx_skb(sdata, skb);
+}
+
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+ u8 i;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ !(sdata->vif.cfg.mld_capa_op &
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((params->dlink[i] & ~sdata->vif.valid_links) ||
+ (params->ulink[i] & ~sdata->vif.valid_links))
+ return -EINVAL;
+
+ neg_ttlm.downlink[i] = params->dlink[i];
+ neg_ttlm.uplink[i] = params->ulink[i];
+ }
+
+ if (drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm) !=
+ NEG_TTLM_RES_ACCEPT)
+ return -EINVAL;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+ sdata->u.mgd.dialog_token_alloc++;
+ ieee80211_send_neg_ttlm_req(sdata, &sdata->vif.neg_ttlm,
+ sdata->u.mgd.dialog_token_alloc);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work,
+ IEEE80211_NEG_TTLM_REQ_TIMEOUT);
+ return 0;
+}
+
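The dialog-token handling above follows the usual action-frame handshake: each request is stamped with an incremented token, and only a response echoing the latest token is accepted (see ieee80211_process_neg_ttlm_res() below). A toy model of that pattern:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct requester {
	uint8_t dialog_token_alloc;
};

/* stamp a new request with an incremented token */
static uint8_t send_req(struct requester *r)
{
	return ++r->dialog_token_alloc;
}

/* a response is only accepted if it echoes the latest token */
static bool accept_res(const struct requester *r, uint8_t token)
{
	return token == r->dialog_token_alloc;
}

int main(void)
{
	struct requester r = { 0 };
	uint8_t t1 = send_req(&r);
	uint8_t t2 = send_req(&r); /* the newer request supersedes t1 */

	printf("res(t1) accepted: %d, res(t2) accepted: %d\n",
	       accept_res(&r, t1), accept_res(&r, t2)); /* 0, 1 */
	return 0;
}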
+static void
+ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res ttlm_res,
+ u8 dialog_token,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_res.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES;
+ mgmt->u.action.u.ttlm_res.dialog_token = dialog_token;
+ switch (ttlm_res) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case NEG_TTLM_RES_REJECT:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ break;
+ case NEG_TTLM_RES_ACCEPT:
+ mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
+ break;
+ case NEG_TTLM_RES_SUGGEST_PREFERRED:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ break;
+ }
+
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static int
+ieee80211_parse_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_ttlm_elem *ttlm,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 *direction)
+{
+ u8 control, link_map_presence, map_size, tid;
+ u8 *pos;
+
+ /* The element size was already validated in
+ * ieee80211_tid_to_link_map_size_ok()
+ */
+ pos = (void *)ttlm->optional;
+
+ control = ttlm->control;
+
+ /* The mapping switch time and expected duration fields are not
+ * expected in a negotiated TTLM
+ */
+ if (control & (IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT |
+ IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)) {
+ mlme_dbg(sdata,
+ "Invalid TTLM element in negotiated TTLM request\n");
+ return -EINVAL;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) {
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ neg_ttlm->downlink[tid] = sdata->vif.valid_links;
+ neg_ttlm->uplink[tid] = sdata->vif.valid_links;
+ }
+ *direction = IEEE80211_TTLM_DIRECTION_BOTH;
+ return 0;
+ }
+
+ *direction = u8_get_bits(control, IEEE80211_TTLM_CONTROL_DIRECTION);
+ if (*direction != IEEE80211_TTLM_DIRECTION_DOWN &&
+ *direction != IEEE80211_TTLM_DIRECTION_UP &&
+ *direction != IEEE80211_TTLM_DIRECTION_BOTH)
+ return -EINVAL;
+
+ link_map_presence = *pos;
+ pos++;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ map_size = 1;
+ else
+ map_size = 2;
+
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ u16 map;
+
+ if (link_map_presence & BIT(tid)) {
+ map = ieee80211_get_ttlm(map_size, pos);
+ if (!map) {
+ mlme_dbg(sdata,
+ "No active links for TID %d\n", tid);
+ return -EINVAL;
+ }
+ } else {
+ map = 0;
+ }
+
+ switch (*direction) {
+ case IEEE80211_TTLM_DIRECTION_BOTH:
+ neg_ttlm->downlink[tid] = map;
+ neg_ttlm->uplink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_DOWN:
+ neg_ttlm->downlink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_UP:
+ neg_ttlm->uplink[tid] = map;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pos += map_size;
+ }
+ return 0;
+}
+
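The map_size handling above relies on ieee80211_get_ttlm() to read a per-TID link map stored as one or two little-endian octets, depending on the link-mapping-size control bit. A minimal userspace mirror of that decode step:

#include <stdio.h>
#include <stdint.h>

/* a link map is one or two little-endian octets depending on the
 * "link mapping size" control bit
 */
static uint16_t get_ttlm(uint8_t bm_size, const uint8_t *data)
{
	if (bm_size == 1)
		return data[0];
	return data[0] | (uint16_t)data[1] << 8;
}

int main(void)
{
	uint8_t one[] = { 0x05 };
	uint8_t two[] = { 0x05, 0x01 };

	printf("1-octet map: 0x%04x\n", get_ttlm(1, one)); /* 0x0005 */
	printf("2-octet map: 0x%04x\n", get_ttlm(2, two)); /* 0x0105 */
	return 0;
}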
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ u8 dialog_token, direction[IEEE80211_TTLM_MAX_CNT] = {}, i;
+ size_t ies_len;
+ enum ieee80211_neg_ttlm_res ttlm_res = NEG_TTLM_RES_ACCEPT;
+ struct ieee802_11_elems *elems = NULL;
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+
+ BUILD_BUG_ON(ARRAY_SIZE(direction) != ARRAY_SIZE(elems->ttlm));
+
+ if (!ieee80211_vif_is_mld(&sdata->vif))
+ return;
+
+ dialog_token = mgmt->u.action.u.ttlm_req.dialog_token;
+ ies_len = len - offsetof(struct ieee80211_mgmt,
+ u.action.u.ttlm_req.variable);
+ elems = ieee802_11_parse_elems(mgmt->u.action.u.ttlm_req.variable,
+ ies_len, true, NULL);
+ if (!elems) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < elems->ttlm_num; i++) {
+ if (ieee80211_parse_neg_ttlm(sdata, elems->ttlm[i],
+ &neg_ttlm, &direction[i]) ||
+ (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH &&
+ elems->ttlm_num != 1)) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ if (!elems->ttlm_num ||
+ (elems->ttlm_num == 2 && direction[0] == direction[1])) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((neg_ttlm.downlink[i] &&
+ (neg_ttlm.downlink[i] & ~sdata->vif.valid_links)) ||
+ (neg_ttlm.uplink[i] &&
+ (neg_ttlm.uplink[i] & ~sdata->vif.valid_links))) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ ttlm_res = drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm);
+
+ if (ttlm_res != NEG_TTLM_RES_ACCEPT)
+ goto out;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+out:
+ kfree(elems);
+ ieee80211_send_neg_ttlm_res(sdata, ttlm_res, dialog_token, &neg_ttlm);
+}
+
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ mgmt->u.action.u.ttlm_req.dialog_token !=
+ sdata->u.mgd.dialog_token_alloc)
+ return;
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+
+ /* An MLD station sends a TID-to-link mapping request mainly to
+ * handle a BTM (BSS transition management) request, in which case
+ * it needs to restrict the set of active links.
+ * In that case the MLD AP is not expected to reject the negotiated
+ * TTLM request.
+ * Handling of request rejections can be implemented more fully in
+ * the future.
+ */
+ if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
+ __ieee80211_disconnect(sdata);
+}
+
void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -7064,6 +7413,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_handle_tspec_ac_params_wk);
wiphy_delayed_work_init(&ifmgd->ttlm_work,
ieee80211_tid_to_link_map_work);
+ wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work,
+ ieee80211_neg_ttlm_timeout_work);
ifmgd->flags = 0;
ifmgd->powersave = sdata->wdev.ps;
@@ -7073,6 +7424,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
spin_lock_init(&ifmgd->teardown_lock);
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
static void ieee80211_recalc_smps_work(struct wiphy *wiphy,
@@ -7092,7 +7444,6 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
unsigned int link_id = link->link_id;
link->u.mgd.p2p_noa_index = -1;
- link->u.mgd.conn_flags = 0;
link->conf->bssid = link->u.mgd.bssid;
link->smps_mode = IEEE80211_SMPS_OFF;
@@ -7132,6 +7483,7 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss, s8 link_id,
const u8 *ap_mld_addr, bool assoc,
+ struct ieee80211_conn_settings *conn,
bool override)
{
struct ieee80211_local *local = sdata->local;
@@ -7263,13 +7615,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
}
if (new_sta || override) {
- err = ieee80211_prep_channel(sdata, link, cbss, mlo,
- &link->u.mgd.conn_flags);
+ /*
+ * Only set this if we're also going to calculate the AP
+ * settings etc., otherwise it was already set in a
+ * previous call. Note override is set to %true in assoc
+ * if the settings were changed.
+ */
+ link->u.mgd.conn = *conn;
+ err = ieee80211_prep_channel(sdata, link, link->link_id, cbss,
+ mlo, &link->u.mgd.conn);
if (err) {
if (new_sta)
sta_info_free(local, new_sta);
goto out_err;
}
+ /* pass out for use in assoc */
+ *conn = link->u.mgd.conn;
}
if (new_sta) {
@@ -7384,10 +7745,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_auth_data *auth_data;
+ struct ieee80211_conn_settings conn;
struct ieee80211_link_data *link;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss;
u16 auth_alg;
int err;
- bool cont_auth;
+ bool cont_auth, wmm_used;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -7518,15 +7882,24 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
/* needed for transmitting the auth frame(s) properly */
memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN);
+ bss = (void *)req->bss->priv;
+ wmm_used = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS);
+
+ sband = local->hw.wiphy->bands[req->bss->channel->band];
+
+ ieee80211_determine_our_sta_mode_auth(sdata, sband, req, wmm_used,
+ &conn);
+
err = ieee80211_prep_connection(sdata, req->bss, req->link_id,
- req->ap_mld_addr, cont_auth, false);
+ req->ap_mld_addr, cont_auth,
+ &conn, false);
if (err)
goto err_clear;
- if (req->link_id > 0)
+ if (req->link_id >= 0)
link = sdata_dereference(sdata->link[req->link_id], sdata);
else
- link = sdata_dereference(sdata->link[0], sdata);
+ link = &sdata->deflink;
if (WARN_ON(!link)) {
err = -ENOLINK;
@@ -7558,38 +7931,33 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return err;
}
-static ieee80211_conn_flags_t
+static void
ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data,
struct cfg80211_assoc_request *req,
- ieee80211_conn_flags_t conn_flags,
+ struct ieee80211_conn_settings *conn,
unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
const struct cfg80211_bss_ies *bss_ies;
struct ieee80211_supported_band *sband;
- const struct element *ht_elem, *vht_elem;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
- bool is_5ghz, is_6ghz;
cbss = assoc_data->link[link_id].bss;
if (WARN_ON(!cbss))
- return 0;
+ return;
bss = (void *)cbss->priv;
sband = local->hw.wiphy->bands[cbss->channel->band];
if (WARN_ON(!sband))
- return 0;
+ return;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
- return 0;
-
- is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
+ return;
/* for MLO connections assume advertising all rates is OK */
if (!req->ap_mld_addr) {
@@ -7606,40 +7974,18 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
assoc_data->ie_pos += req->links[link_id].elems_len;
}
- rcu_read_lock();
- ht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
- if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation))
- assoc_data->link[link_id].ap_ht_param =
- ((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param;
- else if (!is_6ghz)
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- vht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
- if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) {
- memcpy(&assoc_data->link[link_id].ap_vht_cap, vht_elem->data,
- sizeof(struct ieee80211_vht_cap));
- } else if (is_5ghz) {
- link_info(link,
- "VHT capa missing/short, disabling VHT/HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
- rcu_read_unlock();
-
link->u.mgd.beacon_crc_valid = false;
link->u.mgd.dtim_period = 0;
link->u.mgd.have_beacon = false;
- /* override HT/VHT configuration only if the AP and we support it */
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ /* override HT configuration only if the AP and we support it */
+ if (conn->mode >= IEEE80211_CONN_MODE_HT) {
struct ieee80211_sta_ht_cap sta_ht_cap;
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
}
- link->conf->eht_puncturing = 0;
-
rcu_read_lock();
bss_ies = rcu_dereference(cbss->beacon_ies);
if (bss_ies) {
@@ -7660,7 +8006,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
}
if (bss_ies) {
- const struct ieee80211_eht_operation *eht_oper;
const struct element *elem;
elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
@@ -7677,32 +8022,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
link->conf->ema_ap = true;
else
link->conf->ema_ap = false;
-
- elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
- bss_ies->data, bss_ies->len);
- eht_oper = (const void *)(elem->data + 1);
-
- if (elem &&
- ieee80211_eht_oper_size_ok((const void *)(elem->data + 1),
- elem->datalen - 1) &&
- (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
- u16 bitmap;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING)))
- ieee80211_handle_puncturing_bitmap(link,
- eht_oper,
- bitmap,
- NULL);
- else
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
}
rcu_read_unlock();
@@ -7729,8 +8048,67 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
} else {
link->smps_mode = link->u.mgd.req_smps;
}
+}
+
+static int
+ieee80211_mgd_get_ap_ht_vht_capa(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_assoc_data *assoc_data,
+ int link_id)
+{
+ struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
+ enum nl80211_band band = cbss->channel->band;
+ struct ieee80211_supported_band *sband;
+ const struct element *elem;
+ int err;
+
+ /* neither HT nor VHT elements used on 6 GHz */
+ if (band == NL80211_BAND_6GHZ)
+ return 0;
+
+ if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_HT)
+ return 0;
- return conn_flags;
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
+ if (!elem || elem->datalen < sizeof(struct ieee80211_ht_operation)) {
+ mlme_link_id_dbg(sdata, link_id, "no HT operation on BSS %pM\n",
+ cbss->bssid);
+ err = -EINVAL;
+ goto out_rcu;
+ }
+ assoc_data->link[link_id].ap_ht_param =
+ ((struct ieee80211_ht_operation *)(elem->data))->ht_param;
+ rcu_read_unlock();
+
+ if (assoc_data->link[link_id].conn.mode < IEEE80211_CONN_MODE_VHT)
+ return 0;
+
+ /* some drivers want to support VHT even on 2.4 GHz */
+ sband = sdata->local->hw.wiphy->bands[band];
+ if (!sband->vht_cap.vht_supported)
+ return 0;
+
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
+ /* but even then, accept the element being absent on the AP */
+ if (!elem && band == NL80211_BAND_2GHZ) {
+ err = 0;
+ goto out_rcu;
+ }
+ if (!elem || elem->datalen < sizeof(struct ieee80211_vht_cap)) {
+ mlme_link_id_dbg(sdata, link_id, "no VHT capa on BSS %pM\n",
+ cbss->bssid);
+ err = -EINVAL;
+ goto out_rcu;
+ }
+ memcpy(&assoc_data->link[link_id].ap_vht_cap, elem->data,
+ sizeof(struct ieee80211_vht_cap));
+ rcu_read_unlock();
+
+ return 0;
+out_rcu:
+ rcu_read_unlock();
+ return err;
}
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
@@ -7742,11 +8120,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data;
const struct element *ssid_elem;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
- ieee80211_conn_flags_t conn_flags = 0;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
- struct ieee80211_bss *bss;
- bool override;
+ bool override, uapsd_supported;
+ bool match_auth;
int i, err;
size_t size = sizeof(*assoc_data) + req->ie_len;
@@ -7765,44 +8142,26 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (ieee80211_mgd_csa_in_process(sdata, cbss)) {
sdata_info(sdata, "AP is in CSA process, reject assoc\n");
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
rcu_read_lock();
ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
assoc_data->ssid_len = ssid_elem->datalen;
- memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
- vif_cfg->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
- if (req->ap_mld_addr) {
- for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (!req->links[i].bss)
- continue;
- link = sdata_dereference(sdata->link[i], sdata);
- if (link)
- ether_addr_copy(assoc_data->link[i].addr,
- link->conf->addr);
- else
- eth_random_addr(assoc_data->link[i].addr);
- }
- } else {
- memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
- }
-
- assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
-
- memcpy(assoc_data->ap_addr,
- req->ap_mld_addr ?: req->bss->bssid,
- ETH_ALEN);
+ if (req->ap_mld_addr)
+ memcpy(assoc_data->ap_addr, req->ap_mld_addr, ETH_ALEN);
+ else
+ memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -7820,98 +8179,146 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
false);
}
- if (ifmgd->auth_data && !ifmgd->auth_data->done) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+ memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+ sizeof(ifmgd->ht_capa_mask));
- if (ifmgd->assoc_data) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
+ memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
+ sizeof(ifmgd->vht_capa_mask));
- if (ifmgd->auth_data) {
- bool match;
+ memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
+ memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
+ sizeof(ifmgd->s1g_capa_mask));
- /* keep sta info, bssid if matching */
- match = ether_addr_equal(ifmgd->auth_data->ap_addr,
- assoc_data->ap_addr) &&
- ifmgd->auth_data->link_id == req->link_id;
+ /* keep some setup (AP STA, channel, ...) if matching */
+ match_auth = ifmgd->auth_data &&
+ ether_addr_equal(ifmgd->auth_data->ap_addr,
+ assoc_data->ap_addr) &&
+ ifmgd->auth_data->link_id == req->link_id;
- /* Cleanup is delayed if auth_data matches */
- if (!match)
- ieee80211_destroy_auth_data(sdata, false);
- }
+ if (req->ap_mld_addr) {
+ uapsd_supported = true;
- /* prepare assoc data */
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_bss *link_cbss = req->links[i].bss;
+ struct ieee80211_bss *bss;
- bss = (void *)cbss->priv;
- assoc_data->wmm = bss->wmm_used &&
- (local->hw.queues >= IEEE80211_NUM_ACS);
+ if (!link_cbss)
+ continue;
- /*
- * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
- * We still associate in non-HT mode (11a/b/g) if any one of these
- * ciphers is configured as pairwise.
- * We can set this to true for non-11n hardware, that'll be checked
- * separately along with the peer capabilities.
- */
- for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
- if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE due to WEP/TKIP use\n");
+ bss = (void *)link_cbss->priv;
+
+ if (!bss->wmm_used) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE |
+ ASSOC_REQ_DISABLE_EHT)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (link_cbss->channel->band == NL80211_BAND_S1GHZ) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ link = sdata_dereference(sdata->link[i], sdata);
+ if (link)
+ ether_addr_copy(assoc_data->link[i].addr,
+ link->conf->addr);
+ else
+ eth_random_addr(assoc_data->link[i].addr);
+ sband = local->hw.wiphy->bands[link_cbss->channel->band];
+
+ if (match_auth && i == assoc_link_id && link)
+ assoc_data->link[i].conn = link->u.mgd.conn;
+ else
+ assoc_data->link[i].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband,
+ req, true, i,
+ &assoc_data->link[i].conn);
+ assoc_data->link[i].bss = link_cbss;
+ assoc_data->link[i].disabled = req->links[i].disabled;
+
+ if (!bss->uapsd_supported)
+ uapsd_supported = false;
+
+ if (assoc_data->link[i].conn.mode < IEEE80211_CONN_MODE_EHT) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata,
+ assoc_data, i);
+ if (err) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
}
- }
- /* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */
- if (!bss->wmm_used) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
- }
+ assoc_data->wmm = true;
+ } else {
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
- if (req->flags & ASSOC_REQ_DISABLE_HT) {
- mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
+ memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
+ assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
- if (req->flags & ASSOC_REQ_DISABLE_VHT) {
- mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
+ assoc_data->wmm = bss->wmm_used &&
+ (local->hw.queues >= IEEE80211_NUM_ACS);
- if (req->flags & ASSOC_REQ_DISABLE_HE) {
- mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ if (cbss->channel->band == NL80211_BAND_6GHZ &&
+ req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ sband = local->hw.wiphy->bands[cbss->channel->band];
+
+ assoc_data->link[0].bss = cbss;
+
+ if (match_auth)
+ assoc_data->link[0].conn = sdata->deflink.u.mgd.conn;
+ else
+ assoc_data->link[0].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband, req,
+ assoc_data->wmm, 0,
+ &assoc_data->link[0].conn);
+
+ uapsd_supported = bss->uapsd_supported;
+
+ err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, assoc_data, 0);
+ if (err)
+ goto err_free;
}
- if (req->flags & ASSOC_REQ_DISABLE_EHT)
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ assoc_data->spp_amsdu = req->flags & ASSOC_REQ_SPP_AMSDU;
- memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
- memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
- sizeof(ifmgd->ht_capa_mask));
+ if (ifmgd->auth_data && !ifmgd->auth_data->done) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
- memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
- sizeof(ifmgd->vht_capa_mask));
+ if (ifmgd->assoc_data) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
- memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
- sizeof(ifmgd->s1g_capa_mask));
+ /* Cleanup is delayed if auth_data matches */
+ if (ifmgd->auth_data && !match_auth)
+ ieee80211_destroy_auth_data(sdata, false);
if (req->ie && req->ie_len) {
memcpy(assoc_data->ie, req->ie, req->ie_len);
@@ -7943,19 +8350,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
assoc_data->assoc_link_id = assoc_link_id;
if (req->ap_mld_addr) {
- for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
- assoc_data->link[i].conn_flags = conn_flags;
- assoc_data->link[i].bss = req->links[i].bss;
- assoc_data->link[i].disabled = req->links[i].disabled;
- }
-
/* if there was no authentication, set up the link */
err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0);
if (err)
goto err_clear;
- } else {
- assoc_data->link[0].conn_flags = conn_flags;
- assoc_data->link[0].bss = cbss;
}
link = sdata_dereference(sdata->link[assoc_link_id], sdata);
@@ -7964,19 +8362,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
goto err_clear;
}
- /* keep old conn_flags from ieee80211_prep_channel() from auth */
- conn_flags |= link->u.mgd.conn_flags;
- conn_flags |= ieee80211_setup_assoc_link(sdata, assoc_data, req,
- conn_flags, assoc_link_id);
- override = link->u.mgd.conn_flags != conn_flags;
- link->u.mgd.conn_flags |= conn_flags;
+ override = link->u.mgd.conn.mode !=
+ assoc_data->link[assoc_link_id].conn.mode ||
+ link->u.mgd.conn.bw_limit !=
+ assoc_data->link[assoc_link_id].conn.bw_limit;
+ link->u.mgd.conn = assoc_data->link[assoc_link_id].conn;
+
+ ieee80211_setup_assoc_link(sdata, assoc_data, req, &link->u.mgd.conn,
+ assoc_link_id);
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
"U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
- if (bss->wmm_used && bss->uapsd_supported &&
+ if (assoc_data->wmm && uapsd_supported &&
(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
@@ -8020,27 +8420,29 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
continue;
if (i == assoc_data->assoc_link_id)
continue;
- /* only calculate the flags, hence link == NULL */
- err = ieee80211_prep_channel(sdata, NULL,
+ /* only calculate the mode, hence link == NULL */
+ err = ieee80211_prep_channel(sdata, NULL, i,
assoc_data->link[i].bss, true,
- &assoc_data->link[i].conn_flags);
+ &assoc_data->link[i].conn);
if (err) {
req->links[i].error = err;
goto err_clear;
}
}
+ memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
+ vif_cfg->ssid_len = assoc_data->ssid_len;
+
/* needed for transmitting the assoc frames properly */
memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN);
err = ieee80211_prep_connection(sdata, cbss, req->link_id,
- req->ap_mld_addr, true, override);
+ req->ap_mld_addr, true,
+ &assoc_data->link[assoc_link_id].conn,
+ override);
if (err)
goto err_clear;
- assoc_data->link[assoc_data->assoc_link_id].conn_flags =
- link->u.mgd.conn_flags;
-
if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) {
const struct cfg80211_bss_ies *beacon_ies;
@@ -8204,6 +8606,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->ml_reconf_work);
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 449af4e1cca4..9ef14e475c90 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -168,6 +168,7 @@ void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
struct ocb_setup *setup)
{
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
@@ -182,7 +183,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -207,7 +208,7 @@ int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
ifocb->joined = false;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifocb->incomplete_lock);
while (!list_empty(&ifocb->incomplete_stations)) {
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6c4080202573..221695d841fd 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -86,7 +86,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
/*
@@ -136,7 +136,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -351,10 +351,13 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
* 20 MHz channel width) don't stop all the operations but still
* treat it as though the ROC operation started properly, so
* other ROC operations won't interfere with this one.
+ *
+ * Note: scan can't run, tmp_channel is what we use, so this
+ * must be the currently active channel.
*/
- roc->on_channel = roc->chan == local->_oper_chandef.chan &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
+ roc->on_channel = roc->chan == local->hw.conf.chandef.chan &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_5 &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_10;
/* start this ROC */
ieee80211_recalc_idle(local);
@@ -363,7 +366,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
ieee80211_offchannel_stop_vifs(local);
local->tmp_channel = roc->chan;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
}
wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
@@ -426,7 +429,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
return;
if (!roc->started) {
- WARN_ON(local->use_chanctx);
+ WARN_ON(!local->emulate_chanctx);
_ieee80211_start_next_roc(local);
} else {
on_channel = roc->on_channel;
@@ -439,7 +442,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
ieee80211_offchannel_return(local);
}
@@ -539,7 +542,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* this may work, but is untested */
return -EOPNOTSUPP;
- if (local->use_chanctx && !local->ops->remain_on_channel)
+ if (!local->emulate_chanctx && !local->ops->remain_on_channel)
return -EOPNOTSUPP;
roc = kzalloc(sizeof(*roc), GFP_KERNEL);
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
new file mode 100644
index 000000000000..55e5497f8978
--- /dev/null
+++ b/net/mac80211/parse.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2024 Intel Corporation
+ *
+ * element parsing for mac80211
+ */
+
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <net/net_namespace.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <kunit/visibility.h>
+
+#include "ieee80211_i.h"
+#include "driver-ops.h"
+#include "rate.h"
+#include "mesh.h"
+#include "wme.h"
+#include "led.h"
+#include "wep.h"
+
+struct ieee80211_elems_parse {
+ /* must be first for kfree to work */
+ struct ieee802_11_elems elems;
+
+ /* The basic Multi-Link element in the original elements */
+ const struct element *ml_basic_elem;
+
+ /* The reconfiguration Multi-Link element in the original elements */
+ const struct element *ml_reconf_elem;
+
+ /*
+ * scratch buffer that can be used for various element parsing related
+ * tasks, e.g., element de-fragmentation etc.
+ */
+ size_t scratch_len;
+ u8 *scratch_pos;
+ u8 scratch[] __counted_by(scratch_len);
+};
+
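The scratch buffer is a flexible array member annotated with __counted_by() and allocated with struct_size() further down. A plain userspace sketch of the same allocation pattern (struct_size() and __counted_by() are kernel helpers; simple malloc arithmetic stands in here):

#include <stdio.h>
#include <stdlib.h>

struct elems_parse_demo {
	size_t scratch_len;
	unsigned char *scratch_pos;
	unsigned char scratch[]; /* flexible array member */
};

int main(void)
{
	size_t params_len = 256;
	size_t scratch_len = 3 * params_len; /* same sizing rule as the kernel code */
	struct elems_parse_demo *ep = calloc(1, sizeof(*ep) + scratch_len);

	if (!ep)
		return 1;
	ep->scratch_len = scratch_len;
	ep->scratch_pos = ep->scratch; /* consumers bump this as they defragment */
	printf("allocated %zu scratch bytes\n", ep->scratch_len);
	free(ep);
	return 0;
}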
+static void
+ieee80211_parse_extension_element(u32 *crc,
+ const struct element *elem,
+ struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const void *data = elem->data + 1;
+ bool calc_crc = false;
+ u8 len;
+
+ if (!elem->datalen)
+ return;
+
+ len = elem->datalen - 1;
+
+ switch (elem->data[0]) {
+ case WLAN_EID_EXT_HE_MU_EDCA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->mu_edca_param_set))
+ elems->mu_edca_param_set = data;
+ break;
+ case WLAN_EID_EXT_HE_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (ieee80211_he_capa_size_ok(data, len)) {
+ elems->he_cap = data;
+ elems->he_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_HE_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->he_operation) &&
+ len >= ieee80211_he_oper_size(data) - 1)
+ elems->he_operation = data;
+ break;
+ case WLAN_EID_EXT_UORA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= 1)
+ elems->uora_element = data;
+ break;
+ case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
+ if (len == 3)
+ elems->max_channel_switch_time = data;
+ break;
+ case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
+ if (len >= sizeof(*elems->mbssid_config_ie))
+ elems->mbssid_config_ie = data;
+ break;
+ case WLAN_EID_EXT_HE_SPR:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_spr) &&
+ len >= ieee80211_he_spr_size(data))
+ elems->he_spr = data;
+ break;
+ case WLAN_EID_EXT_HE_6GHZ_CAPA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_6ghz_capa))
+ elems->he_6ghz_capa = data;
+ break;
+ case WLAN_EID_EXT_EHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_capa_size_ok(elems->he_cap,
+ data, len,
+ params->from_ap)) {
+ elems->eht_cap = data;
+ elems->eht_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_EHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_oper_size_ok(data, len))
+ elems->eht_operation = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_EHT_MULTI_LINK:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+
+ if (ieee80211_mle_size_ok(data, len)) {
+ const struct ieee80211_multi_link_elem *mle =
+ (void *)data;
+
+ switch (le16_get_bits(mle->control,
+ IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ if (elems_parse->ml_basic_elem) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC;
+ break;
+ }
+ elems_parse->ml_basic_elem = elem;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ elems_parse->ml_reconf_elem = elem;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case WLAN_EID_EXT_BANDWIDTH_INDICATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_bandwidth_indication_size_ok(data, len))
+ elems->bandwidth_indication = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+ if (ieee80211_tid_to_link_map_size_ok(data, len) &&
+ elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
+ elems->ttlm[elems->ttlm_num] = (void *)data;
+ elems->ttlm_num++;
+ }
+ break;
+ }
+
+ if (crc && calc_crc)
+ *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
+}
+
+static u32
+_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
+ struct ieee80211_elems_parse *elems_parse,
+ const struct element *check_inherit)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const struct element *elem;
+ bool calc_crc = params->filter != 0;
+ DECLARE_BITMAP(seen_elems, 256);
+ u32 crc = params->crc;
+
+ bitmap_zero(seen_elems, 256);
+
+ for_each_element(elem, params->start, params->len) {
+ const struct element *subelem;
+ u8 elem_parse_failed;
+ u8 id = elem->id;
+ u8 elen = elem->datalen;
+ const u8 *pos = elem->data;
+
+ if (check_inherit &&
+ !cfg80211_is_element_inherited(elem,
+ check_inherit))
+ continue;
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ case WLAN_EID_SUPP_RATES:
+ case WLAN_EID_FH_PARAMS:
+ case WLAN_EID_DS_PARAMS:
+ case WLAN_EID_CF_PARAMS:
+ case WLAN_EID_TIM:
+ case WLAN_EID_IBSS_PARAMS:
+ case WLAN_EID_CHALLENGE:
+ case WLAN_EID_RSN:
+ case WLAN_EID_ERP_INFO:
+ case WLAN_EID_EXT_SUPP_RATES:
+ case WLAN_EID_HT_CAPABILITY:
+ case WLAN_EID_HT_OPERATION:
+ case WLAN_EID_VHT_CAPABILITY:
+ case WLAN_EID_VHT_OPERATION:
+ case WLAN_EID_MESH_ID:
+ case WLAN_EID_MESH_CONFIG:
+ case WLAN_EID_PEER_MGMT:
+ case WLAN_EID_PREQ:
+ case WLAN_EID_PREP:
+ case WLAN_EID_PERR:
+ case WLAN_EID_RANN:
+ case WLAN_EID_CHANNEL_SWITCH:
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ case WLAN_EID_COUNTRY:
+ case WLAN_EID_PWR_CONSTRAINT:
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ case WLAN_EID_EXT_CAPABILITY:
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ case WLAN_EID_LINK_ID:
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ case WLAN_EID_RSNX:
+ case WLAN_EID_S1G_BCN_COMPAT:
+ case WLAN_EID_S1G_CAPABILITIES:
+ case WLAN_EID_S1G_OPERATION:
+ case WLAN_EID_AID_RESPONSE:
+ case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
+ /*
+ * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
+ * that if the content gets bigger it might be needed more than once
+ */
+ if (test_bit(id, seen_elems)) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_ELEM;
+ continue;
+ }
+ break;
+ }
+
+ if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elem_parse_failed = 0;
+
+ switch (id) {
+ case WLAN_EID_LINK_ID:
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->lnk_id = (void *)(pos - 2);
+ break;
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_sw_timing = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ elems->ext_capab = pos;
+ elems->ext_capab_len = elen;
+ break;
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ if (elen >= 1)
+ elems->ds_params = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ } else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ if (elen >= 1)
+ elems->erp_info = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_operation))
+ elems->ht_operation = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_cap))
+ elems->vht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_operation)) {
+ elems->vht_operation = (void *)pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_OPMODE_NOTIF:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen > 0) {
+ elems->opmode_notif = pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_MESH_AWAKE_WINDOW:
+ if (elen >= 2)
+ elems->awake_window = (void *)pos;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_switch_ie = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ext_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->sec_chan_offs = (void *)pos;
+ break;
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ if (elen <
+ sizeof(*elems->mesh_chansw_params_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->mesh_chansw_params_ie = (void *)pos;
+ break;
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+
+ if (!params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+
+ if (elen < sizeof(*elems->wide_bw_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->wide_bw_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+ /*
+ * This is a bit tricky, but as we only care about
+ * a few elements, parse them out manually.
+ */
+ subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
+ pos, elen);
+ if (subelem) {
+ if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
+ elems->wide_bw_chansw_ie =
+ (void *)subelem->data;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+
+ subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
+ pos, elen);
+ if (subelem) {
+ const void *edata = subelem->data + 1;
+ u8 edatalen = subelem->datalen - 1;
+
+ if (ieee80211_bandwidth_indication_size_ok(edata,
+ edatalen))
+ elems->bandwidth_indication = edata;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ if (elen != 1) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->pwr_constr_elem = pos;
+ break;
+ case WLAN_EID_CISCO_VENDOR_SPECIFIC:
+ /* Lots of different options exist, but we only care
+ * about the Dynamic Transmit Power Control element.
+ * First check for the Cisco OUI, then for the DTPC
+ * tag (0x00).
+ */
+ if (elen < 4) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (pos[0] != 0x00 || pos[1] != 0x40 ||
+ pos[2] != 0x96 || pos[3] != 0x00)
+ break;
+
+ if (elen != 6) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elems->cisco_dtpc_elem = pos;
+ break;
+ case WLAN_EID_ADDBA_EXT:
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->addba_ext_ie = (void *)pos;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
+ elems->timeout_int = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ if (elen >= sizeof(*elems->max_idle_period_ie))
+ elems->max_idle_period_ie = (void *)pos;
+ break;
+ case WLAN_EID_RSNX:
+ elems->rsnx = pos;
+ elems->rsnx_len = elen;
+ break;
+ case WLAN_EID_TX_POWER_ENVELOPE:
+ if (elen < 1 ||
+ elen > sizeof(struct ieee80211_tx_pwr_env))
+ break;
+
+ if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
+ break;
+
+ elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
+ elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
+ elems->tx_pwr_env_num++;
+ break;
+ case WLAN_EID_EXTENSION:
+ ieee80211_parse_extension_element(calc_crc ?
+ &crc : NULL,
+ elem, elems_parse,
+ params);
+ break;
+ case WLAN_EID_S1G_CAPABILITIES:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen >= sizeof(*elems->s1g_capab))
+ elems->s1g_capab = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_OPERATION:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_oper))
+ elems->s1g_oper = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_BCN_COMPAT:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_bcn_compat))
+ elems->s1g_bcn_compat = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_AID_RESPONSE:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(struct ieee80211_aid_response_ie))
+ elems->aid_resp = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ default:
+ break;
+ }
+
+ if (elem_parse_failed)
+ elems->parse_error |= elem_parse_failed;
+ else
+ __set_bit(id, seen_elems);
+ }
+
+ if (!for_each_element_completed(elem, params->start, params->len))
+ elems->parse_error |= IEEE80211_PARSE_ERR_INVALID_END;
+
+ return crc;
+}
+
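Note how the CRC only covers elements whose ID is below 64 and selected in the 64-bit filter bitmap, so changes in uninteresting elements don't perturb the checksum. A compact sketch of that filtering logic, with a trivial stand-in hash instead of crc32_be():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* demo hash standing in for crc32_be() */
static uint32_t hash(uint32_t crc, const uint8_t *data, size_t len)
{
	while (len--)
		crc = crc * 31 + *data++;
	return crc;
}

int main(void)
{
	/* two elements: ID 0 (SSID, "abc") and ID 70 (one octet) */
	uint8_t ies[] = { 0, 3, 'a', 'b', 'c', 70, 1, 0x55 };
	uint64_t filter = 1ULL << 0; /* only element ID 0 is interesting */
	uint32_t crc = 0;
	size_t i = 0;

	while (i + 2 <= sizeof(ies)) {
		uint8_t id = ies[i], elen = ies[i + 1];

		/* hash header + body of filtered elements with ID < 64 */
		if (id < 64 && (filter & (1ULL << id)))
			crc = hash(crc, &ies[i], elen + 2);
		i += 2 + elen;
	}
	printf("crc=0x%08x\n", crc);
	return 0;
}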
+static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ struct cfg80211_bss *bss,
+ u8 *nontransmitted_profile)
+{
+ const struct element *elem, *sub;
+ size_t profile_len = 0;
+ bool found = false;
+
+ if (!bss || !bss->transmitted_bss)
+ return profile_len;
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
+ if (elem->datalen < 2)
+ continue;
+ if (elem->data[0] < 1 || elem->data[0] > 8)
+ continue;
+
+ for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+ u8 new_bssid[ETH_ALEN];
+ const u8 *index;
+
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element of the
+ * Nontransmitted BSSID Profile is not
+ * the Nontransmitted BSSID Capability
+ * element.
+ */
+ continue;
+ }
+
+ memset(nontransmitted_profile, 0, len);
+ profile_len = cfg80211_merge_profile(start, len,
+ elem,
+ sub,
+ nontransmitted_profile,
+ len);
+
+ /* found a Nontransmitted BSSID Profile */
+ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ nontransmitted_profile,
+ profile_len);
+ if (!index || index[1] < 1 || index[2] == 0) {
+ /* Invalid MBSSID Index element */
+ continue;
+ }
+
+ cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
+ elem->data[0],
+ index[2],
+ new_bssid);
+ if (ether_addr_equal(new_bssid, bss->bssid)) {
+ found = true;
+ elems->bssid_index_len = index[1];
+ elems->bssid_index = (void *)&index[2];
+ break;
+ }
+ }
+ }
+
+ return found ? profile_len : 0;
+}
+
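The lookup above depends on cfg80211_gen_new_bssid(), which, per the MBSSID rules, replaces the low max-BSSID-indicator bits of the transmitted BSSID with (those bits + index) modulo 2^indicator. A standalone sketch of that derivation, under the assumption that the 48-bit address is handled as an integer:

#include <stdio.h>
#include <stdint.h>

/* replace the low 'max_bssid' bits of the transmitted BSSID with
 * (those bits + index) mod 2^max_bssid
 */
static uint64_t gen_new_bssid(uint64_t bssid, uint8_t max_bssid,
			      uint8_t index)
{
	uint64_t mask = (1ULL << max_bssid) - 1;

	return (bssid & ~mask) | (((bssid & mask) + index) & mask);
}

int main(void)
{
	uint64_t tx_bssid = 0x0203040506f8ULL; /* a BSSID as a 48-bit integer */
	uint64_t non_tx = gen_new_bssid(tx_bssid, 4 /* indicator */, 9);

	/* 0203040506f8 -> 0203040506f1 */
	printf("%012llx -> %012llx\n", (unsigned long long)tx_bssid,
	       (unsigned long long)non_tx);
	return 0;
}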
+static void
+ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse,
+ u8 link_id)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *sub;
+
+ for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ ssize_t sta_prof_len;
+ u16 control;
+
+ if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+ continue;
+
+ if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ sub->datalen))
+ return;
+
+ control = le16_to_cpu(prof->control);
+
+ if (link_id != u16_get_bits(control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID))
+ continue;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
+ return;
+
+ /* the subelement can be fragmented */
+ sta_prof_len =
+ cfg80211_defragment_element(sub,
+ (u8 *)ml, ml_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+
+ if (sta_prof_len < 0)
+ return;
+
+ elems->prof = (void *)elems_parse->scratch_pos;
+ elems->sta_prof_len = sta_prof_len;
+ elems_parse->scratch_pos += sta_prof_len;
+
+ return;
+ }
+}
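
cfg80211_defragment_element(), used here and below, reassembles an
oversized (sub)element by concatenating its payload with the payloads of
the fragment (sub)elements that immediately follow it; a further fragment
may only follow a maximally-sized (255 octet) chunk. A simplified sketch
under those assumptions:

	static ssize_t defrag_element(const struct element *elem, const u8 *end,
				      u8 *out, size_t out_len, u8 frag_id)
	{
		const u8 *pos = (const u8 *)elem;
		size_t copied = 0;
		u8 elen;

		do {
			elen = pos[1];
			if (pos + 2 + elen > end || copied + elen > out_len)
				return -EINVAL;
			memcpy(out + copied, pos + 2, elen);
			copied += elen;
			pos += 2 + elen;
		} while (elen == 255 && end - pos >= 2 && pos[0] == frag_id);

		return (ssize_t)copied;
	}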
+
+static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ struct ieee80211_mle_per_sta_profile *prof;
+ struct ieee80211_elems_parse_params sub = {
+ .mode = params->mode,
+ .action = params->action,
+ .from_ap = params->from_ap,
+ .link_id = -1,
+ };
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *non_inherit = NULL;
+ const u8 *end;
+
+ ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+
+ if (ml_len < 0)
+ return;
+
+ elems->ml_basic = (const void *)elems_parse->scratch_pos;
+ elems->ml_basic_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+
+ if (params->link_id == -1)
+ return;
+
+ ieee80211_mle_get_sta_prof(elems_parse, params->link_id);
+ prof = elems->prof;
+
+ if (!prof)
+ return;
+
+ /* check if we have the 4 bytes for the fixed part in assoc response */
+ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
+ elems->prof = NULL;
+ elems->sta_prof_len = 0;
+ return;
+ }
+
+ /*
+ * Skip the capability information and the status code that are expected
+ * as part of the station profile in association response frames. Note
+ * that the -1 is because 'sta_info_len' is accounted as part of the
+ * per-STA profile, but not as part of the 'u8 variable[]' portion.
+ */
+ sub.start = prof->variable + prof->sta_info_len - 1 + 4;
+ end = (const u8 *)prof + elems->sta_prof_len;
+ sub.len = end - sub.start;
+
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub.start, sub.len);
+ _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit);
+}
+
+static void
+ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse)
+{
+ struct ieee802_11_elems *elems = &elems_parse->elems;
+ ssize_t ml_len;
+
+ ml_len = cfg80211_defragment_element(elems_parse->ml_reconf_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems_parse->scratch_pos,
+ elems_parse->scratch +
+ elems_parse->scratch_len -
+ elems_parse->scratch_pos,
+ WLAN_EID_FRAGMENT);
+ if (ml_len < 0)
+ return;
+ elems->ml_reconf = (void *)elems_parse->scratch_pos;
+ elems->ml_reconf_len = ml_len;
+ elems_parse->scratch_pos += ml_len;
+}
+
+struct ieee802_11_elems *
+ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
+{
+ struct ieee80211_elems_parse *elems_parse;
+ struct ieee802_11_elems *elems;
+ const struct element *non_inherit = NULL;
+ u8 *nontransmitted_profile;
+ int nontransmitted_profile_len = 0;
+ size_t scratch_len = 3 * params->len;
+
+ BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0);
+
+ elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len),
+ GFP_ATOMIC);
+ if (!elems_parse)
+ return NULL;
+
+ elems_parse->scratch_len = scratch_len;
+ elems_parse->scratch_pos = elems_parse->scratch;
+
+ elems = &elems_parse->elems;
+ elems->ie_start = params->start;
+ elems->total_len = params->len;
+
+ nontransmitted_profile = elems_parse->scratch_pos;
+ nontransmitted_profile_len =
+ ieee802_11_find_bssid_profile(params->start, params->len,
+ elems, params->bss,
+ nontransmitted_profile);
+ elems_parse->scratch_pos += nontransmitted_profile_len;
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontransmitted_profile,
+ nontransmitted_profile_len);
+
+ elems->crc = _ieee802_11_parse_elems_full(params, elems_parse,
+ non_inherit);
+
+ /* Override with nontransmitted profile, if found */
+ if (nontransmitted_profile_len) {
+ struct ieee80211_elems_parse_params sub = {
+ .mode = params->mode,
+ .start = nontransmitted_profile,
+ .len = nontransmitted_profile_len,
+ .action = params->action,
+ .link_id = params->link_id,
+ };
+
+ _ieee802_11_parse_elems_full(&sub, elems_parse, NULL);
+ }
+
+ ieee80211_mle_parse_link(elems_parse, params);
+
+ ieee80211_mle_defrag_reconf(elems_parse);
+
+ if (elems->tim && !elems->parse_error) {
+ const struct ieee80211_tim_ie *tim_ie = elems->tim;
+
+ elems->dtim_period = tim_ie->dtim_period;
+ elems->dtim_count = tim_ie->dtim_count;
+ }
+
+ /* Override DTIM period and count if needed */
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_period))
+ elems->dtim_period = elems->bssid_index->dtim_period;
+
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_count))
+ elems->dtim_count = elems->bssid_index->dtim_count;
+
+ return elems;
+}
+EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
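
A hypothetical caller, for illustration ('variable'/'variable_len' stand
in for the IE portion of a received frame); the BUILD_BUG_ON above
guarantees the returned elems pointer is also the allocation, so the
caller can free it directly:

	struct ieee80211_elems_parse_params parse = {
		.mode = IEEE80211_CONN_MODE_EHT,
		.start = variable,
		.len = variable_len,
		.link_id = -1,
	};
	struct ieee802_11_elems *elems = ieee802_11_parse_elems_full(&parse);

	if (elems) {
		/* use elems->tim, elems->ml_basic, elems->parse_error, ... */
		kfree(elems);
	}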
+
+int ieee80211_parse_bitrates(enum nl80211_chan_width width,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
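
The comparison works because Supported Rates octets carry the rate in
500 kbps units with the high bit flagging a basic rate (masked off by
the & 0x7f above), while struct ieee80211_rate stores bitrate in
100 kbps units, hence the DIV_ROUND_UP(..., 5) conversion. For example:

	u8 srate = 0xec & 0x7f;			/* 54 Mbps basic: 0x80 | 108 -> 108 */
	int brate = DIV_ROUND_UP(540, 5);	/* bitrate 540 (x100 kbps) -> 108 */
	/* srate == brate, so BIT(j) is set in *rates and count increases */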
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9d33fd2377c8..23404b275457 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -4,7 +4,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2019, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -37,7 +37,7 @@ void rate_control_rate_init(struct sta_info *sta)
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
- ieee80211_sta_set_rx_nss(&sta->deflink);
+ ieee80211_sta_init_nss(&sta->deflink);
if (!ref)
return;
@@ -279,10 +279,10 @@ void ieee80211_check_rate_mask(struct ieee80211_link_data *link)
u32 user_mask, basic_rates = link->conf->basic_rates;
enum nl80211_band band;
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
return;
- band = link->conf->chandef.chan->band;
+ band = link->conf->chanreq.oper.chan->band;
if (band == NL80211_BAND_S1GHZ) {
/* TODO */
return;
@@ -762,7 +762,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
u32 i, flags;
*mask = sdata->rc_rateidx_mask[sband->band];
- flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
for (i = 0; i < sband->n_bitrates; i++) {
if ((flags & sband->bitrates[i].flags) != flags)
*mask &= ~BIT(i);
@@ -818,7 +818,7 @@ rate_control_apply_mask_ratetbl(struct sta_info *sta,
mcs_mask, vht_mask))
return;
- chan_width = sta->sdata->vif.bss_conf.chandef.width;
+ chan_width = sta->sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
if (rates->rate[i].idx < 0)
break;
@@ -855,7 +855,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
* included in the configured mask and change the rate indexes
* if needed.
*/
- chan_width = sdata->vif.bss_conf.chandef.width;
+ chan_width = sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < max_rates; i++) {
/* Skip invalid rates */
if (rates[i].idx < 0)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0bf72928ccfc..c1f850138405 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -1251,8 +1251,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- u16 sc = le16_to_cpu(hdr->seq_ctrl);
- u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+ u16 mpdu_seq_num = ieee80211_get_sn(hdr);
u16 head_seq_num, buf_size;
int index;
bool ret = true;
@@ -1435,13 +1434,31 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_any_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ ieee80211_is_any_nullfunc(hdr->frame_control))
return RX_CONTINUE;
if (!rx->sta)
return RX_CONTINUE;
+ if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ u16 sn = ieee80211_get_sn(hdr);
+
+ if (!ieee80211_is_data_present(hdr->frame_control))
+ return RX_CONTINUE;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_CONTINUE;
+
+ if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
+ ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
+ return RX_DROP_U_DUP;
+
+ sdata->u.mgd.mcast_seq_last = sn;
+ return RX_CONTINUE;
+ }
+
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
@@ -3369,8 +3386,7 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
if (color == bss_conf->he_bss_color.color)
ieee80211_obss_color_collision_notify(&rx->sdata->vif,
- BIT_ULL(color),
- GFP_ATOMIC);
+ BIT_ULL(color));
}
}
@@ -3763,6 +3779,28 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
}
break;
+ case WLAN_CATEGORY_PROTECTED_EHT:
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_req))
+ goto invalid;
+ goto queue;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_res))
+ goto invalid;
+ goto queue;
+ default:
+ break;
+ }
+ break;
}
return RX_CONTINUE;
@@ -5192,7 +5230,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
*/
if (!status->link_valid && pubsta->mlo) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
struct link_sta_info *link_sta;
link_sta = link_sta_info_get_bss(rx.sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f9d5842601fa..0429e59ba387 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -257,7 +257,6 @@ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_sub_if_data *sdata1, *sdata2;
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
@@ -281,12 +280,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
if (skb->len < min_hdr_len)
return;
- sdata1 = rcu_dereference(local->scan_sdata);
- sdata2 = rcu_dereference(local->sched_scan_sdata);
-
- if (likely(!sdata1 && !sdata2))
- return;
-
if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
/*
* we were passive scanning because of radar/no-IR, but
@@ -304,10 +297,17 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
return;
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ struct ieee80211_sub_if_data *sdata1, *sdata2;
struct cfg80211_scan_request *scan_req;
struct cfg80211_sched_scan_request *sched_scan_req;
u32 scan_req_flags = 0, sched_scan_req_flags = 0;
+ sdata1 = rcu_dereference(local->scan_sdata);
+ sdata2 = rcu_dereference(local->sched_scan_sdata);
+
+ if (likely(!sdata1 && !sdata2))
+ return;
+
scan_req = rcu_dereference(local->scan_req);
sched_scan_req = rcu_dereference(local->sched_scan_req);
@@ -327,8 +327,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
sched_scan_req_flags,
mgmt->da))
return;
+ } else {
+ /* Beacons are expected only with broadcast address */
+ if (!is_broadcast_ether_addr(mgmt->da))
+ return;
}
+ /* Do not update the BSS table when only monitor interfaces are open */
+ if (local->open_count == local->monitors)
+ return;
+
bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len,
channel);
@@ -400,6 +408,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
req->ie, req->ie_len,
bands_used, req->rates, &chandef,
flags);
+ if (ielen < 0)
+ return false;
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
@@ -476,7 +486,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
}
/* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if (!hw_scan && was_scanning) {
ieee80211_configure_filter(local);
@@ -523,7 +533,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
/* Software scan is not supported in multi-channel cases */
- if (local->use_chanctx)
+ if (!local->emulate_chanctx)
return -EOPNOTSUPP;
/*
@@ -553,7 +563,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
ieee80211_configure_filter(local);
/* We need to set power level at maximum rate for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
@@ -677,7 +687,10 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
* After sending probe requests, wait for probe responses
* on the channel.
*/
- *next_delay = IEEE80211_CHANNEL_TIME;
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) - IEEE80211_PROBE_DELAY :
+ IEEE80211_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
}
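
The multi-line conditional expression above is easier to read spelled
out: the user-requested dwell time already covers the probe delay that
was just spent, and the default dwell acts as a floor:

	unsigned long dur = msecs_to_jiffies(scan_req->duration);

	if (dur > IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME)
		*next_delay = dur - IEEE80211_PROBE_DELAY;
	else
		*next_delay = IEEE80211_CHANNEL_TIME;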
@@ -787,7 +800,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
- (req->channels[0] == local->_oper_chandef.chan)) {
+ (req->channels[0] == local->hw.conf.chandef.chan)) {
/*
* If we are scanning only on the operating channel
* then we do not need to stop normal activities
@@ -805,7 +818,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_configure_filter(local); /* accept probe-responses */
/* We need to ensure power level is at max for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_RADAR)) ||
@@ -970,13 +983,13 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
- if (chan == local->_oper_chandef.chan)
- local->scan_chandef = local->_oper_chandef;
+ if (chan == local->hw.conf.chandef.chan)
+ local->scan_chandef = local->hw.conf.chandef;
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
set_channel:
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ if (ieee80211_hw_conf_chan(local))
skip = 1;
/* advance state machine to next channel/band */
@@ -1000,7 +1013,10 @@ set_channel:
*/
if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
!scan_req->n_ssids) {
- *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PASSIVE_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) :
+ IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
if (scan_req->n_ssids)
set_bit(SCAN_BEACON_WAIT, &local->scanning);
@@ -1017,7 +1033,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
{
/* switch back to the operating channel */
local->scan_chandef.chan = NULL;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ ieee80211_hw_conf_chan(local);
/* disable PS */
ieee80211_offchannel_return(local);
@@ -1316,10 +1332,12 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ieee80211_prepare_scan_chandef(&chandef);
- ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
- &sched_scan_ies, req->ie,
- req->ie_len, bands_used, rate_masks, &chandef,
- flags);
+ ret = ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
+ &sched_scan_ies, req->ie,
+ req->ie_len, bands_used, rate_masks,
+ &chandef, flags);
+ if (ret < 0)
+ goto error;
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
@@ -1327,8 +1345,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(local->sched_scan_req, req);
}
+error:
kfree(ie);
-
out:
if (ret) {
/* Clean in case of failure after HW restart or upon resume. */
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 55959b0b24c5..327c74e296e2 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -19,21 +19,222 @@
#include "sta_info.h"
#include "wme.h"
+static bool
+wbcs_elem_to_chandef(const struct ieee80211_wide_bw_chansw_ie *wbcs_elem,
+ struct cfg80211_chan_def *chandef)
+{
+ u8 ccfs0 = wbcs_elem->new_center_freq_seg0;
+ u8 ccfs1 = wbcs_elem->new_center_freq_seg1;
+ u32 cf0 = ieee80211_channel_to_frequency(ccfs0, chandef->chan->band);
+ u32 cf1 = ieee80211_channel_to_frequency(ccfs1, chandef->chan->band);
+
+ switch (wbcs_elem->new_channel_width) {
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf0;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq1 = cf0;
+ chandef->center_freq2 = cf1;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ chandef->center_freq1 = cf0;
+
+ if (ccfs1) {
+ u8 diff = abs(ccfs0 - ccfs1);
+
+ if (diff == 8) {
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf1;
+ } else if (diff > 8) {
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq2 = cf1;
+ }
+ }
+ break;
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ default:
+ /* If the WBCS element is present, the new channel bandwidth
+ * is at least 40 MHz.
+ */
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ chandef->center_freq1 = cf0;
+ break;
+ }
+
+ return cfg80211_chandef_valid(chandef);
+}
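
The 80 MHz case doubles as the current (non-deprecated) 160 MHz
encoding: ccfs0 names the 80 MHz segment containing the primary channel
and ccfs1 the 160 MHz center, so a difference of exactly 8 channel
numbers (40 MHz) signals 160 MHz operation. A worked 5 GHz example:

	/* primary channel 36, 160 MHz block spanning channels 36..64:
	 *   ccfs0 = 42 -> cf0 = 5210 MHz (80 MHz segment center)
	 *   ccfs1 = 50 -> cf1 = 5250 MHz (160 MHz center)
	 * |42 - 50| == 8 -> width = NL80211_CHAN_WIDTH_160, center_freq1 = cf1
	 */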
+
+static void
+validate_chandef_by_ht_vht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ u32 vht_cap_info,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee80211_vht_operation vht_oper;
+
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ ht_oper.primary_chan = ieee80211_frequency_to_channel(control_freq);
+ if (control_freq != center_freq1)
+ ht_oper.ht_param = control_freq > center_freq1 ?
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW :
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ ht_oper.ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ ieee80211_chandef_ht_oper(&ht_oper, chandef);
+
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return;
+
+ vht_oper.center_freq_seg0_idx =
+ ieee80211_frequency_to_channel(center_freq1);
+ vht_oper.center_freq_seg1_idx = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ WARN_ON(1);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ vht_oper.center_freq_seg1_idx = vht_oper.center_freq_seg0_idx;
+ vht_oper.center_freq_seg0_idx +=
+ control_freq < center_freq1 ? -8 : 8;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ ht_oper.operation_mode =
+ le16_encode_bits(vht_oper.center_freq_seg1_idx,
+ IEEE80211_HT_OP_MODE_CCFS2_MASK);
+
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
+ &vht_oper, &ht_oper, chandef))
+ chandef->chan = NULL;
+}
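
The 160 MHz branch inverts that split encoding when synthesizing the
VHT operation element: seg1 takes the 160 MHz center and seg0 is shifted
by +/-8 channels onto the 80 MHz half holding the primary. Continuing
the example above:

	/* center_freq1 = 5250 MHz -> seg1_idx = 50; control_freq = 5180 < 5250,
	 * so seg0_idx = 50 - 8 = 42, the 80 MHz segment containing channel 36;
	 * ieee80211_chandef_vht_oper() then rebuilds the same 160 MHz chandef
	 */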
+
+static void
+validate_chandef_by_6ghz_he_eht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct {
+ struct ieee80211_he_operation _oper;
+ struct ieee80211_he_6ghz_oper _6ghz_oper;
+ } __packed he;
+ struct {
+ struct ieee80211_eht_operation _oper;
+ struct ieee80211_eht_operation_info _oper_info;
+ } __packed eht;
+
+ if (conn->mode < IEEE80211_CONN_MODE_HE) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ he._oper.he_oper_params =
+ le32_encode_bits(1, IEEE80211_HE_OPERATION_6GHZ_OP_INFO);
+ he._6ghz_oper.primary =
+ ieee80211_frequency_to_channel(control_freq);
+ he._6ghz_oper.ccfs0 = ieee80211_frequency_to_channel(center_freq1);
+ he._6ghz_oper.ccfs1 = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -16 : 16;
+ he._6ghz_oper.control = IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -8 : 8;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_80P80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ;
+ break;
+ default:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ;
+ break;
+ }
+
+ if (conn->mode < IEEE80211_CONN_MODE_EHT) {
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ NULL, chandef))
+ chandef->chan = NULL;
+ } else {
+ eht._oper.params = IEEE80211_EHT_OPER_INFO_PRESENT;
+ eht._oper_info.control = he._6ghz_oper.control;
+ eht._oper_info.ccfs0 = he._6ghz_oper.ccfs0;
+ eht._oper_info.ccfs1 = he._6ghz_oper.ccfs1;
+
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ &eht._oper, chandef))
+ chandef->chan = NULL;
+ }
+}
+
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid,
struct ieee80211_csa_ie *csa_ie)
{
enum nl80211_band new_band = current_band;
int new_freq;
- u8 new_chan_no;
+ u8 new_chan_no = 0, new_op_class = 0;
struct ieee80211_channel *new_chan;
- struct cfg80211_chan_def new_vht_chandef = {};
+ struct cfg80211_chan_def new_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
const struct ieee80211_bandwidth_indication *bwi;
+ const struct ieee80211_ext_chansw_ie *ext_chansw_elem;
int secondary_channel_offset = -1;
memset(csa_ie, 0, sizeof(*csa_ie));
@@ -41,36 +242,41 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
bwi = elems->bandwidth_indication;
+ ext_chansw_elem = elems->ext_chansw_ie;
- if (conn_flags & (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_40MHZ)) {
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
}
- if (conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
wide_bw_chansw_ie = NULL;
- if (elems->ext_chansw_ie) {
- if (!ieee80211_operating_class_to_band(
- elems->ext_chansw_ie->new_operating_class,
- &new_band)) {
- sdata_info(sdata,
- "cannot understand ECSA IE operating class, %d, ignoring\n",
- elems->ext_chansw_ie->new_operating_class);
+ if (ext_chansw_elem) {
+ new_op_class = ext_chansw_elem->new_operating_class;
+
+ if (!ieee80211_operating_class_to_band(new_op_class, &new_band)) {
+ new_op_class = 0;
+ sdata_info(sdata, "cannot understand ECSA IE operating class, %d, ignoring\n",
+ ext_chansw_elem->new_operating_class);
+ } else {
+ new_chan_no = ext_chansw_elem->new_ch_num;
+ csa_ie->count = ext_chansw_elem->count;
+ csa_ie->mode = ext_chansw_elem->mode;
}
- new_chan_no = elems->ext_chansw_ie->new_ch_num;
- csa_ie->count = elems->ext_chansw_ie->count;
- csa_ie->mode = elems->ext_chansw_ie->mode;
- } else if (elems->ch_switch_ie) {
+ }
+
+ if (!new_op_class && elems->ch_switch_ie) {
new_chan_no = elems->ch_switch_ie->new_ch_num;
csa_ie->count = elems->ch_switch_ie->count;
csa_ie->mode = elems->ch_switch_ie->mode;
- } else {
- /* nothing here we understand */
- return 1;
}
+ /* nothing here we understand */
+ if (!new_chan_no)
+ return 1;
+
/* Mesh Channel Switch Parameters Element */
if (elems->mesh_chansw_params_ie) {
csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
@@ -95,7 +301,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
if (sec_chan_offs) {
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
- } else if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ } else if (conn->mode >= IEEE80211_CONN_MODE_HT) {
/* If the secondary channel offset IE is not present,
* we can't know what's the post-CSA offset, so the
* best we can do is use 20MHz.
@@ -107,26 +313,26 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
default:
/* secondary_channel_offset was present but is invalid */
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT20);
break;
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40PLUS);
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40MINUS);
break;
case -1:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_NO_HT);
/* keep width for 5/10 MHz channels */
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
- csa_ie->chandef.width =
- sdata->vif.bss_conf.chandef.width;
+ csa_ie->chanreq.oper.width =
+ sdata->vif.bss_conf.chanreq.oper.width;
break;
default:
break;
@@ -134,59 +340,48 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
break;
}
+ /* parse one of the Elements to build a new chandef */
+ memset(&new_chandef, 0, sizeof(new_chandef));
+ new_chandef.chan = new_chan;
if (bwi) {
/* start with the CSA one */
- new_vht_chandef = csa_ie->chandef;
+ new_chandef = csa_ie->chanreq.oper;
/* and update the width accordingly */
- /* FIXME: support 160/320 */
- ieee80211_chandef_eht_oper(&bwi->info, true, true,
- &new_vht_chandef);
- } else if (wide_bw_chansw_ie) {
- u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
- struct ieee80211_vht_operation vht_oper = {
- .chan_width =
- wide_bw_chansw_ie->new_channel_width,
- .center_freq_seg0_idx =
- wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg1_idx = new_seg1,
- /* .basic_mcs_set doesn't matter */
- };
- struct ieee80211_ht_operation ht_oper = {
- .operation_mode =
- cpu_to_le16(new_seg1 <<
- IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
- };
-
- /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
- * to the previously parsed chandef
- */
- new_vht_chandef = csa_ie->chandef;
-
- /* ignore if parsing fails */
- if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
- vht_cap_info,
- &vht_oper, &ht_oper,
- &new_vht_chandef))
- new_vht_chandef.chan = NULL;
-
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
- ieee80211_chandef_downgrade(&new_vht_chandef);
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
- ieee80211_chandef_downgrade(&new_vht_chandef);
+ ieee80211_chandef_eht_oper(&bwi->info, &new_chandef);
+ } else if (!wide_bw_chansw_ie || !wbcs_elem_to_chandef(wide_bw_chansw_ie,
+ &new_chandef)) {
+ if (!ieee80211_operating_class_to_chandef(new_op_class, new_chan,
+ &new_chandef))
+ new_chandef = csa_ie->chanreq.oper;
}
- /* if VHT data is there validate & use it */
- if (new_vht_chandef.chan) {
- if (!cfg80211_chandef_compatible(&new_vht_chandef,
- &csa_ie->chandef)) {
+ /* check if the new chandef fits the capabilities */
+ if (new_band == NL80211_BAND_6GHZ)
+ validate_chandef_by_6ghz_he_eht_oper(sdata, conn, &new_chandef);
+ else
+ validate_chandef_by_ht_vht_oper(sdata, conn, vht_cap_info,
+ &new_chandef);
+
+ /* if data is there validate the bandwidth & use it */
+ if (new_chandef.chan) {
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320 &&
+ new_chandef.width == NL80211_CHAN_WIDTH_320)
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160 &&
+ (new_chandef.width == NL80211_CHAN_WIDTH_80P80 ||
+ new_chandef.width == NL80211_CHAN_WIDTH_160))
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (!cfg80211_chandef_compatible(&new_chandef,
+ &csa_ie->chanreq.oper)) {
sdata_info(sdata,
"BSS %pM: CSA has inconsistent channel data, disconnecting\n",
bssid);
return -EINVAL;
}
- csa_ie->chandef = new_vht_chandef;
+
+ csa_ie->chanreq.oper = new_chandef;
}
if (elems->max_channel_switch_time)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4391d8dd634b..da5fdd6f5c85 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1566,7 +1566,8 @@ void sta_info_stop(struct ieee80211_local *local)
}
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
@@ -1580,12 +1581,18 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
WARN_ON(vlans && !sdata->bss);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- if (sdata == sta->sdata ||
- (vlans && sdata->bss == sta->sdata->bss)) {
- if (!WARN_ON(__sta_info_destroy_part1(sta)))
- list_add(&sta->free_list, &free_list);
- ret++;
- }
+ if (sdata != sta->sdata &&
+ (!vlans || sdata->bss != sta->sdata->bss))
+ continue;
+
+ if (link_id >= 0 && sta->sta.valid_links &&
+ !(sta->sta.valid_links & BIT(link_id)))
+ continue;
+
+ if (!WARN_ON(__sta_info_destroy_part1(sta)))
+ list_add(&sta->free_list, &free_list);
+
+ ret++;
}
if (!list_empty(&free_list)) {
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5ef1554f991f..a52fb76386d0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -482,6 +482,8 @@ struct ieee80211_fragment_cache {
* same for non-MLD STA. This is used as key for searching link STA
* @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
* and set to the corresponding vif LinkId for MLD STA
+ * @op_mode_nss: NSS limit as set by operating mode notification, or 0
+ * @capa_nss: NSS limit as determined by local and peer capabilities
* @link_hash_node: hash node for rhashtable
* @sta: Points to the STA info
* @gtk: group keys negotiated with this station, if any
@@ -518,6 +520,8 @@ struct link_sta_info {
u8 addr[ETH_ALEN];
u8 link_id;
+ u8 op_mode_nss, capa_nss;
+
struct rhlist_head link_hash_node;
struct sta_info *sta;
@@ -886,8 +890,12 @@ void sta_info_stop(struct ieee80211_local *local);
*
* @sdata: sdata to remove all stations from
* @vlans: if the given interface is an AP interface, also flush VLANs
+ * @link_id: if given (>= 0), only the STA entries using @link_id
+ * will be removed; if -1 is passed, all STA entries
+ * will be removed.
*/
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id);
/**
* sta_info_flush - flush matching STA entries from the STA table
@@ -895,10 +903,14 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
* Returns the number of removed STA entries.
*
* @sdata: sdata to remove all stations from
+ * @link_id: if given (>= 0), only the STA entries using @link_id
+ * will be removed; if -1 is passed, all STA entries
+ * will be removed.
*/
-static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata)
+static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata,
+ int link_id)
{
- return __sta_info_flush(sdata, false);
+ return __sta_info_flush(sdata, false, link_id);
}
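
For illustration, the two call patterns after this change:

	sta_info_flush(sdata, -1);		/* remove every STA entry */
	sta_info_flush(sdata, link_id);		/* only STAs using this link */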
void sta_set_rate_info_tx(struct sta_info *sta,
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 49730b424141..f07b40916485 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -6,7 +6,7 @@
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -159,7 +159,7 @@ static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link,
u8 *pos;
u8 op_class;
- if (!ieee80211_chandef_to_operating_class(&link->conf->chandef,
+ if (!ieee80211_chandef_to_operating_class(&link->conf->chanreq.oper,
&op_class))
return;
@@ -347,7 +347,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
(uc.width > sta->tdls_chandef.width &&
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype)))
- ieee80211_chandef_downgrade(&uc);
+ ieee80211_chandef_downgrade(&uc, NULL);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
@@ -382,8 +382,8 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sband))
return;
- ieee80211_add_srates_ie(sdata, skb, false, sband->band);
- ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_EXT_SUPP_RATES);
ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
@@ -438,7 +438,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
}
ieee80211_tdls_add_oper_classes(link, skb);
@@ -548,30 +548,14 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the HE-cap from sband */
- if (he_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- __le16 he_6ghz_capa;
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size);
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+ ieee80211_put_he_cap(skb, sdata, sband, NULL);
/* Build HE 6Ghz capa IE from sband */
- if (sband->band == NL80211_BAND_6GHZ) {
- cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
- pos = skb_put(skb, cap_size);
- he_6ghz_capa =
- ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif);
- pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa,
- pos + cap_size);
- }
+ if (sband->band == NL80211_BAND_6GHZ)
+ ieee80211_put_he_6ghz_cap(skb, sdata, link->smps_mode);
}
/* add any custom IEs that go before EHT capabilities */
@@ -591,21 +575,10 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the EHT-cap from sband */
- if (he_cap && eht_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem, false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false);
- }
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
+ ieee80211_put_eht_cap(skb, sdata, sband, NULL);
/* add any remaining IEs */
if (extra_ies_len) {
@@ -638,7 +611,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta || !ap_sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
@@ -684,7 +657,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
- &link->conf->chandef, prot,
+ &link->conf->chanreq.oper, prot,
true);
}
@@ -1413,8 +1386,8 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
u16 opmode;
- /* Nothing to do if the BSS connection uses HT */
- if (!(sdata->deflink.u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ /* Nothing to do if the BSS connection uses (at least) HT */
+ if (sdata->deflink.u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
return;
tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) ||
@@ -2055,8 +2028,9 @@ ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
}
}
-void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+void ieee80211_teardown_tdls_peers(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
struct sta_info *sta;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
@@ -2066,6 +2040,9 @@ void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
continue;
+ if (sta->deflink.link_id != link->link_id)
+ continue;
+
ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
NL80211_TDLS_TEARDOWN, reason,
GFP_ATOMIC);
diff --git a/net/mac80211/tests/elems.c b/net/mac80211/tests/elems.c
index 997d0cd27b2d..a413ba29f759 100644
--- a/net/mac80211/tests/elems.c
+++ b/net/mac80211/tests/elems.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for element parsing
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <kunit/test.h>
#include "../ieee80211_i.h"
@@ -14,6 +14,7 @@ static void mle_defrag(struct kunit *test)
struct ieee80211_elems_parse_params parse_params = {
.link_id = 12,
.from_ap = true,
+ .mode = IEEE80211_CONN_MODE_EHT,
};
struct ieee802_11_elems *parsed;
struct sk_buff *skb;
@@ -68,7 +69,7 @@ static void mle_defrag(struct kunit *test)
if (IS_ERR_OR_NULL(parsed))
goto free_skb;
- KUNIT_EXPECT_NOT_NULL(test, parsed->ml_basic_elem);
+ KUNIT_EXPECT_NOT_NULL(test, parsed->ml_basic);
KUNIT_EXPECT_EQ(test,
parsed->ml_basic_len,
2 /* control */ +
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 06835ed4c44f..8e758b5074bd 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -50,7 +50,7 @@
__entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \
__entry->freq1_offset = (c) ? (c)->freq1_offset : 0; \
__entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
-#define CHANDEF_PR_FMT " control:%d.%03d MHz width:%d center: %d.%03d/%d MHz"
+#define CHANDEF_PR_FMT " chandef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define CHANDEF_PR_ARG __entry->control_freq, __entry->freq_offset, __entry->chan_width, \
__entry->center_freq1, __entry->freq1_offset, __entry->center_freq2
@@ -69,22 +69,45 @@
__entry->min_center_freq1 = (c)->center_freq1; \
__entry->min_freq1_offset = (c)->freq1_offset; \
__entry->min_center_freq2 = (c)->center_freq2;
-#define MIN_CHANDEF_PR_FMT " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
+#define MIN_CHANDEF_PR_FMT " mindef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_freq_offset, \
__entry->min_chan_width, \
__entry->min_center_freq1, __entry->min_freq1_offset, \
__entry->min_center_freq2
+#define AP_CHANDEF_ENTRY \
+ __field(u32, ap_control_freq) \
+ __field(u32, ap_freq_offset) \
+ __field(u32, ap_chan_width) \
+ __field(u32, ap_center_freq1) \
+ __field(u32, ap_freq1_offset) \
+ __field(u32, ap_center_freq2)
+
+#define AP_CHANDEF_ASSIGN(c) \
+ __entry->ap_control_freq = (c)->chan ? (c)->chan->center_freq : 0;\
+ __entry->ap_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;\
+ __entry->ap_chan_width = (c)->chan ? (c)->width : 0; \
+ __entry->ap_center_freq1 = (c)->chan ? (c)->center_freq1 : 0; \
+ __entry->ap_freq1_offset = (c)->chan ? (c)->freq1_offset : 0; \
+ __entry->ap_center_freq2 = (c)->chan ? (c)->center_freq2 : 0;
+#define AP_CHANDEF_PR_FMT " ap(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
+#define AP_CHANDEF_PR_ARG __entry->ap_control_freq, __entry->ap_freq_offset, \
+ __entry->ap_chan_width, \
+ __entry->ap_center_freq1, __entry->ap_freq1_offset, \
+ __entry->ap_center_freq2
+
#define CHANCTX_ENTRY CHANDEF_ENTRY \
MIN_CHANDEF_ENTRY \
+ AP_CHANDEF_ENTRY \
__field(u8, rx_chains_static) \
__field(u8, rx_chains_dynamic)
#define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \
MIN_CHANDEF_ASSIGN(&ctx->conf.min_def) \
+ AP_CHANDEF_ASSIGN(&ctx->conf.ap) \
__entry->rx_chains_static = ctx->conf.rx_chains_static; \
__entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
-#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT " chains:%d/%d"
-#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \
+#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT AP_CHANDEF_PR_FMT " chains:%d/%d"
+#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, AP_CHANDEF_PR_ARG, \
__entry->rx_chains_static, __entry->rx_chains_dynamic
#define KEY_ENTRY __field(u32, cipher) \
@@ -503,9 +526,9 @@ TRACE_EVENT(drv_link_info_changed,
__entry->ht_operation_mode = link_conf->ht_operation_mode;
__entry->cqm_rssi_thold = link_conf->cqm_rssi_thold;
__entry->cqm_rssi_hyst = link_conf->cqm_rssi_hyst;
- __entry->channel_width = link_conf->chandef.width;
- __entry->channel_cfreq1 = link_conf->chandef.center_freq1;
- __entry->channel_cfreq1_offset = link_conf->chandef.freq1_offset;
+ __entry->channel_width = link_conf->chanreq.oper.width;
+ __entry->channel_cfreq1 = link_conf->chanreq.oper.center_freq1;
+ __entry->channel_cfreq1_offset = link_conf->chanreq.oper.freq1_offset;
__entry->qos = link_conf->qos;
__entry->hidden_ssid = link_conf->hidden_ssid;
__entry->txpower = link_conf->txpower;
@@ -1186,7 +1209,7 @@ DEFINE_EVENT(sta_event, drv_flush_sta,
TP_ARGS(local, sdata, sta)
);
-TRACE_EVENT(drv_channel_switch,
+DECLARE_EVENT_CLASS(chanswitch_evt,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
@@ -1201,6 +1224,7 @@ TRACE_EVENT(drv_channel_switch,
__field(u32, device_timestamp)
__field(bool, block_tx)
__field(u8, count)
+ __field(u8, link_id)
),
TP_fast_assign(
@@ -1211,14 +1235,24 @@ TRACE_EVENT(drv_channel_switch,
__entry->device_timestamp = ch_switch->device_timestamp;
__entry->block_tx = ch_switch->block_tx;
__entry->count = ch_switch->count;
+ __entry->link_id = ch_switch->link_id;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " new " CHANDEF_PR_FMT " count:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count
+ LOCAL_PR_FMT VIF_PR_FMT CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu device_ts:%u link_id:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
+ __entry->block_tx, __entry->timestamp,
+ __entry->device_timestamp, __entry->link_id
)
);
+DEFINE_EVENT(chanswitch_evt, drv_channel_switch,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel_switch *ch_switch),
+ TP_ARGS(local, sdata, ch_switch)
+);
+
TRACE_EVENT(drv_set_antenna,
TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
@@ -2098,39 +2132,11 @@ TRACE_EVENT(drv_channel_switch_beacon,
)
);
-TRACE_EVENT(drv_pre_channel_switch,
+DEFINE_EVENT(chanswitch_evt, drv_pre_channel_switch,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
-
- TP_ARGS(local, sdata, ch_switch),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
- ),
-
- TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " prepare channel switch to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
- )
+ TP_ARGS(local, sdata, ch_switch)
);
DEFINE_EVENT(local_sdata_evt, drv_post_channel_switch,
@@ -2145,40 +2151,11 @@ DEFINE_EVENT(local_sdata_evt, drv_abort_channel_switch,
TP_ARGS(local, sdata)
);
-TRACE_EVENT(drv_channel_switch_rx_beacon,
+DEFINE_EVENT(chanswitch_evt, drv_channel_switch_rx_beacon,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_switch *ch_switch),
-
- TP_ARGS(local, sdata, ch_switch),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- CHANDEF_ENTRY
- __field(u64, timestamp)
- __field(u32, device_timestamp)
- __field(bool, block_tx)
- __field(u8, count)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- CHANDEF_ASSIGN(&ch_switch->chandef)
- __entry->timestamp = ch_switch->timestamp;
- __entry->device_timestamp = ch_switch->device_timestamp;
- __entry->block_tx = ch_switch->block_tx;
- __entry->count = ch_switch->count;
- ),
-
- TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT
- " received a channel switch beacon to "
- CHANDEF_PR_FMT " count:%d block_tx:%d timestamp:%llu",
- LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG, __entry->count,
- __entry->block_tx, __entry->timestamp
- )
+ TP_ARGS(local, sdata, ch_switch)
);
TRACE_EVENT(drv_get_txpower,
@@ -3035,6 +3012,34 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(api_request_smps,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ enum ieee80211_smps_mode smps_mode),
+
+ TP_ARGS(local, sdata, link, smps_mode),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, link_id)
+ __field(u32, smps_mode)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->link_id = link->link_id;
+ __entry->smps_mode = smps_mode;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " " VIF_PR_FMT " link:%d, smps_mode:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->smps_mode
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
@@ -3088,6 +3093,58 @@ TRACE_EVENT(stop_queue,
)
);
+TRACE_EVENT(drv_can_neg_ttlm,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG)
+);
+
+TRACE_EVENT(drv_neg_ttlm_res,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res res,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, res, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u32, res)
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->res = res;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT VIF_PR_FMT " response: %d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->res
+ )
+);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index c9dbe9aab7bd..aea4ce55c5ac 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -16,8 +16,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
-#define MAX_MSG_LEN 120
-
DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6fbb15b65902..6bf223e6cd1a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -133,6 +133,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
mrate = sband->bitrates[0].bitrate;
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *r = &sband->bitrates[i];
+ u32 flag;
if (r->bitrate > txrate->bitrate)
break;
@@ -145,28 +146,24 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
switch (sband->band) {
case NL80211_BAND_2GHZ:
- case NL80211_BAND_LC: {
- u32 flag;
+ case NL80211_BAND_LC:
if (tx->sdata->deflink.operating_11g_mode)
flag = IEEE80211_RATE_MANDATORY_G;
else
flag = IEEE80211_RATE_MANDATORY_B;
- if (r->flags & flag)
- mrate = r->bitrate;
break;
- }
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
- if (r->flags & IEEE80211_RATE_MANDATORY_A)
- mrate = r->bitrate;
+ flag = IEEE80211_RATE_MANDATORY_A;
break;
- case NL80211_BAND_S1GHZ:
- case NL80211_BAND_60GHZ:
- /* TODO, for now fall through */
- case NUM_NL80211_BANDS:
+ default:
+ flag = 0;
WARN_ON(1);
break;
}
+
+ if (r->flags & flag)
+ mrate = r->bitrate;
}
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
@@ -2393,12 +2390,18 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (chanctx_conf)
chandef = &chanctx_conf->def;
- else if (!local->use_chanctx)
- chandef = &local->_oper_chandef;
else
goto fail_rcu;
/*
+ * If driver/HW supports IEEE80211_CHAN_CAN_MONITOR we still
+ * shouldn't transmit on disabled channels.
+ */
+ if (!cfg80211_chandef_usable(local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ goto fail_rcu;
+
+ /*
* Frame injection is not allowed if beaconing is not allowed
* or if we need radar detection. Beaconing is usually not allowed when
* the mode or operation (Adhoc, AP, Mesh) does not support DFS.
@@ -3959,7 +3962,8 @@ begin:
ieee80211_free_txskb(&local->hw, skb);
goto begin;
} else {
- vif = NULL;
+ info->control.vif = NULL;
+ return skb;
}
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -5032,16 +5036,24 @@ static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
return beacon->cntdwn_current_counter;
}
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 count = 0;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return 0;
+
rcu_read_lock();
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto unlock;
+
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -5083,9 +5095,11 @@ unlock:
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 *beacon_data;
size_t beacon_data_len;
@@ -5094,9 +5108,17 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
if (!ieee80211_sdata_running(sdata))
return false;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return 0;
+
rcu_read_lock();
+
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto out;
+
if (vif->type == NL80211_IFTYPE_AP) {
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
if (WARN_ON(!beacon || !beacon->tail))
goto out;
beacon_data = beacon->tail;
@@ -5282,7 +5304,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, link->link_id);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
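
A hypothetical driver-side use of the now link-aware countdown helpers
(driver_complete_csa() is a placeholder for driver-specific completion):

	/* count can be embedded in the beacon the driver is building */
	u8 count = ieee80211_beacon_update_cntdwn(vif, link_id);

	if (ieee80211_beacon_cntdwn_is_complete(vif, link_id))
		driver_complete_csa(vif, link_id);	/* placeholder hook */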
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 643c54855be6..a237cbcf7b49 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* utilities for mac80211
*/
@@ -46,6 +46,11 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
+const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited = {
+ .mode = IEEE80211_CONN_MODE_EHT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_320,
+};
+
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type)
{
@@ -912,776 +917,6 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-static void
-ieee80211_parse_extension_element(u32 *crc,
- const struct element *elem,
- struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- const void *data = elem->data + 1;
- bool calc_crc = false;
- u8 len;
-
- if (!elem->datalen)
- return;
-
- len = elem->datalen - 1;
-
- switch (elem->data[0]) {
- case WLAN_EID_EXT_HE_MU_EDCA:
- calc_crc = true;
- if (len >= sizeof(*elems->mu_edca_param_set))
- elems->mu_edca_param_set = data;
- break;
- case WLAN_EID_EXT_HE_CAPABILITY:
- if (ieee80211_he_capa_size_ok(data, len)) {
- elems->he_cap = data;
- elems->he_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_HE_OPERATION:
- calc_crc = true;
- if (len >= sizeof(*elems->he_operation) &&
- len >= ieee80211_he_oper_size(data) - 1)
- elems->he_operation = data;
- break;
- case WLAN_EID_EXT_UORA:
- if (len >= 1)
- elems->uora_element = data;
- break;
- case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
- if (len == 3)
- elems->max_channel_switch_time = data;
- break;
- case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len >= sizeof(*elems->mbssid_config_ie))
- elems->mbssid_config_ie = data;
- break;
- case WLAN_EID_EXT_HE_SPR:
- if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
- elems->he_spr = data;
- break;
- case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len >= sizeof(*elems->he_6ghz_capa))
- elems->he_6ghz_capa = data;
- break;
- case WLAN_EID_EXT_EHT_CAPABILITY:
- if (ieee80211_eht_capa_size_ok(elems->he_cap,
- data, len,
- params->from_ap)) {
- elems->eht_cap = data;
- elems->eht_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_EHT_OPERATION:
- if (ieee80211_eht_oper_size_ok(data, len))
- elems->eht_operation = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_EHT_MULTI_LINK:
- calc_crc = true;
-
- if (ieee80211_mle_size_ok(data, len)) {
- const struct ieee80211_multi_link_elem *mle =
- (void *)data;
-
- switch (le16_get_bits(mle->control,
- IEEE80211_ML_CONTROL_TYPE)) {
- case IEEE80211_ML_CONTROL_TYPE_BASIC:
- elems->ml_basic_elem = (void *)elem;
- elems->ml_basic = data;
- elems->ml_basic_len = len;
- break;
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- elems->ml_reconf_elem = (void *)elem;
- elems->ml_reconf = data;
- elems->ml_reconf_len = len;
- break;
- default:
- break;
- }
- }
- break;
- case WLAN_EID_EXT_BANDWIDTH_INDICATION:
- if (ieee80211_bandwidth_indication_size_ok(data, len))
- elems->bandwidth_indication = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
- calc_crc = true;
- if (ieee80211_tid_to_link_map_size_ok(data, len) &&
- elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
- elems->ttlm[elems->ttlm_num] = (void *)data;
- elems->ttlm_num++;
- }
- break;
- }
-
- if (crc && calc_crc)
- *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
-}
-
-static u32
-_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
- struct ieee802_11_elems *elems,
- const struct element *check_inherit)
-{
- const struct element *elem;
- bool calc_crc = params->filter != 0;
- DECLARE_BITMAP(seen_elems, 256);
- u32 crc = params->crc;
-
- bitmap_zero(seen_elems, 256);
-
- for_each_element(elem, params->start, params->len) {
- const struct element *subelem;
- bool elem_parse_failed;
- u8 id = elem->id;
- u8 elen = elem->datalen;
- const u8 *pos = elem->data;
-
- if (check_inherit &&
- !cfg80211_is_element_inherited(elem,
- check_inherit))
- continue;
-
- switch (id) {
- case WLAN_EID_SSID:
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_CHALLENGE:
- case WLAN_EID_RSN:
- case WLAN_EID_ERP_INFO:
- case WLAN_EID_EXT_SUPP_RATES:
- case WLAN_EID_HT_CAPABILITY:
- case WLAN_EID_HT_OPERATION:
- case WLAN_EID_VHT_CAPABILITY:
- case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_MESH_ID:
- case WLAN_EID_MESH_CONFIG:
- case WLAN_EID_PEER_MGMT:
- case WLAN_EID_PREQ:
- case WLAN_EID_PREP:
- case WLAN_EID_PERR:
- case WLAN_EID_RANN:
- case WLAN_EID_CHANNEL_SWITCH:
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- case WLAN_EID_COUNTRY:
- case WLAN_EID_PWR_CONSTRAINT:
- case WLAN_EID_TIMEOUT_INTERVAL:
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- case WLAN_EID_CHAN_SWITCH_PARAM:
- case WLAN_EID_EXT_CAPABILITY:
- case WLAN_EID_CHAN_SWITCH_TIMING:
- case WLAN_EID_LINK_ID:
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- case WLAN_EID_RSNX:
- case WLAN_EID_S1G_BCN_COMPAT:
- case WLAN_EID_S1G_CAPABILITIES:
- case WLAN_EID_S1G_OPERATION:
- case WLAN_EID_AID_RESPONSE:
- case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
- /*
- * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
- * that if the content gets bigger it might be needed more than once
- */
- if (test_bit(id, seen_elems)) {
- elems->parse_error = true;
- continue;
- }
- break;
- }
-
- if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elem_parse_failed = false;
-
- switch (id) {
- case WLAN_EID_LINK_ID:
- if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
- elem_parse_failed = true;
- break;
- }
- elems->lnk_id = (void *)(pos - 2);
- break;
- case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_sw_timing = (void *)pos;
- break;
- case WLAN_EID_EXT_CAPABILITY:
- elems->ext_capab = pos;
- elems->ext_capab_len = elen;
- break;
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- if (elen >= 1)
- elems->ds_params = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- } else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- if (elen >= 1)
- elems->erp_info = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_HT_OPERATION:
- if (elen >= sizeof(struct ieee80211_ht_operation))
- elems->ht_operation = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_vht_cap))
- elems->vht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_OPERATION:
- if (elen >= sizeof(struct ieee80211_vht_operation)) {
- elems->vht_operation = (void *)pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_OPMODE_NOTIF:
- if (elen > 0) {
- elems->opmode_notif = pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_PEER_MGMT:
- elems->peering = pos;
- elems->peering_len = elen;
- break;
- case WLAN_EID_MESH_AWAKE_WINDOW:
- if (elen >= 2)
- elems->awake_window = (void *)pos;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_switch_ie = (void *)pos;
- break;
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ext_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->sec_chan_offs = (void *)pos;
- break;
- case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen <
- sizeof(*elems->mesh_chansw_params_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->mesh_chansw_params_ie = (void *)pos;
- break;
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- if (!params->action ||
- elen < sizeof(*elems->wide_bw_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->wide_bw_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
- if (params->action) {
- elem_parse_failed = true;
- break;
- }
- /*
- * This is a bit tricky, but as we only care about
- * a few elements, parse them out manually.
- */
- subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
- pos, elen);
- if (subelem) {
- if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
- elems->wide_bw_chansw_ie =
- (void *)subelem->data;
- else
- elem_parse_failed = true;
- }
-
- subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
- pos, elen);
- if (subelem) {
- const void *edata = subelem->data + 1;
- u8 edatalen = subelem->datalen - 1;
-
- if (ieee80211_bandwidth_indication_size_ok(edata,
- edatalen))
- elems->bandwidth_indication = edata;
- else
- elem_parse_failed = true;
- }
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- if (elen != 1) {
- elem_parse_failed = true;
- break;
- }
- elems->pwr_constr_elem = pos;
- break;
- case WLAN_EID_CISCO_VENDOR_SPECIFIC:
- /* Lots of different options exist, but we only care
- * about the Dynamic Transmit Power Control element.
- * First check for the Cisco OUI, then for the DTPC
- * tag (0x00).
- */
- if (elen < 4) {
- elem_parse_failed = true;
- break;
- }
-
- if (pos[0] != 0x00 || pos[1] != 0x40 ||
- pos[2] != 0x96 || pos[3] != 0x00)
- break;
-
- if (elen != 6) {
- elem_parse_failed = true;
- break;
- }
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elems->cisco_dtpc_elem = pos;
- break;
- case WLAN_EID_ADDBA_EXT:
- if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->addba_ext_ie = (void *)pos;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
- elems->timeout_int = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- if (elen >= sizeof(*elems->max_idle_period_ie))
- elems->max_idle_period_ie = (void *)pos;
- break;
- case WLAN_EID_RSNX:
- elems->rsnx = pos;
- elems->rsnx_len = elen;
- break;
- case WLAN_EID_TX_POWER_ENVELOPE:
- if (elen < 1 ||
- elen > sizeof(struct ieee80211_tx_pwr_env))
- break;
-
- if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
- break;
-
- elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
- elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
- elems->tx_pwr_env_num++;
- break;
- case WLAN_EID_EXTENSION:
- ieee80211_parse_extension_element(calc_crc ?
- &crc : NULL,
- elem, elems, params);
- break;
- case WLAN_EID_S1G_CAPABILITIES:
- if (elen >= sizeof(*elems->s1g_capab))
- elems->s1g_capab = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_OPERATION:
- if (elen == sizeof(*elems->s1g_oper))
- elems->s1g_oper = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_BCN_COMPAT:
- if (elen == sizeof(*elems->s1g_bcn_compat))
- elems->s1g_bcn_compat = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_AID_RESPONSE:
- if (elen == sizeof(struct ieee80211_aid_response_ie))
- elems->aid_resp = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- default:
- break;
- }
-
- if (elem_parse_failed)
- elems->parse_error = true;
- else
- __set_bit(id, seen_elems);
- }
-
- if (!for_each_element_completed(elem, params->start, params->len))
- elems->parse_error = true;
-
- return crc;
-}
-
-static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- struct cfg80211_bss *bss,
- u8 *nontransmitted_profile)
-{
- const struct element *elem, *sub;
- size_t profile_len = 0;
- bool found = false;
-
- if (!bss || !bss->transmitted_bss)
- return profile_len;
-
- for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
- if (elem->datalen < 2)
- continue;
- if (elem->data[0] < 1 || elem->data[0] > 8)
- continue;
-
- for_each_element(sub, elem->data + 1, elem->datalen - 1) {
- u8 new_bssid[ETH_ALEN];
- const u8 *index;
-
- if (sub->id != 0 || sub->datalen < 4) {
- /* not a valid BSS profile */
- continue;
- }
-
- if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
- sub->data[1] != 2) {
- /* The first element of the
- * Nontransmitted BSSID Profile is not
- * the Nontransmitted BSSID Capability
- * element.
- */
- continue;
- }
-
- memset(nontransmitted_profile, 0, len);
- profile_len = cfg80211_merge_profile(start, len,
- elem,
- sub,
- nontransmitted_profile,
- len);
-
- /* found a Nontransmitted BSSID Profile */
- index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
- nontransmitted_profile,
- profile_len);
- if (!index || index[1] < 1 || index[2] == 0) {
- /* Invalid MBSSID Index element */
- continue;
- }
-
- cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
- elem->data[0],
- index[2],
- new_bssid);
- if (ether_addr_equal(new_bssid, bss->bssid)) {
- found = true;
- elems->bssid_index_len = index[1];
- elems->bssid_index = (void *)&index[2];
- break;
- }
- }
- }
-
- return found ? profile_len : 0;
-}
-
-static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
- u8 link_id)
-{
- const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *sub;
-
- if (!ml || !ml_len)
- return;
-
- if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
-
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
- struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
- ssize_t sta_prof_len;
- u16 control;
-
- if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
- continue;
-
- if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
- sub->datalen))
- return;
-
- control = le16_to_cpu(prof->control);
-
- if (link_id != u16_get_bits(control,
- IEEE80211_MLE_STA_CONTROL_LINK_ID))
- continue;
-
- if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
- return;
-
- /* the sub element can be fragmented */
- sta_prof_len =
- cfg80211_defragment_element(sub,
- (u8 *)ml, ml_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- IEEE80211_MLE_SUBELEM_FRAGMENT);
-
- if (sta_prof_len < 0)
- return;
-
- elems->prof = (void *)elems->scratch_pos;
- elems->sta_prof_len = sta_prof_len;
- elems->scratch_pos += sta_prof_len;
-
- return;
- }
-}
-
-static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- struct ieee80211_mle_per_sta_profile *prof;
- struct ieee80211_elems_parse_params sub = {
- .action = params->action,
- .from_ap = params->from_ap,
- .link_id = -1,
- };
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *non_inherit = NULL;
- const u8 *end;
-
- if (params->link_id == -1)
- return;
-
- ml_len = cfg80211_defragment_element(elems->ml_basic_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
-
- if (ml_len < 0)
- return;
-
- elems->ml_basic = (const void *)elems->scratch_pos;
- elems->ml_basic_len = ml_len;
-
- ieee80211_mle_get_sta_prof(elems, params->link_id);
- prof = elems->prof;
-
- if (!prof)
- return;
-
- /* check if we have the 4 bytes for the fixed part in assoc response */
- if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
- elems->prof = NULL;
- elems->sta_prof_len = 0;
- return;
- }
-
- /*
- * Skip the capability information and the status code that are expected
- * as part of the station profile in association response frames. Note
- * the -1 is because the 'sta_info_len' is accounted to as part of the
- * per-STA profile, but not part of the 'u8 variable[]' portion.
- */
- sub.start = prof->variable + prof->sta_info_len - 1 + 4;
- end = (const u8 *)prof + elems->sta_prof_len;
- sub.len = end - sub.start;
-
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- sub.start, sub.len);
- _ieee802_11_parse_elems_full(&sub, elems, non_inherit);
-}
-
-struct ieee802_11_elems *
-ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
-{
- struct ieee802_11_elems *elems;
- const struct element *non_inherit = NULL;
- u8 *nontransmitted_profile;
- int nontransmitted_profile_len = 0;
- size_t scratch_len = 3 * params->len;
-
- elems = kzalloc(struct_size(elems, scratch, scratch_len), GFP_ATOMIC);
- if (!elems)
- return NULL;
- elems->ie_start = params->start;
- elems->total_len = params->len;
- elems->scratch_len = scratch_len;
- elems->scratch_pos = elems->scratch;
-
- nontransmitted_profile = elems->scratch_pos;
- nontransmitted_profile_len =
- ieee802_11_find_bssid_profile(params->start, params->len,
- elems, params->bss,
- nontransmitted_profile);
- elems->scratch_pos += nontransmitted_profile_len;
- elems->scratch_len -= nontransmitted_profile_len;
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- nontransmitted_profile,
- nontransmitted_profile_len);
-
- elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit);
-
- /* Override with nontransmitted profile, if found */
- if (nontransmitted_profile_len) {
- struct ieee80211_elems_parse_params sub = {
- .start = nontransmitted_profile,
- .len = nontransmitted_profile_len,
- .action = params->action,
- .link_id = params->link_id,
- };
-
- _ieee802_11_parse_elems_full(&sub, elems, NULL);
- }
-
- ieee80211_mle_parse_link(elems, params);
-
- if (elems->tim && !elems->parse_error) {
- const struct ieee80211_tim_ie *tim_ie = elems->tim;
-
- elems->dtim_period = tim_ie->dtim_period;
- elems->dtim_count = tim_ie->dtim_count;
- }
-
- /* Override DTIM period and count if needed */
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_period))
- elems->dtim_period = elems->bssid_index->dtim_period;
-
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_count))
- elems->dtim_count = elems->bssid_index->dtim_count;
-
- return elems;
-}
-EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
-
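The element-parsing machinery removed above is not deleted outright; it moves into a dedicated parsing file in this series (net/mac80211/parse.c in the resulting tree), which is why no replacement appears in util.c here.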
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params
*qparam, int ac)
@@ -1938,37 +1173,34 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
}
}
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end)
+static int ieee80211_put_s1g_cap(struct sk_buff *skb,
+ struct ieee80211_sta_s1g_cap *s1g_cap)
{
- if ((end - pos) < 5)
- return pos;
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_s1g_cap))
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = 1 + sizeof(cap);
- *pos++ = WLAN_EID_EXT_HE_6GHZ_CAPA;
- memcpy(pos, &cap, sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_S1G_CAPABILITIES);
+ skb_put_u8(skb, sizeof(struct ieee80211_s1g_cap));
- return pos + 2;
+ skb_put_data(skb, &s1g_cap->cap, sizeof(s1g_cap->cap));
+ skb_put_data(skb, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
+
+ return 0;
}
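ieee80211_put_s1g_cap() establishes the pattern this patch applies throughout util.c: check skb_tailroom() once up front, then emit the element with skb_put_u8()/skb_put_data() and report -ENOBUFS on overflow, instead of hand-maintaining pos/end pointers. Reduced to its skeleton (illustrative element, not from this patch):

	/* Illustrative skeleton of the skb-based element helpers */
	static int put_simple_elem(struct sk_buff *skb, u8 eid, u8 value)
	{
		if (skb_tailroom(skb) < 3)	/* EID + length + 1-byte body */
			return -ENOBUFS;

		skb_put_u8(skb, eid);
		skb_put_u8(skb, 1);		/* element length */
		skb_put_u8(skb, value);
		return 0;
	}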
-static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
- u8 *buffer, size_t buffer_len,
- const u8 *ie, size_t ie_len,
- enum nl80211_band band,
- u32 rate_mask,
- struct cfg80211_chan_def *chandef,
- size_t *offset, u32 flags)
+static int ieee80211_put_preq_ies_band(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *ie, size_t ie_len,
+ size_t *offset,
+ enum nl80211_band band,
+ u32 rate_mask,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 *pos = buffer, *end = buffer + buffer_len;
+ int i, err;
size_t noffset;
- int supp_rates_len, i;
- u8 rates[32];
- int num_rates;
- int ext_rates_len;
u32 rate_flags;
bool have_80mhz = false;
@@ -1981,32 +1213,13 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
rate_flags = ieee80211_chandef_rate_flags(chandef);
/* For direct scan add S1G IE and consider its override bits */
- if (band == NL80211_BAND_S1GHZ) {
- if (end - pos < 2 + sizeof(struct ieee80211_s1g_cap))
- goto out_err;
- pos = ieee80211_ie_build_s1g_cap(pos, &sband->s1g_cap);
- goto done;
- }
+ if (band == NL80211_BAND_S1GHZ)
+ return ieee80211_put_s1g_cap(skb, &sband->s1g_cap);
- num_rates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if ((BIT(i) & rate_mask) == 0)
- continue; /* skip rate */
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- rates[num_rates++] =
- (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- }
-
- supp_rates_len = min_t(int, num_rates, 8);
-
- if (end - pos < 2 + supp_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
- memcpy(pos, rates, supp_rates_len);
- pos += supp_rates_len;
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
+ ~rate_mask, WLAN_EID_SUPP_RATES);
+ if (err)
+ return err;
/* insert "request information" if in custom IEs */
if (ie && ie_len) {
@@ -2019,34 +1232,28 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
before_extrates,
ARRAY_SIZE(before_extrates),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- ext_rates_len = num_rates - supp_rates_len;
- if (ext_rates_len > 0) {
- if (end - pos < 2 + ext_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = ext_rates_len;
- memcpy(pos, rates + supp_rates_len, ext_rates_len);
- pos += ext_rates_len;
- }
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
+ ~rate_mask, WLAN_EID_EXT_SUPP_RATES);
+ if (err)
+ return err;
if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
- if (end - pos < 3)
- goto out_err;
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(
- chandef->chan->center_freq);
+ if (skb_tailroom(skb) < 3)
+ return -ENOBUFS;
+ skb_put_u8(skb, WLAN_EID_DS_PARAMS);
+ skb_put_u8(skb, 1);
+ skb_put_u8(skb,
+ ieee80211_frequency_to_channel(chandef->chan->center_freq));
}
if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
- goto done;
+ return 0;
/* insert custom IEs that go before HT */
if (ie && ie_len) {
@@ -2061,18 +1268,21 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_ht, ARRAY_SIZE(before_ht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
if (sband->ht_cap.ht_supported) {
- if (end - pos < 2 + sizeof(struct ieee80211_ht_cap))
- goto out_err;
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+ ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+ sband->ht_cap.cap);
}
/* insert custom IEs that go before VHT */
@@ -2093,10 +1303,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_vht, ARRAY_SIZE(before_vht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
@@ -2111,10 +1320,14 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
}
if (sband->vht_cap.vht_supported && have_80mhz) {
- if (end - pos < 2 + sizeof(struct ieee80211_vht_cap))
- goto out_err;
- pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
- sband->vht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
+ ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
}
/* insert custom IEs that go before HE */
@@ -2131,107 +1344,128 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_he, ARRAY_SIZE(before_he),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (he_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE)) {
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_he_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- if (eht_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE |
IEEE80211_CHAN_NO_EHT)) {
- pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end,
- sdata->vif.type == NL80211_IFTYPE_AP);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_eht_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- if (cfg80211_any_usable_channels(local->hw.wiphy,
- BIT(NL80211_BAND_6GHZ),
- IEEE80211_CHAN_NO_HE)) {
- struct ieee80211_supported_band *sband6;
-
- sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
- he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif);
-
- if (he_cap) {
- enum nl80211_iftype iftype =
- ieee80211_vif_type_p2p(&sdata->vif);
- __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype);
-
- pos = ieee80211_write_he_6ghz_cap(pos, cap, end);
- }
- }
+ err = ieee80211_put_he_6ghz_cap(skb, sdata, IEEE80211_SMPS_OFF);
+ if (err)
+ return err;
/*
* If adding more here, adjust code in main.c
* that calculates local->scan_ies_len.
*/
- return pos - buffer;
- out_err:
- WARN_ONCE(1, "not enough space for preq IEs\n");
- done:
- return pos - buffer;
+ return 0;
}
-int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
- size_t buffer_len,
- struct ieee80211_scan_ies *ie_desc,
- const u8 *ie, size_t ie_len,
- u8 bands_used, u32 *rate_masks,
- struct cfg80211_chan_def *chandef,
- u32 flags)
+static int ieee80211_put_preq_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
- size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
- int i;
+ size_t custom_ie_offset = 0;
+ int i, err;
memset(ie_desc, 0, sizeof(*ie_desc));
for (i = 0; i < NUM_NL80211_BANDS; i++) {
if (bands_used & BIT(i)) {
- pos += ieee80211_build_preq_ies_band(sdata,
- buffer + pos,
- buffer_len - pos,
- ie, ie_len, i,
- rate_masks[i],
- chandef,
- &custom_ie_offset,
- flags);
- ie_desc->ies[i] = buffer + old_pos;
- ie_desc->len[i] = pos - old_pos;
- old_pos = pos;
+ ie_desc->ies[i] = skb_tail_pointer(skb);
+ err = ieee80211_put_preq_ies_band(skb, sdata,
+ ie, ie_len,
+ &custom_ie_offset,
+ i, rate_masks[i],
+ chandef, flags);
+ if (err)
+ return err;
+ ie_desc->len[i] = skb_tail_pointer(skb) -
+ ie_desc->ies[i];
}
}
/* add any remaining custom IEs */
if (ie && ie_len) {
- if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset,
+ if (WARN_ONCE(skb_tailroom(skb) < ie_len - custom_ie_offset,
"not enough space for preq custom IEs\n"))
- return pos;
- memcpy(buffer + pos, ie + custom_ie_offset,
- ie_len - custom_ie_offset);
- ie_desc->common_ies = buffer + pos;
- ie_desc->common_ie_len = ie_len - custom_ie_offset;
- pos += ie_len - custom_ie_offset;
+ return -ENOBUFS;
+ ie_desc->common_ies = skb_tail_pointer(skb);
+ skb_put_data(skb, ie + custom_ie_offset,
+ ie_len - custom_ie_offset);
+ ie_desc->common_ie_len = skb_tail_pointer(skb) -
+ ie_desc->common_ies;
}
- return pos;
+ return 0;
}
+int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
+ size_t buffer_len,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
+{
+ struct sk_buff *skb = alloc_skb(buffer_len, GFP_KERNEL);
+ uintptr_t offs;
+ int ret, i;
+ u8 *start;
+
+ if (!skb)
+ return -ENOMEM;
+
+ start = skb_tail_pointer(skb);
+ memset(start, 0, skb_tailroom(skb));
+ ret = ieee80211_put_preq_ies(skb, sdata, ie_desc, ie, ie_len,
+ bands_used, rate_masks, chandef,
+ flags);
+ if (ret < 0)
+ goto out;
+
+ if (skb->len > buffer_len) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ memcpy(buffer, start, skb->len);
+
+ /* adjust ie_desc for copy */
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ offs = ie_desc->ies[i] - start;
+ ie_desc->ies[i] = buffer + offs;
+ }
+ offs = ie_desc->common_ies - start;
+ ie_desc->common_ies = buffer + offs;
+
+ ret = skb->len;
+out:
+ consume_skb(skb);
+ return ret;
+}
+
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
u32 ratemask,
@@ -2244,7 +1478,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- int ies_len;
u32 rate_masks[NUM_NL80211_BANDS] = {};
struct ieee80211_scan_ies dummy_ie_desc;
@@ -2253,7 +1486,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
- chandef.width = sdata->vif.bss_conf.chandef.width;
+ chandef.width = sdata->vif.bss_conf.chanreq.oper.width;
if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
chandef.chan = NULL;
else
@@ -2265,11 +1498,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
return NULL;
rate_masks[chan->band] = ratemask;
- ies_len = ieee80211_build_preq_ies(sdata, skb_tail_pointer(skb),
- skb_tailroom(skb), &dummy_ie_desc,
- ie, ie_len, BIT(chan->band),
- rate_masks, &chandef, flags);
- skb_put(skb, ies_len);
+ ieee80211_put_preq_ies(skb, sdata, &dummy_ie_desc,
+ ie, ie_len, BIT(chan->band),
+ rate_masks, &chandef, flags);
if (dst) {
mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -2295,7 +1526,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sband))
return 1;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
num_rates = sband->n_bitrates;
supp_rates = 0;
@@ -2416,9 +1648,6 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx)
- return;
-
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
if (conf) {
@@ -2648,20 +1877,20 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
/* add channel contexts */
- if (local->use_chanctx) {
- list_for_each_entry(ctx, &local->chanctx_list, list)
- if (ctx->replace_state !=
- IEEE80211_CHANCTX_REPLACES_OTHER)
- WARN_ON(drv_add_chanctx(local, ctx));
-
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata && ieee80211_sdata_running(sdata))
- ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
- }
+ list_for_each_entry(ctx, &local->chanctx_list, list)
+ if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+ WARN_ON(drv_add_chanctx(local, ctx));
+
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
+ if (sdata && ieee80211_sdata_running(sdata))
+ ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
/* reconfigure hardware */
- ieee80211_hw_config(local, ~0);
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
+ IEEE80211_CONF_CHANGE_MONITOR |
+ IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS |
+ IEEE80211_CONF_CHANGE_IDLE);
ieee80211_configure_filter(local);
@@ -2706,8 +1935,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
for (link_id = 0;
link_id < ARRAY_SIZE(sdata->vif.link_conf);
link_id++) {
- if (ieee80211_vif_is_mld(&sdata->vif) &&
- !(sdata->vif.active_links & BIT(link_id)))
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -2756,9 +1984,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
sdata->vif.bss_conf.protected_keep_alive)
changed |= BSS_CHANGED_KEEP_ALIVE;
- if (sdata->vif.bss_conf.eht_puncturing)
- changed |= BSS_CHANGED_EHT_PUNCTURING;
-
ieee80211_bss_info_change_notify(sdata,
changed);
} else if (!WARN_ON(!link)) {
@@ -3109,21 +2334,6 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
return pos;
}
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap)
-{
- *pos++ = WLAN_EID_S1G_CAPABILITIES;
- *pos++ = sizeof(struct ieee80211_s1g_cap);
- memset(pos, 0, sizeof(struct ieee80211_s1g_cap));
-
- memcpy(pos, &s1g_cap->cap, sizeof(s1g_cap->cap));
- pos += sizeof(s1g_cap->cap);
-
- memcpy(pos, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
- pos += sizeof(s1g_cap->nss_mcs);
-
- return pos;
-}
-
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap)
{
@@ -3180,7 +2390,8 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos;
}
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_he_cap() will need */
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
@@ -3190,7 +2401,7 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap)
return 0;
@@ -3201,38 +2412,75 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
he_cap->he_cap_elem.phy_cap_info);
}
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
+static void
+ieee80211_get_adjusted_he_cap(const struct ieee80211_conn_settings *conn,
const struct ieee80211_sta_he_cap *he_cap,
- u8 *end)
+ struct ieee80211_he_cap_elem *elem)
{
- struct ieee80211_he_cap_elem elem;
- u8 n;
- u8 ie_len;
- u8 *orig_pos = pos;
+ u8 ru_limit, max_ru;
- /* Make sure we have place for the IE */
- /*
- * TODO: the 1 added is because this temporarily is under the EXTENSION
- * IE. Get rid of it when it moves.
- */
- if (!he_cap)
- return orig_pos;
+ *elem = he_cap->he_cap_elem;
- /* modify on stack first to calculate 'n' and 'ie_len' correctly */
- elem = he_cap->he_cap_elem;
+ switch (conn->bw_limit) {
+ case IEEE80211_CONN_BW_LIMIT_20:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_40:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_80:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ break;
+ default:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996;
+ break;
+ }
+
+ max_ru = elem->phy_cap_info[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ max_ru = min(max_ru, ru_limit);
+ elem->phy_cap_info[8] &= ~IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ elem->phy_cap_info[8] |= max_ru;
- if (disable_flags & IEEE80211_CONN_DISABLE_40MHZ)
- elem.phy_cap_info[0] &=
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ elem->phy_cap_info[0] &=
~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G);
+ elem->phy_cap_info[9] &=
+ ~IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ elem->phy_cap_info[0] &=
+ ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G);
+ elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[7] &=
+ ~(IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ);
+ }
+}
- if (disable_flags & IEEE80211_CONN_DISABLE_160MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_he_cap_elem elem;
+ u8 *len;
+ u8 n;
+ u8 ie_len;
- if (disable_flags & IEEE80211_CONN_DISABLE_80P80MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap)
+ return 0;
+
+ /* modify on stack first to calculate 'n' and 'ie_len' correctly */
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &elem);
n = ieee80211_he_mcs_nss_size(&elem);
ie_len = 2 + 1 +
@@ -3240,19 +2488,17 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- pos++; /* We'll set the size later below */
- *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ len = skb_put(skb, 1); /* We'll set the size later below */
+ skb_put_u8(skb, WLAN_EID_EXT_HE_CAPABILITY);
/* Fixed data */
- memcpy(pos, &elem, sizeof(elem));
- pos += sizeof(elem);
+ skb_put_data(skb, &elem, sizeof(elem));
- memcpy(pos, &he_cap->he_mcs_nss_supp, n);
- pos += n;
+ skb_put_data(skb, &he_cap->he_mcs_nss_supp, n);
/* Check if PPE Threshold should be present */
if ((he_cap->he_cap_elem.phy_cap_info[6] &
@@ -3276,41 +2522,39 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
n = DIV_ROUND_UP(n, 8);
/* Copy PPE Thresholds */
- memcpy(pos, &he_cap->ppe_thres, n);
- pos += n;
+ skb_put_data(skb, &he_cap->ppe_thres, n);
end:
- orig_pos[1] = (pos - orig_pos) - 2;
- return pos;
+ *len = skb_tail_pointer(skb) - len - 1;
+ return 0;
}
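Because the HE element's final size depends on the optional PPE thresholds, the length octet cannot be written up front; the helper reserves it with skb_put(skb, 1) and backfills it once the body is complete. The pattern, isolated as a sketch (hypothetical helper, fixed body for simplicity):

	/* Pattern sketch: reserve the length octet, emit a variable-size
	 * body, then backfill the length once the tail is known.
	 */
	static int put_var_elem(struct sk_buff *skb, const u8 *body, u8 body_len)
	{
		u8 *len;

		if (skb_tailroom(skb) < 3 + body_len)
			return -ENOBUFS;

		skb_put_u8(skb, WLAN_EID_EXTENSION);
		len = skb_put(skb, 1);			/* placeholder */
		skb_put_u8(skb, WLAN_EID_EXT_HE_CAPABILITY);
		skb_put_data(skb, body, body_len);
		*len = skb_tail_pointer(skb) - len - 1;	/* bytes after 'len' */
		return 0;
	}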
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb)
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sband_iftype_data *iftd;
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- u8 *pos;
- u16 cap;
+ __le16 cap;
if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
BIT(NL80211_BAND_6GHZ),
IEEE80211_CHAN_NO_HE))
- return;
+ return 0;
sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ];
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
if (!iftd)
- return;
+ return 0;
/* Check for device HE 6 GHz capability before adding element */
if (!iftd->he_6ghz_capa.capa)
- return;
+ return 0;
- cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
- cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;
+ cap = iftd->he_6ghz_capa.capa;
+ cap &= cpu_to_le16(~IEEE80211_HE_6GHZ_CAP_SM_PS);
switch (smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
@@ -3318,22 +2562,27 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
WARN_ON(1);
fallthrough;
case IEEE80211_SMPS_OFF:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_STATIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_DYNAMIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
}
- pos = skb_put(skb, 2 + 1 + sizeof(cap));
- ieee80211_write_he_6ghz_cap(pos, cpu_to_le16(cap),
- pos + 2 + 1 + sizeof(cap));
+ if (skb_tailroom(skb) < 2 + 1 + sizeof(cap))
+ return -ENOBUFS;
+
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, 1 + sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_EXT_HE_6GHZ_CAPA);
+ skb_put_data(skb, &cap, sizeof(cap));
+ return 0;
}
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
@@ -3785,7 +3034,6 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
}
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef)
{
chandef->center_freq1 =
@@ -3804,90 +3052,38 @@ void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
chandef->width = NL80211_CHAN_WIDTH_80;
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ:
- if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
- }
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ:
- if (support_320) {
- chandef->width = NL80211_CHAN_WIDTH_320;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
-
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chandef->center_freq1 -= 40;
- else
- chandef->center_freq1 += 40;
- }
+ chandef->width = NL80211_CHAN_WIDTH_320;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
}
}
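With the support_160/support_320 arguments gone, ieee80211_chandef_eht_oper() now reports exactly the bandwidth the AP advertises; limiting it to local capability is handled separately through the conn-settings/downgrade machinery (see ieee80211_chandef_downgrade() later in this patch). A hedged caller sketch, where local_supports() is a hypothetical acceptance check:

	ieee80211_chandef_eht_oper(info, &chandef);	/* AP-advertised width */
	if (!local_supports(&chandef))			/* hypothetical check */
		ieee80211_chandef_downgrade(&chandef, &conn);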
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct cfg80211_chan_def he_chandef = *chandef;
const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- bool support_80_80, support_160, support_320;
- u8 he_phy_cap, eht_phy_cap;
u32 freq;
if (chandef->chan->band != NL80211_BAND_6GHZ)
return true;
- sband = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
-
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- if (!he_cap) {
- sdata_info(sdata, "Missing iftype sband data/HE cap");
- return false;
- }
-
- he_phy_cap = he_cap->he_cap_elem.phy_cap_info[0];
- support_160 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- support_80_80 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
-
- if (!he_oper) {
- sdata_info(sdata,
- "HE is not advertised on (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_oper)
return false;
- }
-
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
- if (!eht_cap)
- eht_oper = NULL;
he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper) {
- sdata_info(sdata,
- "HE 6GHz operation missing (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_6ghz_oper)
return false;
- }
/*
* The EHT operation IE does not contain the primary channel so the
@@ -3896,20 +3092,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
*/
freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary,
NL80211_BAND_6GHZ);
- he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ he_chandef.chan = ieee80211_get_channel(local->hw.wiphy, freq);
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- bss_conf->power_type = IEEE80211_REG_LPI_AP;
- break;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- bss_conf->power_type = IEEE80211_REG_SP_AP;
- break;
- default:
- bss_conf->power_type = IEEE80211_REG_UNSET_AP;
- break;
- }
+ if (!he_chandef.chan)
+ return false;
if (!eht_oper ||
!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
@@ -3928,13 +3114,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.width = NL80211_CHAN_WIDTH_80;
if (!he_6ghz_oper->ccfs1)
break;
- if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) {
- if (support_160)
- he_chandef.width = NL80211_CHAN_WIDTH_160;
- } else {
- if (support_80_80)
- he_chandef.width = NL80211_CHAN_WIDTH_80P80;
- }
+ if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8)
+ he_chandef.width = NL80211_CHAN_WIDTH_160;
+ else
+ he_chandef.width = NL80211_CHAN_WIDTH_80P80;
break;
}
@@ -3946,30 +3129,17 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.center_freq1 =
ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
NL80211_BAND_6GHZ);
- if (support_80_80 || support_160)
- he_chandef.center_freq2 =
- ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
- NL80211_BAND_6GHZ);
+ he_chandef.center_freq2 =
+ ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+ NL80211_BAND_6GHZ);
}
} else {
- eht_phy_cap = eht_cap->eht_cap_elem.phy_cap_info[0];
- support_320 =
- eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
-
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- support_160, support_320,
&he_chandef);
}
- if (!cfg80211_chandef_valid(&he_chandef)) {
- sdata_info(sdata,
- "HE 6GHz operation resulted in invalid chandef: %d MHz/%d/%d MHz/%d MHz\n",
- he_chandef.chan ? he_chandef.chan->center_freq : 0,
- he_chandef.width,
- he_chandef.center_freq1,
- he_chandef.center_freq2);
+ if (!cfg80211_chandef_valid(&he_chandef))
return false;
- }
*chandef = he_chandef;
@@ -4012,121 +3182,62 @@ bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
return true;
}
-int ieee80211_parse_bitrates(enum nl80211_chan_width width,
- const struct ieee80211_supported_band *sband,
- const u8 *srates, int srates_len, u32 *rates)
-{
- u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
- struct ieee80211_rate *br;
- int brate, rate, i, j, count = 0;
-
- *rates = 0;
-
- for (i = 0; i < srates_len; i++) {
- rate = srates[i] & 0x7f;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- br = &sband->bitrates[j];
- if ((rate_flags & br->flags) != rate_flags)
- continue;
-
- brate = DIV_ROUND_UP(br->bitrate, 5);
- if (brate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
- }
- return count;
-}
-
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, rates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ u8 i, rates, skip;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
- sband = local->hw.wiphy->bands[band];
rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
+ if (masked_rates & BIT(i))
+ continue;
rates++;
}
- if (rates > 8)
- rates = 8;
-
- if (skb_tailroom(skb) < rates + 2)
- return -ENOMEM;
-
- pos = skb_put(skb, rates + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
+ if (element_id == WLAN_EID_SUPP_RATES) {
+ rates = min_t(u8, rates, 8);
+ skip = 0;
+ } else {
+ skip = 8;
+ if (rates <= skip)
+ return 0;
+ rates -= skip;
}
- return 0;
-}
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOBUFS;
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, exrates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ skb_put_u8(skb, element_id);
+ skb_put_u8(skb, rates);
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates && rates; i++) {
+ int rate;
+ u8 basic;
- sband = local->hw.wiphy->bands[band];
- exrates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
- exrates++;
- }
+ if (masked_rates & BIT(i))
+ continue;
- if (exrates > 8)
- exrates -= 8;
- else
- exrates = 0;
+ if (skip > 0) {
+ skip--;
+ continue;
+ }
- if (skb_tailroom(skb) < exrates + 2)
- return -ENOMEM;
+ basic = basic_rates & BIT(i) ? 0x80 : 0;
- if (exrates) {
- pos = skb_put(skb, exrates + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = exrates;
- for (i = 8; i < sband->n_bitrates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
- }
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
+ skb_put_u8(skb, basic | (u8)rate);
+ rates--;
}
+
+ WARN(rates > 0, "rates confused: rates:%d, element:%d\n",
+ rates, element_id);
+
return 0;
}
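The two removed helpers collapse into this single one, called twice with different element IDs: WLAN_EID_SUPP_RATES emits at most the first eight matching rates, while WLAN_EID_EXT_SUPP_RATES skips those eight and emits the remainder. A usage sketch mirroring the probe-request path earlier in the patch (context variables assumed; masked_rates of 0 keeps all rates):

	err = ieee80211_put_srates_elem(skb, sband, basic_rates, rate_flags,
					0, WLAN_EID_SUPP_RATES);
	if (!err)
		err = ieee80211_put_srates_elem(skb, sband, basic_rates,
						rate_flags, 0,
						WLAN_EID_EXT_SUPP_RATES);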
@@ -4338,7 +3449,7 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ chandef = sdata->vif.bss_conf.chanreq.oper;
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev,
&chandef,
@@ -4386,78 +3497,92 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_radar_detected);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *c,
+ struct ieee80211_conn_settings *conn)
{
- ieee80211_conn_flags_t ret;
- int tmp;
+ enum nl80211_chan_width new_primary_width;
+ struct ieee80211_conn_settings _ignored = {};
+
+ /* allow passing NULL if caller doesn't care */
+ if (!conn)
+ conn = &_ignored;
+
+again:
+ /* no-HT indicates nothing to do */
+ new_primary_width = NL80211_CHAN_WIDTH_20_NOHT;
switch (c->width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ WARN_ON_ONCE(1);
+ fallthrough;
case NL80211_CHAN_WIDTH_20:
c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_40:
c->width = NL80211_CHAN_WIDTH_20;
c->center_freq1 = c->chan->center_freq;
- ret = IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_80:
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
- c->width = NL80211_CHAN_WIDTH_40;
- ret = IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_40;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
case NL80211_CHAN_WIDTH_80P80:
c->center_freq2 = 0;
c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_160:
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P80 */
- tmp /= 4;
- c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
- c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ new_primary_width = NL80211_CHAN_WIDTH_80;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P160 */
- tmp /= 8;
- c->center_freq1 = c->center_freq1 - 80 + 160 * tmp;
- c->width = NL80211_CHAN_WIDTH_160;
- ret = IEEE80211_CONN_DISABLE_320MHZ;
- break;
- default:
- case NL80211_CHAN_WIDTH_20_NOHT:
- WARN_ON_ONCE(1);
- c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_160;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
break;
case NL80211_CHAN_WIDTH_1:
case NL80211_CHAN_WIDTH_2:
case NL80211_CHAN_WIDTH_4:
case NL80211_CHAN_WIDTH_8:
case NL80211_CHAN_WIDTH_16:
+ WARN_ON_ONCE(1);
+ /* keep c->width */
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
WARN_ON_ONCE(1);
/* keep c->width */
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
}
- WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+ if (new_primary_width != NL80211_CHAN_WIDTH_20_NOHT) {
+ c->center_freq1 = cfg80211_chandef_primary(c, new_primary_width,
+ &c->punctured);
+ c->width = new_primary_width;
+ }
- return ret;
+ /*
+ * With an 80 MHz channel, we might have the puncturing in the primary
+ * 40 MHz channel, but that's not valid when downgraded to 40 MHz width.
+ * In that case, downgrade again.
+ */
+ if (!cfg80211_chandef_valid(c) && c->punctured)
+ goto again;
+
+ WARN_ON_ONCE(!cfg80211_chandef_valid(c));
}
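A typical caller downgrades in a loop until the resulting chandef is acceptable; note the helper may internally take two steps when 80 MHz puncturing falls inside the primary 40 MHz. A hedged caller sketch, where usable() is a hypothetical predicate:

	struct ieee80211_conn_settings conn = ieee80211_conn_settings_unlimited;

	while (!usable(&chandef)) {		/* hypothetical acceptance test */
		if (chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
			break;			/* nothing narrower to try */
		ieee80211_chandef_downgrade(&chandef, &conn);
	}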
/*
@@ -4773,7 +3898,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
if (link->reserved_radar_required)
- radar_detect |= BIT(link->reserved_chandef.width);
+ radar_detect |= BIT(link->reserved.oper.width);
/*
* An in-place reservation context should not have any assigned vifs
@@ -4787,7 +3912,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
continue;
radar_detect |=
- BIT(link->conf->chandef.width);
+ BIT(link->conf->chanreq.oper.width);
}
return radar_detect;
@@ -5037,7 +4162,8 @@ u16 ieee80211_encode_usf(int listen_interval)
return (u16) listen_interval;
}
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_eht_cap() will need */
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
@@ -5049,13 +4175,12 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap || !eht_cap)
return 0;
- is_ap = iftype == NL80211_IFTYPE_AP ||
- iftype == NL80211_IFTYPE_P2P_GO;
+ is_ap = sdata->vif.type == NL80211_IFTYPE_AP;
n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
&eht_cap->eht_cap_elem,
@@ -5067,45 +4192,134 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
return 0;
}
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap)
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
{
+ const struct ieee80211_sta_he_cap *he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ bool for_ap = sdata->vif.type == NL80211_IFTYPE_AP;
+ struct ieee80211_eht_cap_elem_fixed fixed;
+ struct ieee80211_he_cap_elem he;
u8 mcs_nss_len, ppet_len;
+ u8 orig_mcs_nss_len;
u8 ie_len;
- u8 *orig_pos = pos;
+
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
/* Make sure we have room for the IE */
if (!he_cap || !eht_cap)
- return orig_pos;
+ return 0;
+
+ orig_mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ for_ap);
- mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- for_ap);
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &he);
+
+ fixed = eht_cap->eht_cap_elem;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_80)
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320) {
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ;
+ }
+
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20)
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ;
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he, &fixed, for_ap);
ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
+ fixed.phy_cap_info);
ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len;
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = ie_len - 2;
- *pos++ = WLAN_EID_EXT_EHT_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, ie_len - 2);
+ skb_put_u8(skb, WLAN_EID_EXT_EHT_CAPABILITY);
+ skb_put_data(skb, &fixed, sizeof(fixed));
- /* Fixed data */
- memcpy(pos, &eht_cap->eht_cap_elem, sizeof(eht_cap->eht_cap_elem));
- pos += sizeof(eht_cap->eht_cap_elem);
+ if (mcs_nss_len == 4 && orig_mcs_nss_len != 4) {
+ /*
+ * If the (non-AP) STA became 20 MHz only, then convert from
+ * <=80 to 20-MHz-only format, where MCSes are indicated in
+ * the groups 0-7, 8-9, 10-11, 12-13 rather than just 0-9,
+ * 10-11, 12-13. Thus, use 0-9 for 0-7 and 8-9.
+ */
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs11_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss);
+ } else {
+ skb_put_data(skb, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
+ }
- memcpy(pos, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
- pos += mcs_nss_len;
+ if (ppet_len)
+ skb_put_data(skb, &eht_cap->eht_ppe_thres, ppet_len);
- if (ppet_len) {
- memcpy(pos, &eht_cap->eht_ppe_thres, ppet_len);
- pos += ppet_len;
- }
+ return 0;
+}
- return pos;
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode)
+{
+ static const char * const modes[] = {
+ [IEEE80211_CONN_MODE_S1G] = "S1G",
+ [IEEE80211_CONN_MODE_LEGACY] = "legacy",
+ [IEEE80211_CONN_MODE_HT] = "HT",
+ [IEEE80211_CONN_MODE_VHT] = "VHT",
+ [IEEE80211_CONN_MODE_HE] = "HE",
+ [IEEE80211_CONN_MODE_EHT] = "EHT",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(modes)))
+ return "<out of range>";
+
+ return modes[mode] ?: "<missing string>";
+}
+
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_CONN_BW_LIMIT_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_CONN_BW_LIMIT_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_CONN_BW_LIMIT_80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return IEEE80211_CONN_BW_LIMIT_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_CONN_BW_LIMIT_320;
+ default:
+ WARN(1, "unhandled chandef width %d\n", chandef->width);
+ return IEEE80211_CONN_BW_LIMIT_20;
+ }
}
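
For context on the chandef downgrade rework above: after narrowing the channel, a puncturing pattern that used to sit in a secondary part of the wide channel may now fall inside the (narrower) primary, which is not a valid chandef, so the code jumps back and downgrades once more. A standalone toy of just that control flow; the primary-is-the-low-bits layout and the values are illustrative assumptions, not the kernel's representation:

#include <stdio.h>

int main(void)
{
	unsigned int punct = 0x2;     /* punctured 20 MHz units of an 80 MHz chan */
	unsigned int primary40 = 0x3; /* the two units forming the primary 40 */
	int width_mhz = 80;

again:
	width_mhz /= 2;	/* downgrade one step: 80 -> 40 -> 20 */
	if (width_mhz > 20 && (punct & primary40))
		goto again;	/* puncturing inside the new primary: invalid */

	printf("settled at %d MHz\n", width_mhz);	/* 20 MHz */
	return 0;
}
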
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index b3a5c3e96a72..642891cafbaf 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -369,7 +369,7 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
if (eht_cap->has_eht &&
- link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
@@ -380,7 +380,7 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
info = he_cap->he_cap_elem.phy_cap_info[0];
- if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
ret = IEEE80211_STA_RX_BW_40;
else
@@ -515,7 +515,7 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
if (WARN_ON(!link_conf))
bss_width = NL80211_CHAN_WIDTH_20_NOHT;
else
- bss_width = link_conf->chandef.width;
+ bss_width = link_conf->chanreq.oper.width;
rcu_read_unlock();
bw = ieee80211_sta_cap_rx_bw(link_sta);
@@ -541,15 +541,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
return bw;
}
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
{
u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
bool support_160;
- /* if we received a notification already don't overwrite it */
- if (link_sta->pub->rx_nss)
- return;
-
if (link_sta->pub->eht_cap.has_eht) {
int i;
const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
@@ -627,7 +623,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
- link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
+ rx_nss = max_t(u8, 1, rx_nss);
+ link_sta->capa_nss = rx_nss;
+
+ /* that shouldn't be set yet, but we can handle it anyway */
+ if (link_sta->op_mode_nss)
+ link_sta->pub->rx_nss =
+ min_t(u8, rx_nss, link_sta->op_mode_nss);
+ else
+ link_sta->pub->rx_nss = rx_nss;
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -637,7 +641,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
u32 changed = 0;
- u8 nss, cur_nss;
+ u8 nss;
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
@@ -647,23 +651,17 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- if (link_sta->pub->rx_nss != nss) {
- cur_nss = link_sta->pub->rx_nss;
- /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which
- * will set the same to max nss value calculated based on capability.
- */
- link_sta->pub->rx_nss = 0;
- ieee80211_sta_set_rx_nss(link_sta);
- /* Do not allow an nss change to rx_nss greater than max_nss
- * negotiated and capped to APs capability during association.
- */
- if (nss <= link_sta->pub->rx_nss) {
- link_sta->pub->rx_nss = nss;
- sta_opmode.rx_nss = nss;
- changed |= IEEE80211_RC_NSS_CHANGED;
- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ if (link_sta->op_mode_nss != nss) {
+ if (nss <= link_sta->capa_nss) {
+ link_sta->op_mode_nss = nss;
+
+ if (nss != link_sta->pub->rx_nss) {
+ link_sta->pub->rx_nss = nss;
+ changed |= IEEE80211_RC_NSS_CHANGED;
+ sta_opmode.rx_nss = link_sta->pub->rx_nss;
+ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ }
} else {
- link_sta->pub->rx_nss = cur_nss;
pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
link_sta->pub->addr, nss);
}
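
The vht.c rework above splits the capability-derived NSS (capa_nss) from the NSS announced in operating-mode notifications (op_mode_nss), with rx_nss always the effective minimum of the two; a notification exceeding capa_nss is rejected via the ratelimited warning. A minimal standalone sketch of that bookkeeping, where the field names mirror the diff but everything else is illustrative:

#include <stdio.h>

struct link_sta {
	unsigned char capa_nss;    /* derived once from HT/VHT/HE/EHT caps */
	unsigned char op_mode_nss; /* 0 until a notification arrives */
	unsigned char rx_nss;      /* what the rest of the stack consumes */
};

static void update_rx_nss(struct link_sta *s)
{
	if (s->op_mode_nss && s->op_mode_nss < s->capa_nss)
		s->rx_nss = s->op_mode_nss;
	else
		s->rx_nss = s->capa_nss;
}

int main(void)
{
	struct link_sta s = { .capa_nss = 4 };

	update_rx_nss(&s);
	printf("init:   rx_nss=%u\n", s.rx_nss);	/* 4 */

	s.op_mode_nss = 2;	/* valid: does not exceed capa_nss */
	update_rx_nss(&s);
	printf("opmode: rx_nss=%u\n", s.rx_nss);	/* 2 */
	return 0;
}
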
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 94dae7cb6dbd..e40529b8c5c9 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -315,7 +315,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
* Calculate AAD for CCMP/GCMP, returning qos_tid since we
* need that in CCMP also for b_0.
*/
-static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
+static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad, bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 mask_fc;
@@ -340,7 +340,14 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
len_a += 6;
if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_tid = ieee80211_get_tid(hdr);
+ qos_tid = *ieee80211_get_qos_ctl(hdr);
+
+ if (spp_amsdu)
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK |
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ else
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK;
+
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
len_a += 2;
} else {
@@ -369,10 +376,11 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
return qos_tid;
}
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- u8 qos_tid = ccmp_gcmp_aad(skb, aad);
+ u8 qos_tid = ccmp_gcmp_aad(skb, aad, spp_amsdu);
/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
* mode authentication are not allowed to collide, yet both are derived
@@ -479,7 +487,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len));
}
@@ -557,7 +566,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
@@ -581,7 +591,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_CONTINUE;
}
-static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -591,7 +602,7 @@ static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
j_0[14] = 0;
j_0[AES_BLOCK_SIZE - 1] = 0x01;
- ccmp_gcmp_aad(skb, aad);
+ ccmp_gcmp_aad(skb, aad, spp_amsdu);
}
static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
@@ -680,7 +691,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += IEEE80211_GCMP_HDR_LEN;
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
@@ -753,7 +765,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
u8 aad[2 * AES_BLOCK_SIZE];
u8 j_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_gcm_decrypt(
key->u.gcmp.tfm, j_0, aad,
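
For context on the wpa.c changes: with signaling-and-payload-protected (SPP) A-MSDUs, the A-MSDU-present bit of the QoS control field is kept in the AAD rather than masked out, so it is covered by the CCMP/GCMP MIC. A standalone sketch of just that masking; the constants mirror the usual 802.11 QoS-control layout, and this is an illustration, not the kernel function:

#include <stdio.h>

#define QOS_CTL_TID_MASK	0x000f
#define QOS_CTL_A_MSDU_PRESENT	0x0080

static unsigned char aad_qos_byte(unsigned short qos_ctl, int spp_amsdu)
{
	if (spp_amsdu)	/* SPP keys authenticate the A-MSDU-present bit */
		return qos_ctl & (QOS_CTL_TID_MASK | QOS_CTL_A_MSDU_PRESENT);
	return qos_ctl & QOS_CTL_TID_MASK;	/* legacy: TID bits only */
}

int main(void)
{
	unsigned short qos = 0x0085;	/* TID 5, A-MSDU present */

	printf("legacy AAD byte: %#x\n", aad_qos_byte(qos, 0));	/* 0x5 */
	printf("SPP AAD byte:    %#x\n", aad_qos_byte(qos, 1));	/* 0x85 */
	return 0;
}
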
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 8d2eabc71bbe..f13b07ebfb98 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -265,19 +265,27 @@ fail:
return -ENOMEM;
}
+static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
+{
+ struct ieee802154_llsec_key_entry *pos;
+ struct mac802154_llsec_key *mkey;
+
+ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
+ mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+ llsec_key_put(mkey);
+ kfree_sensitive(pos);
+}
+
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_key_entry *pos;
list_for_each_entry(pos, &sec->table.keys, list) {
- struct mac802154_llsec_key *mkey;
-
- mkey = container_of(pos->key, struct mac802154_llsec_key, key);
-
if (llsec_key_id_equal(&pos->id, key)) {
list_del_rcu(&pos->list);
- llsec_key_put(mkey);
+ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
return 0;
}
}
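
The llsec fix above is the standard deferred-free pattern for RCU-protected lists: unlink with list_del_rcu() immediately, but drop the reference and free the entry only from an RCU callback, so lockless readers still traversing the list never touch freed memory. A generic kernel-style sketch of the pattern; the type names are illustrative:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	struct rcu_head rcu;
	/* payload ... */
};

static void item_free_rcu(struct rcu_head *rcu)
{
	struct item *it = container_of(rcu, struct item, rcu);

	kfree_sensitive(it);	/* no RCU reader can still observe it */
}

static void item_del(struct item *it)
{
	list_del_rcu(&it->list);	   /* readers may still be traversing */
	call_rcu(&it->rcu, item_free_rcu); /* free after a grace period */
}
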
diff --git a/net/mctp/Kconfig b/net/mctp/Kconfig
index 3a5c0e70da77..d8d3413a37f7 100644
--- a/net/mctp/Kconfig
+++ b/net/mctp/Kconfig
@@ -14,6 +14,7 @@ menuconfig MCTP
config MCTP_TEST
bool "MCTP core tests" if !KUNIT_ALL_TESTS
+ select MCTP_FLOWS
depends on MCTP=y && KUNIT=y
default KUNIT_ALL_TESTS
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index f6be58b68c6f..de52a9191da0 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -350,30 +350,102 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
}
-static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
+/* helpers for reading/writing the tag ioctl structs, handling compatibility
+ * across the two versions, and some basic API error checking
+ */
+static int mctp_ioctl_tag_copy_from_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ size = sizeof(*ctl);
+ ptr = ctl;
+ } else {
+ size = sizeof(ctl_compat);
+ ptr = &ctl_compat;
+ }
+
+ rc = copy_from_user(ptr, (void __user *)arg, size);
+ if (rc)
+ return -EFAULT;
+
+ if (!tagv2) {
+ /* compat, using defaults for new fields */
+ ctl->net = MCTP_INITIAL_DEFAULT_NET;
+ ctl->peer_addr = ctl_compat.peer_addr;
+ ctl->local_addr = MCTP_ADDR_ANY;
+ ctl->flags = ctl_compat.flags;
+ ctl->tag = ctl_compat.tag;
+ }
+
+ if (ctl->flags)
+ return -EINVAL;
+
+ if (ctl->local_addr != MCTP_ADDR_ANY &&
+ ctl->local_addr != MCTP_ADDR_NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mctp_ioctl_tag_copy_to_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ ptr = ctl;
+ size = sizeof(*ctl);
+ } else {
+ ctl_compat.peer_addr = ctl->peer_addr;
+ ctl_compat.tag = ctl->tag;
+ ctl_compat.flags = ctl->flags;
+
+ ptr = &ctl_compat;
+ size = sizeof(ctl_compat);
+ }
+
+ rc = copy_to_user((void __user *)arg, ptr, size);
+ if (rc)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
struct mctp_sk_key *key = NULL;
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags;
u8 tag;
+ int rc;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
if (ctl.tag)
return -EINVAL;
- if (ctl.flags)
- return -EINVAL;
-
- key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
- true, &tag);
+ key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY,
+ ctl.peer_addr, true, &tag);
if (IS_ERR(key))
return PTR_ERR(key);
ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
- if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
+ rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2);
+ if (rc) {
unsigned long fl2;
/* Unwind our key allocation: the keys list lock needs to be
* taken before the individual key locks, and we need a valid
@@ -385,28 +457,27 @@ static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
mctp_key_unref(key);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
- return -EFAULT;
+ return rc;
}
mctp_key_unref(key);
return 0;
}
-static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
+static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
int rc;
u8 tag;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
-
- if (ctl.flags)
- return -EINVAL;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
/* Must be a local tag, TO set, preallocated */
if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
@@ -422,6 +493,7 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
*/
spin_lock_irqsave(&key->lock, fl2);
if (key->manual_alloc &&
+ ctl.net == key->net &&
ctl.peer_addr == key->peer_addr &&
tag == key->tag) {
__mctp_key_remove(key, net, fl2,
@@ -439,12 +511,17 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
+ bool tagv2 = false;
switch (cmd) {
+ case SIOCMCTPALLOCTAG2:
case SIOCMCTPALLOCTAG:
- return mctp_ioctl_alloctag(msk, arg);
+ tagv2 = cmd == SIOCMCTPALLOCTAG2;
+ return mctp_ioctl_alloctag(msk, tagv2, arg);
case SIOCMCTPDROPTAG:
- return mctp_ioctl_droptag(msk, arg);
+ case SIOCMCTPDROPTAG2:
+ tagv2 = cmd == SIOCMCTPDROPTAG2;
+ return mctp_ioctl_droptag(msk, tagv2, arg);
}
return -EINVAL;
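
From userspace, the new v2 ioctls take a struct mctp_ioc_tag_ctl2, which extends the v1 control struct with a network ID (plus a local_addr that, for now, must be MCTP_ADDR_ANY or MCTP_ADDR_NULL, and flags that must be zero). A hedged sketch of allocating a tag on a specific network, assuming the uapi layout implied by the diff:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/mctp.h>

static int alloc_tag_on_net(int sd, unsigned int net, mctp_eid_t peer)
{
	struct mctp_ioc_tag_ctl2 ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.net = net;			/* tags are now per-network */
	ctl.peer_addr = peer;
	ctl.local_addr = MCTP_ADDR_ANY;	/* must be ANY or NULL for now */

	if (ioctl(sd, SIOCMCTPALLOCTAG2, &ctl) < 0)
		return -1;

	printf("net %u: got tag %#x\n", ctl.net, ctl.tag);
	return ctl.tag;
}
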
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ceee44ea09d9..eefd7834d9a0 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -73,13 +73,50 @@ static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
return NULL;
}
-static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
- mctp_eid_t peer, u8 tag)
+/* A note on the key allocations.
+ *
+ * struct net->mctp.keys contains our set of currently-allocated keys for
+ * MCTP tag management. The lookup tuple for these is the peer EID,
+ * local EID and MCTP tag.
+ *
+ * In some cases, the peer EID may be MCTP_EID_ANY: for example, when a
+ * broadcast message is sent, we may receive responses from any peer EID.
+ * Because the broadcast dest address is equivalent to ANY, we create
+ * a key with (local = local-eid, peer = ANY). This allows a match on the
+ * incoming broadcast responses from any peer.
+ *
+ * We perform lookups when packets are received, and when tags are allocated
+ * in two scenarios:
+ *
+ * - when a packet is sent, with a locally-owned tag: we need to find an
+ * unused tag value for the (local, peer) EID pair.
+ *
+ * - when a tag is manually allocated: we need to find an unused tag value
+ * for the peer EID, but don't have a specific local EID at that stage.
+ *
+ * in the latter case, on successful allocation, we end up with a tag with
+ * (local = ANY, peer = peer-eid).
+ *
+ * So, the key set allows both a local EID of ANY, as well as a peer EID of
+ * ANY in the lookup tuple. Both may be ANY if we prealloc for a broadcast.
+ * The matching (in mctp_key_match()) during lookup allows the match value to
+ * be ANY in either the dest or source addresses.
+ *
+ * When allocating (+ inserting) a tag, we need to check for conflicts amongst
+ * the existing tag set. This requires matching either exactly on the local
+ * and peer addresses, or either being ANY.
+ */
+
+static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net,
+ mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
+ if (key->net != net)
+ return false;
+
if (!mctp_address_matches(key->local_addr, local))
return false;
- if (key->peer_addr != peer)
+ if (!mctp_address_matches(key->peer_addr, peer))
return false;
if (key->tag != tag)
@@ -92,7 +129,7 @@ static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
* key exists.
*/
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
- mctp_eid_t peer,
+ unsigned int netid, mctp_eid_t peer,
unsigned long *irqflags)
__acquires(&key->lock)
{
@@ -108,7 +145,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry(key, &net->mctp.keys, hlist) {
- if (!mctp_key_match(key, mh->dest, peer, tag))
+ if (!mctp_key_match(key, netid, mh->dest, peer, tag))
continue;
spin_lock(&key->lock);
@@ -131,6 +168,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
}
static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
+ unsigned int net,
mctp_eid_t local, mctp_eid_t peer,
u8 tag, gfp_t gfp)
{
@@ -140,6 +178,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
if (!key)
return NULL;
+ key->net = net;
key->peer_addr = peer;
key->local_addr = local;
key->tag = tag;
@@ -185,8 +224,8 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
}
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
- if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
- key->tag)) {
+ if (mctp_key_match(tmp, key->net, key->local_addr,
+ key->peer_addr, key->tag)) {
spin_lock(&tmp->lock);
if (tmp->valid)
rc = -EEXIST;
@@ -327,6 +366,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
struct mctp_sock *msk;
struct mctp_hdr *mh;
+ unsigned int netid;
unsigned long f;
u8 tag, flags;
int rc;
@@ -345,6 +385,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* grab header, advance data ptr */
mh = mctp_hdr(skb);
+ netid = mctp_cb(skb)->net;
skb_pull(skb, sizeof(struct mctp_hdr));
if (mh->ver != 1)
@@ -358,7 +399,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* lookup socket / reasm context, exactly matching (src,dest,tag).
* we hold a ref on the key, and key->lock held.
*/
- key = mctp_lookup_key(net, skb, mh->src, &f);
+ key = mctp_lookup_key(net, skb, netid, mh->src, &f);
if (flags & MCTP_HDR_FLAG_SOM) {
if (key) {
@@ -368,8 +409,12 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* key lookup to find the socket, but don't use this
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
+ *
+ * this lookup requires key->peer to be MCTP_ADDR_ANY;
+ * it doesn't match just any key->peer.
*/
- any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+ any_key = mctp_lookup_key(net, skb, netid,
+ MCTP_ADDR_ANY, &f);
if (any_key) {
msk = container_of(any_key->sk,
struct mctp_sock, sk);
@@ -406,7 +451,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* packets for this message
*/
if (!key) {
- key = mctp_key_alloc(msk, mh->dest, mh->src,
+ key = mctp_key_alloc(msk, netid, mh->dest, mh->src,
tag, GFP_ATOMIC);
if (!key) {
rc = -ENOMEM;
@@ -596,11 +641,12 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
refcount_inc(&key->refs);
}
-/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
+/* Allocate a locally-owned tag value for (local, peer), and reserve
* it for the socket msk
*/
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp)
{
struct net *net = sock_net(&msk->sk);
@@ -610,11 +656,11 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
u8 tagbits;
/* for NULL destination EIDs, we may get a response from any peer */
- if (daddr == MCTP_ADDR_NULL)
- daddr = MCTP_ADDR_ANY;
+ if (peer == MCTP_ADDR_NULL)
+ peer = MCTP_ADDR_ANY;
/* be optimistic, alloc now */
- key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
+ key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
@@ -631,12 +677,24 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
* lock held, they don't change over the lifetime of the key.
*/
+ /* tags are net-specific */
+ if (tmp->net != netid)
+ continue;
+
/* if we don't own the tag, it can't conflict */
if (tmp->tag & MCTP_HDR_FLAG_TO)
continue;
- if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
- mctp_address_matches(tmp->local_addr, saddr)))
+ /* Since we're avoiding conflicting entries, match peer and
+ * local addresses, including with a wildcard on ANY. See
+ * 'A note on key allocations' for background.
+ */
+ if (peer != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->peer_addr, peer))
+ continue;
+
+ if (local != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->local_addr, local))
continue;
spin_lock(&tmp->lock);
@@ -671,6 +729,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
}
static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
+ unsigned int netid,
mctp_eid_t daddr,
u8 req_tag, u8 *tagp)
{
@@ -685,6 +744,9 @@ static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
spin_lock_irqsave(&mns->keys_lock, flags);
hlist_for_each_entry(tmp, &mns->keys, hlist) {
+ if (tmp->net != netid)
+ continue;
+
if (tmp->tag != req_tag)
continue;
@@ -843,6 +905,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
/* copy message payload */
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+ /* we need to copy the extensions, for MCTP flow data */
+ skb_ext_copy(skb2, skb);
+
/* do route */
rc = rt->output(rt, skb2);
if (rc)
@@ -865,6 +930,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct mctp_sk_key *key;
struct mctp_hdr *hdr;
unsigned long flags;
+ unsigned int netid;
unsigned int mtu;
mctp_eid_t saddr;
bool ext_rt;
@@ -916,16 +982,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
rc = 0;
}
spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);
+ netid = READ_ONCE(rt->dev->net);
if (rc)
goto out_release;
if (req_tag & MCTP_TAG_OWNER) {
if (req_tag & MCTP_TAG_PREALLOC)
- key = mctp_lookup_prealloc_tag(msk, daddr,
+ key = mctp_lookup_prealloc_tag(msk, netid, daddr,
req_tag, &tag);
else
- key = mctp_alloc_local_tag(msk, daddr, saddr,
+ key = mctp_alloc_local_tag(msk, netid, saddr, daddr,
false, &tag);
if (IS_ERR(key)) {
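
To make the route.c matching rules above concrete: a key now matches on the (net, local, peer, tag) tuple, where the net must compare equal exactly and the stored local/peer addresses may be ANY wildcards, per the "note on the key allocations". A standalone toy of that tuple match, using the one-sided wildcard semantics of mctp_address_matches(); all names and values are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define EID_ANY 0xff	/* stand-in for MCTP_ADDR_ANY */

/* key-side ANY acts as a wildcard; looking up with ANY only matches
 * keys whose stored address is itself ANY */
static bool addr_matches(unsigned char key_addr, unsigned char addr)
{
	return key_addr == addr || key_addr == EID_ANY;
}

struct key { unsigned int net; unsigned char local, peer, tag; };

static bool key_match(const struct key *k, unsigned int net,
		      unsigned char local, unsigned char peer,
		      unsigned char tag)
{
	return k->net == net &&
	       addr_matches(k->local, local) &&
	       addr_matches(k->peer, peer) &&
	       k->tag == tag;
}

int main(void)
{
	/* a manually-allocated key: local not yet known, peer fixed */
	struct key k = { .net = 1, .local = EID_ANY, .peer = 9, .tag = 1 };

	printf("%d\n", key_match(&k, 1, 8, 9, 1));	/* 1: local wildcards */
	printf("%d\n", key_match(&k, 2, 8, 9, 1));	/* 0: wrong network */
	return 0;
}
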
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 92ea4158f7fc..77e5dd422258 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -79,6 +79,16 @@ static void mctp_test_route_destroy(struct kunit *test,
kfree_rcu(&rt->rt, rcu);
}
+static void mctp_test_skb_set_dev(struct sk_buff *skb,
+ struct mctp_test_dev *dev)
+{
+ struct mctp_skb_cb *cb;
+
+ cb = mctp_cb(skb);
+ cb->net = READ_ONCE(dev->mdev->net);
+ skb->dev = dev->ndev;
+}
+
static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
unsigned int data_len)
{
@@ -91,6 +101,7 @@ static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
buf = skb_put(skb, data_len);
@@ -111,6 +122,7 @@ static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(skb, data_len), data, data_len);
@@ -249,8 +261,6 @@ static void mctp_test_rx_input(struct kunit *test)
skb = mctp_test_create_skb(&params->hdr, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- __mctp_cb(skb);
-
mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
@@ -283,7 +293,8 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
static void __mctp_route_test_init(struct kunit *test,
struct mctp_test_dev **devp,
struct mctp_test_route **rtp,
- struct socket **sockp)
+ struct socket **sockp,
+ unsigned int netid)
{
struct sockaddr_mctp addr = {0};
struct mctp_test_route *rt;
@@ -293,6 +304,8 @@ static void __mctp_route_test_init(struct kunit *test,
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ if (netid != MCTP_NET_ANY)
+ WRITE_ONCE(dev->mdev->net, netid);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -301,7 +314,7 @@ static void __mctp_route_test_init(struct kunit *test,
KUNIT_ASSERT_EQ(test, rc, 0);
addr.smctp_family = AF_MCTP;
- addr.smctp_network = MCTP_NET_ANY;
+ addr.smctp_network = netid;
addr.smctp_addr.s_addr = 8;
addr.smctp_type = 0;
rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
@@ -339,13 +352,12 @@ static void mctp_test_route_input_sk(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
skb = mctp_test_create_skb_data(&params->hdr, &params->type);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -410,15 +422,14 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
for (i = 0; i < params->n_hdrs; i++) {
c = i;
skb = mctp_test_create_skb_data(&params->hdrs[i], &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
}
@@ -544,6 +555,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
struct mctp_sock *msk;
struct socket *sock;
unsigned long flags;
+ unsigned int net;
int rc;
u8 c;
@@ -551,6 +563,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ net = READ_ONCE(dev->mdev->net);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -562,8 +575,9 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
mns = &sock_net(sock->sk)->mctp;
/* set the incoming tag according to test params */
- key = mctp_key_alloc(msk, params->key_local_addr, params->key_peer_addr,
- params->key_tag, GFP_KERNEL);
+ key = mctp_key_alloc(msk, net, params->key_local_addr,
+ params->key_peer_addr, params->key_tag,
+ GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key);
@@ -576,8 +590,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
skb = mctp_test_create_skb_data(&params->hdr, &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -665,6 +678,373 @@ static void mctp_route_input_sk_keys_to_desc(
KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
mctp_route_input_sk_keys_to_desc);
+struct test_net {
+ unsigned int netid;
+ struct mctp_test_dev *dev;
+ struct mctp_test_route *rt;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct mctp_sk_key *key;
+ struct {
+ u8 type;
+ unsigned int data;
+ } msg;
+};
+
+static void
+mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO);
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
+ struct test_net *t)
+{
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* Test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sockets' bind()
+ */
+static void mctp_test_route_input_multiple_nets_bind(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ t1.msg.type = 0;
+ t2.msg.type = 0;
+
+ mctp_test_route_input_multiple_nets_bind_init(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t2);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1));
+ struct mctp_sock *msk;
+ struct netns_mctp *mns;
+ unsigned long flags;
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ msk = container_of(t->sock->sk, struct mctp_sock, sk);
+
+ t->key = mctp_key_alloc(msk, t->netid, hdr.dest, hdr.src, 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+
+ mns = &sock_net(t->sock->sk)->mctp;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ mctp_reserve_tag(&init_net, t->key, msk);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
+ struct test_net *t)
+{
+ mctp_key_unref(t->key);
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sk_key
+ */
+static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ /* use type 1 which is not bound */
+ t1.msg.type = 1;
+ t2.msg.type = 1;
+
+ mctp_test_route_input_multiple_nets_key_init(test, &t1);
+ mctp_test_route_input_multiple_nets_key_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_key_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_key_fini(test, &t2);
+}
+
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+
+static void mctp_test_flow_init(struct kunit *test,
+ struct mctp_test_dev **devp,
+ struct mctp_test_route **rtp,
+ struct socket **sock,
+ struct sk_buff **skbp,
+ unsigned int len)
+{
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+
+ /* we have a slightly odd routing setup here; the test route
+ * is for EID 8, which is our local EID. We don't do a routing
+ * lookup, so that's fine - all we require is a path through
+ * mctp_local_output, which will call rt->output on whatever
+ * route we provide
+ */
+ __mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY);
+
+ /* Assign a single EID. ->addrs is freed on mctp netdev release */
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = 8;
+
+ skb = alloc_skb(len + sizeof(struct mctp_hdr) + 1, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1);
+ memset(skb_put(skb, len), 0, len);
+
+ /* take a ref for the route, we'll decrement in local output */
+ refcount_inc(&rt->rt.refs);
+
+ *devp = dev;
+ *rtp = rt;
+ *skbp = skb;
+}
+
+static void mctp_test_flow_fini(struct kunit *test,
+ struct mctp_test_dev *dev,
+ struct mctp_test_route *rt,
+ struct socket *sock)
+{
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* test that an outgoing skb has the correct MCTP extension data set */
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ struct sk_buff *skb, *skb2;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_flow *flow;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 1);
+
+ skb2 = skb_dequeue(&rt->pkts);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
+
+ flow = skb_ext_find(skb2, SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow->key);
+ KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);
+
+ kfree_skb(skb2);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+/* test that outgoing skbs, after fragmentation, all have the correct MCTP
+ * extension data set.
+ */
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ struct mctp_flow *flows[2];
+ struct sk_buff *tx_skbs[2];
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 2);
+
+ /* both resulting packets should have the same flow data */
+ tx_skbs[0] = skb_dequeue(&rt->pkts);
+ tx_skbs[1] = skb_dequeue(&rt->pkts);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
+
+ flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);
+ KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);
+
+ flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);
+ KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);
+
+ kfree_skb(tx_skbs[0]);
+ kfree_skb(tx_skbs[1]);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+#else
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+#endif
+
+/* Test that outgoing skbs cause a suitable tag to be created */
+static void mctp_test_route_output_key_create(struct kunit *test)
+{
+ const unsigned int netid = 50;
+ const u8 dst = 26, src = 15;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_sk_key *key;
+ struct netns_mctp *mns;
+ unsigned long flags;
+ struct socket *sock;
+ struct sk_buff *skb;
+ bool empty, single;
+ const int len = 2;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ WRITE_ONCE(dev->mdev->net, netid);
+
+ rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = src;
+
+ skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len);
+ memset(skb_put(skb, len), 0, len);
+
+ refcount_inc(&rt->rt.refs);
+
+ mns = &sock_net(sock->sk)->mctp;
+
+ /* We assume we're starting from an empty keys list, which requires
+ * preceding tests to clean up correctly!
+ */
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ empty = hlist_empty(&mns->keys);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+ KUNIT_ASSERT_TRUE(test, empty);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ key = NULL;
+ single = false;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ if (!hlist_empty(&mns->keys)) {
+ key = hlist_entry(mns->keys.first, struct mctp_sk_key, hlist);
+ single = hlist_is_singular_node(&key->hlist, &mns->keys);
+ }
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_NULL(test, key);
+ KUNIT_ASSERT_TRUE(test, single);
+
+ KUNIT_EXPECT_EQ(test, key->net, netid);
+ KUNIT_EXPECT_EQ(test, key->local_addr, src);
+ KUNIT_EXPECT_EQ(test, key->peer_addr, dst);
+ /* key has incoming tag, so inverse of what we sent */
+ KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
+
+ sock_release(sock);
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
+
static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
@@ -673,6 +1053,11 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
+ KUNIT_CASE(mctp_test_packet_flow),
+ KUNIT_CASE(mctp_test_fragment_flow),
+ KUNIT_CASE(mctp_test_route_output_key_create),
{}
};
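
The #else stubs in the flow tests above are a small KUnit idiom worth noting: by providing kunit_skip() stand-ins when the config option is off, the kunit_case table can list the tests unconditionally, and they report as skipped rather than silently disappearing. A generic sketch, where CONFIG_FOO and the test name are hypothetical:

#include <kunit/test.h>

#if IS_ENABLED(CONFIG_FOO)
static void foo_feature_test(struct kunit *test)
{
	/* real assertions here */
}
#else
static void foo_feature_test(struct kunit *test)
{
	kunit_skip(test, "Requires CONFIG_FOO=y");
}
#endif
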
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index e03ba66bbe18..565763eb0211 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -4,6 +4,7 @@
#include <linux/mctp.h>
#include <linux/if_arp.h>
+#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>
@@ -54,6 +55,7 @@ struct mctp_test_dev *mctp_test_create_dev(void)
rcu_read_lock();
dev->mdev = __mctp_dev_get(ndev);
+ dev->mdev->net = mctp_default_net(dev_net(ndev));
rcu_read_unlock();
return dev;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1af29af65388..6dab883a08dd 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2179,7 +2179,9 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct mpls_route __rcu **platform_label;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
unsigned int flags = NLM_F_MULTI;
size_t platform_labels;
unsigned int index;
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 533d082f0701..45d1e6a157fc 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -27,6 +27,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
__be16 mpls_protocol;
unsigned int mpls_hlen;
+ if (!skb_inner_network_header_was_set(skb))
+ goto out;
+
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index ef59e25dc482..8fc790f2a01b 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -55,8 +55,6 @@ static int mpls_xmit(struct sk_buff *skb)
out_dev = dst->dev;
net = dev_net(out_dev);
- skb_orphan(skb);
-
if (!mpls_output_possible(out_dev) ||
!dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
index 7017dd60659d..3ae46b545d2c 100644
--- a/net/mptcp/diag.c
+++ b/net/mptcp/diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 5409c2ea3f57..0566dd793810 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
@@ -225,6 +224,7 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static const struct inet_diag_handler mptcp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = mptcp_diag_dump,
.dump_one = mptcp_diag_dump_one,
.idiag_get_info = mptcp_diag_get_info,
diff --git a/net/mptcp/mptcp_pm_gen.c b/net/mptcp/mptcp_pm_gen.c
index 670da7822e6c..c30a2a90a192 100644
--- a/net/mptcp/mptcp_pm_gen.c
+++ b/net/mptcp/mptcp_pm_gen.c
@@ -32,8 +32,9 @@ const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1]
};
/* MPTCP_PM_CMD_GET_ADDR - do */
-const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
- [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
};
/* MPTCP_PM_CMD_FLUSH_ADDRS - do */
@@ -110,7 +111,7 @@ const struct genl_ops mptcp_pm_nl_ops[11] = {
.doit = mptcp_pm_nl_get_addr_doit,
.dumpit = mptcp_pm_nl_get_addr_dumpit,
.policy = mptcp_pm_get_addr_nl_policy,
- .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .maxattr = MPTCP_PM_ATTR_TOKEN,
.flags = GENL_UNS_ADMIN_PERM,
},
{
diff --git a/net/mptcp/mptcp_pm_gen.h b/net/mptcp/mptcp_pm_gen.h
index ac9fc7225b6a..e24258f6f819 100644
--- a/net/mptcp/mptcp_pm_gen.h
+++ b/net/mptcp/mptcp_pm_gen.h
@@ -18,7 +18,7 @@ extern const struct nla_policy mptcp_pm_add_addr_nl_policy[MPTCP_PM_ENDPOINT_ADD
extern const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
-extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1];
extern const struct nla_policy mptcp_pm_flush_addrs_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 63fc0758c22d..27ca42c77b02 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -689,8 +689,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
if (!echo) {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX);
- opts->ahmac = add_addr_generate_hmac(msk->local_key,
- msk->remote_key,
+ opts->ahmac = add_addr_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
&opts->addr);
} else {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
@@ -792,7 +792,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
*size = TCPOLEN_MPTCP_FASTCLOSE;
opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
- opts->rcvr_key = msk->remote_key;
+ opts->rcvr_key = READ_ONCE(msk->remote_key);
pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
@@ -1031,7 +1031,7 @@ u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
{
msk->bytes_acked += new_snd_una - msk->snd_una;
- msk->snd_una = new_snd_una;
+ WRITE_ONCE(msk->snd_una, new_snd_una);
}
static void ack_update_msk(struct mptcp_sock *msk,
@@ -1058,10 +1058,10 @@ static void ack_update_msk(struct mptcp_sock *msk,
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
if (after64(new_wnd_end, msk->wnd_end))
- msk->wnd_end = new_wnd_end;
+ WRITE_ONCE(msk->wnd_end, new_wnd_end);
/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
- if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
+ if (after64(msk->wnd_end, snd_nxt))
__mptcp_check_push(sk, ssk);
if (after64(new_snd_una, old_snd_una)) {
@@ -1072,7 +1072,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
trace_ack_update_msk(mp_opt->data_ack,
old_snd_una, new_snd_una,
- new_wnd_end, msk->wnd_end);
+ new_wnd_end, READ_ONCE(msk->wnd_end));
}
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
@@ -1100,8 +1100,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
if (mp_opt->echo)
return true;
- hmac = add_addr_generate_hmac(msk->remote_key,
- msk->local_key,
+ hmac = add_addr_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
&mp_opt->addr);
pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
@@ -1148,7 +1148,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) {
if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) &&
- msk->local_key == mp_opt.rcvr_key) {
+ READ_ONCE(msk->local_key) == mp_opt.rcvr_key) {
WRITE_ONCE(msk->rcv_fastclose, true);
mptcp_schedule_work((struct sock *)msk);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);
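
The READ_ONCE()/WRITE_ONCE() churn through options.c (and pm.c below) follows the usual KCSAN-driven rule: once a field such as msk->snd_una, msk->wnd_end, or the connection keys can be read without the owning lock, every write is annotated with WRITE_ONCE() and every lockless read with READ_ONCE(). A minimal kernel-style sketch of the pairing; the helper names are illustrative, and struct mptcp_sock comes from net/mptcp/protocol.h:

#include <linux/compiler.h>
#include <linux/types.h>
#include "protocol.h"	/* struct mptcp_sock */

/* writer side, serialized by the msk-level lock */
static void msk_set_snd_una(struct mptcp_sock *msk, u64 new_snd_una)
{
	WRITE_ONCE(msk->snd_una, new_snd_una);
}

/* lockless reader, e.g. from the diag/getsockopt paths */
static u64 msk_get_snd_una(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->snd_una);
}
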
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 4ae19113b8eb..55406720c607 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -6,7 +6,6 @@
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
@@ -77,7 +76,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
+ pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
@@ -441,13 +440,27 @@ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id
return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info)
{
- if (token)
- return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup);
- return mptcp_pm_nl_set_flags(net, loc, bkup);
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_get_addr(skb, info);
+ return mptcp_pm_nl_get_addr(skb, info);
+}
+
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_dump_addr(msg, cb);
+ return mptcp_pm_nl_dump_addr(msg, cb);
+}
+
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_set_flags(skb, info);
+ return mptcp_pm_nl_set_flags(skb, info);
}
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 58d17d9604e7..5c17d39146ea 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -8,19 +8,13 @@
#include <linux/inet.h>
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>
-#include <net/genetlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
-/* forward declaration */
-static struct genl_family mptcp_genl_family;
-
static int pm_nl_pernet_id;
struct mptcp_pm_add_entry {
@@ -505,15 +499,12 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
}
static struct mptcp_pm_addr_entry *
-__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
- bool lookup_by_id)
+__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if ((!lookup_by_id &&
- mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
- (lookup_by_id && entry->addr.id == info->id))
+ if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
return entry;
}
return NULL;
@@ -543,7 +534,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
rcu_read_lock();
- entry = __lookup_addr(pernet, &mpc_addr, false);
+ entry = __lookup_addr(pernet, &mpc_addr);
if (entry) {
__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
msk->mpc_endpoint_id = entry->addr.id;
@@ -1550,8 +1541,8 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
}
}
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list)
+static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
@@ -1636,8 +1627,8 @@ int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static int mptcp_nl_fill_addr(struct sk_buff *skb,
- struct mptcp_pm_addr_entry *entry)
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry)
{
struct mptcp_addr_info *addr = &entry->addr;
struct nlattr *attr;
@@ -1675,7 +1666,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
@@ -1725,8 +1716,13 @@ fail:
return ret;
}
-int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ return mptcp_pm_get_addr(skb, info);
+}
+
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
{
struct net *net = sock_net(msg->sk);
struct mptcp_pm_addr_entry *entry;
@@ -1768,6 +1764,12 @@ int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
return msg->len;
}
+int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return mptcp_pm_dump_addr(msg, cb);
+}
+
static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
struct nlattr *attr = info->attrs[id];
@@ -1882,66 +1884,63 @@ next:
return ret;
}
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup)
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
{
- struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
+ struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
MPTCP_PM_ADDR_FLAG_FULLMESH;
+ struct net *net = sock_net(skb->sk);
struct mptcp_pm_addr_entry *entry;
+ struct pm_nl_pernet *pernet;
u8 lookup_by_id = 0;
+ u8 bkup = 0;
+ int ret;
+
+ pernet = pm_nl_get_pernet(net);
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ return ret;
- if (addr->addr.family == AF_UNSPEC) {
+ if (addr.addr.family == AF_UNSPEC) {
lookup_by_id = 1;
- if (!addr->addr.id)
+ if (!addr.addr.id) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
return -EOPNOTSUPP;
+ }
}
+ if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
+
spin_lock_bh(&pernet->lock);
- entry = __lookup_addr(pernet, &addr->addr, lookup_by_id);
+ entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
+ __lookup_addr(pernet, &addr.addr);
if (!entry) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "address not found");
return -EINVAL;
}
- if ((addr->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
+ if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
return -EINVAL;
}
- changed = (addr->flags ^ entry->flags) & mask;
- entry->flags = (entry->flags & ~mask) | (addr->flags & mask);
- *addr = *entry;
+ changed = (addr.flags ^ entry->flags) & mask;
+ entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
+ addr = *entry;
spin_unlock_bh(&pernet->lock);
- mptcp_nl_set_flags(net, &addr->addr, bkup, changed);
+ mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
return 0;
}
int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
- struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
- struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
- struct net *net = sock_net(skb->sk);
- u8 bkup = 0;
- int ret;
-
- ret = mptcp_pm_parse_entry(attr, info, false, &addr);
- if (ret < 0)
- return ret;
-
- if (attr_rem) {
- ret = mptcp_pm_parse_entry(attr_rem, info, false, &remote);
- if (ret < 0)
- return ret;
- }
-
- if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
- bkup = 1;
-
- return mptcp_pm_set_flags(net, token, &addr, &remote, bkup);
+ return mptcp_pm_set_flags(skb, info);
}
static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
@@ -2014,7 +2013,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
const struct mptcp_subflow_context *sf;
u8 sk_err;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
return -EMSGSIZE;
if (mptcp_event_add_subflow(skb, ssk))
@@ -2072,7 +2071,7 @@ static int mptcp_event_created(struct sk_buff *skb,
const struct mptcp_sock *msk,
const struct sock *ssk)
{
- int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
+ int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
if (err)
return err;
@@ -2100,7 +2099,7 @@ void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
@@ -2135,7 +2134,7 @@ void mptcp_event_addr_announced(const struct sock *ssk,
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
@@ -2251,7 +2250,7 @@ void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
goto nla_put_failure;
break;
case MPTCP_EVENT_CLOSED:
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
goto nla_put_failure;
break;
case MPTCP_EVENT_ANNOUNCED:
@@ -2281,7 +2280,7 @@ nla_put_failure:
nlmsg_free(skb);
}
-static struct genl_family mptcp_genl_family __ro_after_init = {
+struct genl_family mptcp_genl_family __ro_after_init = {
.name = MPTCP_PM_NAME,
.version = MPTCP_PM_VER,
.netnsok = true,
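
Note on the doit/dumpit split above: the generic handlers (mptcp_pm_get_addr(), mptcp_pm_dump_addr(), mptcp_pm_set_flags()) that these thin wrappers call live in net/mptcp/pm.c and are not part of this diff. A minimal sketch of their likely shape, assuming the presence of the MPTCP_PM_ATTR_TOKEN attribute is what selects the userspace path manager:

int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info)
{
	/* a token names one connection, i.e. a userspace-managed socket */
	if (info->attrs[MPTCP_PM_ATTR_TOKEN])
		return mptcp_userspace_pm_get_addr(skb, info);
	return mptcp_pm_nl_get_addr(skb, info);
}

int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
{
	const struct genl_info *info = genl_info_dump(cb);

	if (info->attrs[MPTCP_PM_ATTR_TOKEN])
		return mptcp_userspace_pm_dump_addr(msg, cb);
	return mptcp_pm_nl_dump_addr(msg, cb);
}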
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index bc97cc30f013..9f5d422d5ef6 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -106,19 +106,26 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
return -EINVAL;
}
+static struct mptcp_pm_addr_entry *
+mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id)
+{
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (entry->addr.id == id)
+ return entry;
+ }
+ return NULL;
+}
+
int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
unsigned int id,
u8 *flags, int *ifindex)
{
- struct mptcp_pm_addr_entry *entry, *match = NULL;
+ struct mptcp_pm_addr_entry *match;
spin_lock_bh(&msk->pm.lock);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (id == entry->addr.id) {
- match = entry;
- break;
- }
- }
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id);
spin_unlock_bh(&msk->pm.lock);
if (match) {
*flags = match->flags;
@@ -261,7 +268,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
- struct mptcp_pm_addr_entry *match = NULL;
+ struct mptcp_pm_addr_entry *match;
struct mptcp_pm_addr_entry *entry;
struct mptcp_sock *msk;
LIST_HEAD(free_list);
@@ -298,13 +305,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (entry->addr.id == id_val) {
- match = entry;
- break;
- }
- }
-
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val);
if (!match) {
GENL_SET_ERR_MSG(info, "address with specified id not found");
release_sock(sk);
@@ -334,7 +335,6 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
struct mptcp_pm_addr_entry local = { 0 };
struct mptcp_addr_info addr_r;
- struct mptcp_addr_info addr_l;
struct mptcp_sock *msk;
int err = -EINVAL;
struct sock *sk;
@@ -360,25 +360,31 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
goto create_err;
}
- err = mptcp_pm_parse_addr(laddr, info, &addr_l);
+ err = mptcp_pm_parse_entry(laddr, info, true, &local);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
goto create_err;
}
+ if (local.flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
+ err = -EINVAL;
+ goto create_err;
+ }
+ local.flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
+
err = mptcp_pm_parse_addr(raddr, info, &addr_r);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
goto create_err;
}
- if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
+ if (!mptcp_pm_addr_families_match(sk, &local.addr, &addr_r)) {
GENL_SET_ERR_MSG(info, "families mismatch");
err = -EINVAL;
goto create_err;
}
- local.addr = addr_l;
err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
@@ -387,7 +393,7 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+ err = __mptcp_subflow_connect(sk, &local.addr, &addr_r);
release_sock(sk);
@@ -540,35 +546,194 @@ destroy_err:
return err;
}
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
{
+ struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, };
+ struct mptcp_pm_addr_entry rem = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct net *net = sock_net(skb->sk);
struct mptcp_sock *msk;
int ret = -EINVAL;
struct sock *sk;
u32 token_val;
+ u8 bkup = 0;
token_val = nla_get_u32(token);
msk = mptcp_token_get_sock(net, token_val);
- if (!msk)
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
return ret;
+ }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk))
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "userspace PM not selected");
goto set_flags_err;
+ }
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &loc);
+ if (ret < 0)
+ goto set_flags_err;
+
+ if (attr_rem) {
+ ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem);
+ if (ret < 0)
+ goto set_flags_err;
+ }
- if (loc->addr.family == AF_UNSPEC ||
- rem->addr.family == AF_UNSPEC)
+ if (loc.addr.family == AF_UNSPEC ||
+ rem.addr.family == AF_UNSPEC) {
+ GENL_SET_ERR_MSG(info, "invalid address families");
+ ret = -EINVAL;
goto set_flags_err;
+ }
+
+ if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
lock_sock(sk);
- ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup);
+ ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup);
release_sock(sk);
set_flags_err:
sock_put(sk);
return ret;
}
+
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct id_bitmap {
+ DECLARE_BITMAP(map, MPTCP_PM_MAX_ADDR_ID + 1);
+ } *bitmap;
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(msg->sk);
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_sock *msk;
+ struct nlattr *token;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *hdr;
+
+ bitmap = (struct id_bitmap *)cb->ctx;
+ token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+
+ msk = mptcp_token_get_sock(net, nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return ret;
+ }
+
+ sk = (struct sock *)msk;
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto out;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (test_bit(entry->addr.id, bitmap->map))
+ continue;
+
+ hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &mptcp_genl_family,
+ NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
+ if (!hdr)
+ break;
+
+ if (mptcp_nl_fill_addr(msg, entry) < 0) {
+ genlmsg_cancel(msg, hdr);
+ break;
+ }
+
+ __set_bit(entry->addr.id, bitmap->map);
+ genlmsg_end(msg, hdr);
+ }
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ ret = msg->len;
+
+out:
+ sock_put(sk);
+ return ret;
+}
+
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct mptcp_pm_addr_entry addr, *entry;
+ struct net *net = sock_net(skb->sk);
+ struct mptcp_sock *msk;
+ struct sk_buff *msg;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *reply;
+
+ msk = mptcp_token_get_sock(net, nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return ret;
+ }
+
+ sk = (struct sock *)msk;
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto out;
+ }
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ goto out;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
+ info->genlhdr->cmd);
+ if (!reply) {
+ GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
+ ret = -EMSGSIZE;
+ goto fail;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_userspace_pm_lookup_addr_by_id(msk, addr.addr.id);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "address not found");
+ ret = -EINVAL;
+ goto unlock_fail;
+ }
+
+ ret = mptcp_nl_fill_addr(msg, entry);
+ if (ret)
+ goto unlock_fail;
+
+ genlmsg_end(msg, reply);
+ ret = genlmsg_reply(msg, info);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ sock_put(sk);
+ return ret;
+
+unlock_fail:
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+fail:
+ nlmsg_free(msg);
+out:
+ sock_put(sk);
+ return ret;
+}
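
The dump handler above survives multiple invocations because the netlink core preserves cb->ctx across calls belonging to the same dump: ids already emitted are recorded in the bitmap, and returning msg->len == 0 (no new entry fitted) terminates the dump. A minimal sketch of the same pattern with hypothetical names (example_fill_one() and EXAMPLE_MAX_ID are placeholders, not kernel APIs):

static int example_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	/* cb->ctx is zeroed at dump start and kept across calls */
	unsigned long *next_id = (unsigned long *)cb->ctx;

	for (; *next_id < EXAMPLE_MAX_ID; (*next_id)++) {
		if (example_fill_one(msg, *next_id) < 0)
			break;	/* skb full: resume from *next_id next call */
	}
	return msg->len;	/* 0 once nothing was added => dump done */
}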
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7833a49f6214..3a1967bc7bad 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -15,7 +15,6 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
@@ -410,6 +409,7 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -441,16 +441,17 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
}
}
+/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
struct mptcp_sock *msk = mptcp_sk(sk);
if (READ_ONCE(msk->rcv_data_fin) &&
- ((1 << sk->sk_state) &
+ ((1 << inet_sk_state_load(sk)) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
- if (msk->ack_seq == rcv_data_fin_seq) {
+ if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
if (seq)
*seq = rcv_data_fin_seq;
@@ -748,7 +749,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
msk->bytes_received += end_seq - msk->ack_seq;
- msk->ack_seq = end_seq;
+ WRITE_ONCE(msk->ack_seq, end_seq);
moved = true;
}
return moved;
@@ -985,6 +986,7 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
+/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1033,13 +1035,15 @@ static void __mptcp_clean_una(struct sock *sk)
msk->recovery = false;
out:
- if (snd_una == READ_ONCE(msk->snd_nxt) &&
- snd_una == READ_ONCE(msk->write_seq)) {
+ if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
mptcp_stop_rtx_timer(sk);
} else {
mptcp_reset_rtx_timer(sk);
}
+
+ if (mptcp_pending_data_fin_ack(sk))
+ mptcp_schedule_work(sk);
}
static void __mptcp_clean_una_wakeup(struct sock *sk)
@@ -1500,7 +1504,7 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
*/
if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
- msk->snd_nxt = snd_nxt_new;
+ WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
}
}
@@ -1687,15 +1691,6 @@ out:
}
}
-static void mptcp_set_nospace(struct sock *sk)
-{
- /* enable autotune */
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-
- /* will be cleared on avail space */
- set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
-}
-
static int mptcp_disconnect(struct sock *sk, int flags);
static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
@@ -1766,6 +1761,30 @@ static int do_copy_data_nocache(struct sock *sk, int copy,
return 0;
}
+/* open-code sk_stream_memory_free() plus sent limit computation to
+ * avoid indirect calls in the fast path.
+ * Called under the msk socket lock, so we can avoid a bunch of ONCE
+ * annotations.
+ */
+static u32 mptcp_send_limit(const struct sock *sk)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 limit, not_sent;
+
+ if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
+ return 0;
+
+ limit = mptcp_notsent_lowat(sk);
+ if (limit == UINT_MAX)
+ return UINT_MAX;
+
+ not_sent = msk->write_seq - msk->snd_nxt;
+ if (not_sent >= limit)
+ return 0;
+
+ return limit - not_sent;
+}
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1810,6 +1829,12 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct mptcp_data_frag *dfrag;
bool dfrag_collapsed;
size_t psize, offset;
+ u32 copy_limit;
+
+ /* ensure fitting the notsent_lowat() constraint */
+ copy_limit = mptcp_send_limit(sk);
+ if (!copy_limit)
+ goto wait_for_memory;
/* reuse tail pfrag, if possible, or carve a new one from the
* page allocator
@@ -1817,9 +1842,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
dfrag = mptcp_pending_tail(sk);
dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
if (!dfrag_collapsed) {
- if (!sk_stream_memory_free(sk))
- goto wait_for_memory;
-
if (!mptcp_page_frag_refill(sk, pfrag))
goto wait_for_memory;
@@ -1834,6 +1856,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
offset = dfrag->offset + dfrag->data_len;
psize = pfrag->size - offset;
psize = min_t(size_t, psize, msg_data_left(msg));
+ psize = min_t(size_t, psize, copy_limit);
total_ts = psize + frag_truesize;
if (!sk_wmem_schedule(sk, total_ts))
@@ -1869,7 +1892,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
continue;
wait_for_memory:
- mptcp_set_nospace(sk);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
__mptcp_push_pending(sk, msg->msg_flags);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
@@ -2115,7 +2138,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk)
skb = skb_peek(&msk->receive_queue);
if (skb) {
- u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
if (hint_val >= INT_MAX)
return INT_MAX;
@@ -2759,7 +2782,7 @@ static void __mptcp_init_sock(struct sock *sk)
__skb_queue_head_init(&msk->receive_queue);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
- msk->rmem_fwd_alloc = 0;
+ WRITE_ONCE(msk->rmem_fwd_alloc, 0);
WRITE_ONCE(msk->rmem_released, 0);
msk->timer_ival = TCP_RTO_MIN;
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
@@ -2975,7 +2998,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk->sk_prot->destroy(sk);
- WARN_ON_ONCE(msk->rmem_fwd_alloc);
+ WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc));
WARN_ON_ONCE(msk->rmem_released);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
@@ -3150,16 +3173,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
WRITE_ONCE(msk->flags, 0);
msk->cb_flags = 0;
msk->recovery = false;
- msk->can_ack = false;
- msk->fully_established = false;
- msk->rcv_data_fin = false;
- msk->snd_data_fin_enable = false;
- msk->rcv_fastclose = false;
- msk->use_64bit_ack = false;
- msk->bytes_consumed = 0;
+ WRITE_ONCE(msk->can_ack, false);
+ WRITE_ONCE(msk->fully_established, false);
+ WRITE_ONCE(msk->rcv_data_fin, false);
+ WRITE_ONCE(msk->snd_data_fin_enable, false);
+ WRITE_ONCE(msk->rcv_fastclose, false);
+ WRITE_ONCE(msk->use_64bit_ack, false);
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
mptcp_pm_data_reset(msk);
mptcp_ca_reset(sk);
+ msk->bytes_consumed = 0;
msk->bytes_acked = 0;
msk->bytes_received = 0;
msk->bytes_sent = 0;
@@ -3250,17 +3273,17 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
mptcp_copy_ip_options(nsk, sk);
msk = mptcp_sk(nsk);
- msk->local_key = subflow_req->local_key;
- msk->token = subflow_req->token;
+ WRITE_ONCE(msk->local_key, subflow_req->local_key);
+ WRITE_ONCE(msk->token, subflow_req->token);
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
- msk->write_seq = subflow_req->idsn + 1;
- msk->snd_nxt = msk->write_seq;
- msk->snd_una = msk->write_seq;
- msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
+ WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ WRITE_ONCE(msk->snd_una, msk->write_seq);
+ WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
mptcp_init_sched(msk, mptcp_sk(sk)->sched);
@@ -3363,9 +3386,6 @@ void __mptcp_data_acked(struct sock *sk)
__mptcp_clean_una(sk);
else
__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
-
- if (mptcp_pending_data_fin_ack(sk))
- mptcp_schedule_work(sk);
}
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
@@ -3767,6 +3787,7 @@ static struct proto mptcp_prot = {
.unhash = mptcp_unhash,
.get_port = mptcp_get_port,
.forward_alloc_get = mptcp_forward_alloc_get,
+ .stream_memory_free = mptcp_stream_memory_free,
.sockets_allocated = &mptcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
@@ -3940,12 +3961,12 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
- if (sk_stream_is_writeable(sk))
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
- mptcp_set_nospace(sk);
- smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
- if (sk_stream_is_writeable(sk))
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
return 0;
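
Most of the protocol.c hunks above are data-race annotations: fields such as snd_nxt, ack_seq, token and the msk keys are now read locklessly by diag, sockopt and PM-event code, so both sides of each access get marked. A minimal sketch of the pairing (no memory ordering is implied; the ONCE macros only prevent compiler tearing/fusing, which is what KCSAN reports):

/* writer: runs under the msk socket lock */
static void publish_snd_nxt(struct mptcp_sock *msk, u64 new_snd_nxt)
{
	WRITE_ONCE(msk->snd_nxt, new_snd_nxt);
}

/* lockless reader, e.g. the diag or event path */
static u64 peek_snd_nxt(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->snd_nxt);
}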
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 07f6242afc1a..a10ebf3ee10a 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -113,10 +113,9 @@
#define MPTCP_RST_TRANSIENT BIT(0)
/* MPTCP socket atomic flags */
-#define MPTCP_NOSPACE 1
-#define MPTCP_WORK_RTX 2
-#define MPTCP_FALLBACK_DONE 4
-#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_WORK_RTX 1
+#define MPTCP_FALLBACK_DONE 2
+#define MPTCP_WORK_CLOSE_SUBFLOW 3
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -260,8 +259,10 @@ struct mptcp_data_frag {
struct mptcp_sock {
/* inet_connection_sock must be the first member */
struct inet_connection_sock sk;
- u64 local_key;
- u64 remote_key;
+ u64 local_key; /* protected by the first subflow socket lock
+ * lockless read access
+ */
+ u64 remote_key; /* same as above */
u64 write_seq;
u64 bytes_sent;
u64 snd_nxt;
@@ -306,6 +307,7 @@ struct mptcp_sock {
in_accept_queue:1,
free_first:1,
rcvspace_init:1;
+ u32 notsent_lowat;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -341,12 +343,30 @@ struct mptcp_sock {
#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \
list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
+extern struct genl_family mptcp_genl_family;
+
static inline void msk_owned_by_me(const struct mptcp_sock *msk)
{
sock_owned_by_me((const struct sock *)msk);
}
+#ifdef CONFIG_DEBUG_NET
+/* MPTCP-specific: we might (indirectly) call this helper with the wrong sk */
+#undef tcp_sk
+#define tcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_TCP); \
+ container_of_const(_ptr, struct tcp_sock, inet_conn.icsk_inet.sk); \
+})
+#define mptcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_MPTCP); \
+ container_of_const(_ptr, struct mptcp_sock, sk.icsk_inet.sk); \
+})
+
+#else /* !CONFIG_DEBUG_NET */
#define mptcp_sk(ptr) container_of_const(ptr, struct mptcp_sock, sk.icsk_inet.sk)
+#endif
/* the msk socket doesn't use the backlog; also account for the bulk
* free memory
@@ -400,7 +420,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (msk->snd_una == READ_ONCE(msk->snd_nxt))
+ if (msk->snd_una == msk->snd_nxt)
return NULL;
return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
@@ -790,14 +810,36 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}
+static inline u32 mptcp_notsent_lowat(const struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ u32 val;
+
+ val = READ_ONCE(mptcp_sk(sk)->notsent_lowat);
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+}
+
+static inline bool mptcp_stream_memory_free(const struct sock *sk, int wake)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 notsent_bytes;
+
+ notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);
+ return (notsent_bytes << wake) < mptcp_notsent_lowat(sk);
+}
+
+static inline bool __mptcp_stream_is_writeable(const struct sock *sk, int wake)
+{
+ return mptcp_stream_memory_free(sk, wake) &&
+ __sk_stream_is_writeable(sk, wake);
+}
+
static inline void mptcp_write_space(struct sock *sk)
{
- if (sk_stream_is_writeable(sk)) {
- /* pairs with memory barrier in mptcp_poll */
- smp_mb();
- if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
- sk_stream_write_space(sk);
- }
+ /* pairs with memory barrier in mptcp_poll */
+ smp_mb();
+ if (mptcp_stream_memory_free(sk, 1))
+ sk_stream_write_space(sk);
}
static inline void __mptcp_sync_sndbuf(struct sock *sk)
@@ -808,7 +850,7 @@ static inline void __mptcp_sync_sndbuf(struct sock *sk)
if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
return;
- new_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[0];
+ new_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]);
mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
@@ -928,21 +970,15 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int
int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
unsigned int id,
u8 *flags, int *ifindex);
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup);
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
@@ -958,6 +994,8 @@ void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflo
const struct mptcp_options_received *mp_opt);
void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
struct request_sock *req);
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry);
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
@@ -1022,6 +1060,15 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info);
static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
{
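
A worked instance of mptcp_stream_memory_free() above, with an assumed lowat of 16384 bytes: with write_seq - snd_nxt == 8000 not-yet-pushed bytes and wake == 1, 8000 << 1 = 16000 < 16384, so the socket still counts as writable; at 8500 bytes, 17000 >= 16384 and poll stops reporting EPOLLOUT until the subflows drain:

static bool example_writable(u64 write_seq, u64 snd_nxt)
{
	u32 notsent = write_seq - snd_nxt;	/* e.g. 108000 - 100000 */

	return ((u64)notsent << 1) < 16384;	/* 16000 < 16384 -> true */
}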
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index c40f1428e602..dcd1c76d2a3b 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -624,20 +624,11 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
return ret;
}
-static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
-
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->cork = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -649,25 +640,15 @@ static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optva
}
if (!val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
return 0;
}
-static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->nodelay = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -679,8 +660,6 @@ static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t op
}
if (val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
-
return 0;
}
@@ -803,25 +782,10 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
int ret, val;
switch (optname) {
- case TCP_INQ:
- ret = mptcp_get_int_option(msk, optval, optlen, &val);
- if (ret)
- return ret;
- if (val < 0 || val > 1)
- return -EINVAL;
-
- lock_sock(sk);
- msk->recvmsg_inq = !!val;
- release_sock(sk);
- return 0;
case TCP_ULP:
return -EOPNOTSUPP;
case TCP_CONGESTION:
return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
- case TCP_CORK:
- return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
- case TCP_NODELAY:
- return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
case TCP_DEFER_ACCEPT:
/* See tcp.c: TCP_DEFER_ACCEPT does not fail */
mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen);
@@ -834,7 +798,34 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
optval, optlen);
}
- return -EOPNOTSUPP;
+ ret = mptcp_get_int_option(msk, optval, optlen, &val);
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+ switch (optname) {
+ case TCP_INQ:
+ if (val < 0 || val > 1)
+ ret = -EINVAL;
+ else
+ msk->recvmsg_inq = !!val;
+ break;
+ case TCP_NOTSENT_LOWAT:
+ WRITE_ONCE(msk->notsent_lowat, val);
+ mptcp_write_space(sk);
+ break;
+ case TCP_CORK:
+ ret = __mptcp_setsockopt_sol_tcp_cork(msk, val);
+ break;
+ case TCP_NODELAY:
+ ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ }
+
+ release_sock(sk);
+ return ret;
}
int mptcp_setsockopt(struct sock *sk, int level, int optname,
@@ -942,7 +933,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
mptcp_data_unlock(sk);
slow = lock_sock_fast(sk);
- info->mptcpi_csum_enabled = msk->csum_enabled;
+ info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
info->mptcpi_token = msk->token;
info->mptcpi_write_seq = msk->write_seq;
info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits;
@@ -1349,6 +1340,8 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->cork);
case TCP_NODELAY:
return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
+ case TCP_NOTSENT_LOWAT:
+ return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
}
return -EOPNOTSUPP;
}
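
With the handling above, TCP_NOTSENT_LOWAT can be set and queried on MPTCP sockets just like on plain TCP ones. A minimal userspace sketch, assuming a libc that already defines IPPROTO_MPTCP (262 on Linux):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int open_mptcp_with_lowat(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	int lowat = 128 * 1024;	/* stay writable only below 128 KiB unsent */

	if (fd >= 0)
		setsockopt(fd, SOL_TCP, TCP_NOTSENT_LOWAT,
			   &lowat, sizeof(lowat));
	return fd;
}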
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 71ba86246ff8..1626dd20c68f 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -15,13 +15,11 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
@@ -75,7 +73,8 @@ static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_
get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
- subflow_generate_hmac(msk->local_key, msk->remote_key,
+ subflow_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
subflow_req->local_nonce,
subflow_req->remote_nonce, hmac);
@@ -714,7 +713,8 @@ static bool subflow_hmac_valid(const struct request_sock *req,
if (!msk)
return false;
- subflow_generate_hmac(msk->remote_key, msk->local_key,
+ subflow_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
subflow_req->remote_nonce,
subflow_req->local_nonce, hmac);
@@ -1548,8 +1548,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
&flags, &ifindex);
subflow->remote_key_valid = 1;
- subflow->remote_key = msk->remote_key;
- subflow->local_key = msk->local_key;
+ subflow->remote_key = READ_ONCE(msk->remote_key);
+ subflow->local_key = READ_ONCE(msk->local_key);
subflow->token = msk->token;
mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index bfff53e668da..4fc39fa2e262 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -52,14 +52,19 @@ static struct mptcp_subflow_context *build_ctx(struct kunit *test)
static struct mptcp_sock *build_msk(struct kunit *test)
{
struct mptcp_sock *msk;
+ struct sock *sk;
msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
sock_net_set((struct sock *)msk, &init_net);
+ sk = (struct sock *)msk;
+
/* be sure the token helpers can dereference sk->sk_prot */
- ((struct sock *)msk)->sk_prot = &tcp_prot;
+ sk->sk_prot = &tcp_prot;
+ sk->sk_protocol = IPPROTO_MPTCP;
+
return msk;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 441d1f134110..df2dc21304ef 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -818,7 +818,7 @@ config NETFILTER_XT_TARGET_AUDIT
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK
config NETFILTER_XT_TARGET_CT
tristate '"CT" target support'
depends on NF_CONNTRACK
- depends on IP_NF_RAW || IP6_NF_RAW
+ depends on IP_NF_RAW || IP6_NF_RAW || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CT' target, which allows you to specify initial
@@ -880,7 +880,7 @@ config NETFILTER_XT_TARGET_CT
config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" and "TOS" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `DSCP' target, which allows you to manipulate
@@ -896,7 +896,7 @@ config NETFILTER_XT_TARGET_DSCP
config NETFILTER_XT_TARGET_HL
tristate '"HL" hoplimit target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds the "HL" (for IPv6) and "TTL" (for IPv4)
@@ -1080,7 +1080,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
select NF_TPROXY_IPV4
@@ -1147,7 +1147,7 @@ config NETFILTER_XT_TARGET_TCPMSS
config NETFILTER_XT_TARGET_TCPOPTSTRIP
tristate '"TCPOPTSTRIP" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a "TCPOPTSTRIP" target, which allows you to strip
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a743db073887..98d7dbe3d787 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1511,9 +1511,7 @@ int __init ip_vs_conn_init(void)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
- ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
- sizeof(struct ip_vs_conn), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ ip_vs_conn_cachep = KMEM_CACHE(ip_vs_conn, SLAB_HWCACHE_ALIGN);
if (!ip_vs_conn_cachep) {
kvfree(ip_vs_conn_tab);
return -ENOMEM;
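
This hunk (and the nf_conncount and nf_conntrack_expect ones below) replaces open-coded kmem_cache_create() calls with the KMEM_CACHE() helper, which derives the cache name, object size and alignment from the struct type itself. From include/linux/slab.h, roughly:

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)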
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index 0e4beae421f8..5257d5e7eb09 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -314,7 +314,7 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
static const struct bpf_func_proto *
bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
const struct bpf_verifier_ops netfilter_verifier_ops = {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 5d8ed6c90b7e..8715617b02fe 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -605,15 +605,11 @@ static int __init nf_conncount_modinit(void)
for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
- conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
- sizeof(struct nf_conncount_tuple),
- 0, 0, NULL);
+ conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
if (!conncount_conn_cachep)
return -ENOMEM;
- conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
- sizeof(struct nf_conncount_rb),
- 0, 0, NULL);
+ conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
if (!conncount_rb_cachep) {
kmem_cache_destroy(conncount_conn_cachep);
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 475358ec8212..d2492d050fe6 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -467,7 +467,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_ct_kfunc_set)
+BTF_KFUNCS_START(nf_ct_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
@@ -478,7 +478,7 @@ BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_ct_kfunc_set)
+BTF_KFUNCS_END(nf_ct_kfunc_set)
static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = {
.owner = THIS_MODULE,
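
BTF_SET8_START/END becomes BTF_KFUNCS_START/END here (and in nf_nat_bpf.c below): the new macros additionally tag the ID set as a kfunc set in the BTF metadata, part of this cycle's BPF changes. Registration itself is unchanged; a sketch of the existing init-time call, using the set declared above:

static int __init example_register(void)
{
	/* nf_conntrack_bpf.c registers the same set for XDP and TC */
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
					 &nf_conntrack_kfunc_set);
}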
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5b876fa7f9af..c63868666bd9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2530,7 +2530,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
* netfilter framework. Roll on, two-stage module
* delete...
*/
- synchronize_net();
+ synchronize_rcu_expedited();
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 81ca348915c9..21fa550966f0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -722,9 +722,7 @@ int nf_conntrack_expect_init(void)
nf_ct_expect_hsize = 1;
}
nf_ct_expect_max = nf_ct_expect_hsize * 4;
- nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
- sizeof(struct nf_conntrack_expect),
- 0, 0, NULL);
+ nf_ct_expect_cachep = KMEM_CACHE(nf_conntrack_expect, 0);
if (!nf_ct_expect_cachep)
return -ENOMEM;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index e16f158388bb..370f8231385c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -31,10 +31,10 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
int i;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
- if (loggers[pf][i] == NULL)
+ log = nft_log_dereference(loggers[pf][i]);
+ if (!log)
continue;
- log = nft_log_dereference(loggers[pf][i]);
if (!strncasecmp(str_logger, log->name, strlen(log->name)))
return log;
}
@@ -156,6 +156,11 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
struct nf_logger *logger;
int ret = -ENOENT;
+ if (pf >= ARRAY_SIZE(loggers))
+ return -EINVAL;
+ if (type >= NF_LOG_TYPE_MAX)
+ return -EINVAL;
+
if (pf == NFPROTO_INET) {
ret = nf_logger_find_get(NFPROTO_IPV4, type);
if (ret < 0)
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 6e3b2f58855f..481be15609b1 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -54,9 +54,9 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_nat_kfunc_set)
+BTF_KFUNCS_START(nf_nat_kfunc_set)
BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_nat_kfunc_set)
+BTF_KFUNCS_END(nf_nat_kfunc_set)
static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index e2f334f70281..7f12e56e6e52 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -248,109 +248,3 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
-
-static unsigned int nf_iterate(struct sk_buff *skb,
- struct nf_hook_state *state,
- const struct nf_hook_entries *hooks,
- unsigned int *index)
-{
- const struct nf_hook_entry *hook;
- unsigned int verdict, i = *index;
-
- while (i < hooks->num_hook_entries) {
- hook = &hooks->hooks[i];
-repeat:
- verdict = nf_hook_entry_hookfn(hook, skb, state);
- if (verdict != NF_ACCEPT) {
- *index = i;
- if (verdict != NF_REPEAT)
- return verdict;
- goto repeat;
- }
- i++;
- }
-
- *index = i;
- return NF_ACCEPT;
-}
-
-static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
-{
- switch (pf) {
-#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
- case NFPROTO_BRIDGE:
- return rcu_dereference(net->nf.hooks_bridge[hooknum]);
-#endif
- case NFPROTO_IPV4:
- return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
- case NFPROTO_IPV6:
- return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
- default:
- WARN_ON_ONCE(1);
- return NULL;
- }
-
- return NULL;
-}
-
-/* Caller must hold rcu read-side lock */
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
-{
- const struct nf_hook_entry *hook_entry;
- const struct nf_hook_entries *hooks;
- struct sk_buff *skb = entry->skb;
- const struct net *net;
- unsigned int i;
- int err;
- u8 pf;
-
- net = entry->state.net;
- pf = entry->state.pf;
-
- hooks = nf_hook_entries_head(net, pf, entry->state.hook);
-
- i = entry->hook_index;
- if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
- kfree_skb(skb);
- nf_queue_entry_free(entry);
- return;
- }
-
- hook_entry = &hooks->hooks[i];
-
- /* Continue traversal iff userspace said ok... */
- if (verdict == NF_REPEAT)
- verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
-
- if (verdict == NF_ACCEPT) {
- if (nf_reroute(skb, entry) < 0)
- verdict = NF_DROP;
- }
-
- if (verdict == NF_ACCEPT) {
-next_hook:
- ++i;
- verdict = nf_iterate(skb, &entry->state, hooks, &i);
- }
-
- switch (verdict & NF_VERDICT_MASK) {
- case NF_ACCEPT:
- case NF_STOP:
- local_bh_disable();
- entry->state.okfn(entry->state.net, entry->state.sk, skb);
- local_bh_enable();
- break;
- case NF_QUEUE:
- err = nf_queue(skb, &entry->state, i, verdict);
- if (err == 1)
- goto next_hook;
- break;
- case NF_STOLEN:
- break;
- default:
- kfree_skb(skb);
- }
-
- nf_queue_entry_free(entry);
-}
-EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index fbbc4fd37349..5b140c12b7df 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -800,7 +800,7 @@ synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
- iph->hop_limit = net->ipv6.devconf_all->hop_limit;
+ iph->hop_limit = READ_ONCE(net->ipv6.devconf_all->hop_limit);
iph->nexthdr = IPPROTO_TCP;
iph->saddr = *saddr;
iph->daddr = *daddr;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1683dc196b59..e93f905e60b6 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1195,8 +1195,10 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_WAS_ORPHAN (__NFT_TABLE_F_INTERNAL << 2)
#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
- __NFT_TABLE_F_WAS_AWAKEN)
+ __NFT_TABLE_F_WAS_AWAKEN | \
+ __NFT_TABLE_F_WAS_ORPHAN)
static int nf_tables_updtable(struct nft_ctx *ctx)
{
@@ -1216,8 +1218,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if ((nft_table_has_owner(ctx->table) &&
!(flags & NFT_TABLE_F_OWNER)) ||
- (!nft_table_has_owner(ctx->table) &&
- flags & NFT_TABLE_F_OWNER))
+ (flags & NFT_TABLE_F_OWNER &&
+ !nft_table_is_orphan(ctx->table)))
+ return -EOPNOTSUPP;
+
+ if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST)
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
@@ -1246,6 +1251,13 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
}
}
+ if ((flags & NFT_TABLE_F_OWNER) &&
+ !nft_table_has_owner(ctx->table)) {
+ ctx->table->nlpid = ctx->portid;
+ ctx->table->flags |= NFT_TABLE_F_OWNER |
+ __NFT_TABLE_F_WAS_ORPHAN;
+ }
+
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
@@ -4238,23 +4250,18 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
* given, in that case the amount of memory per element is used.
*/
static const struct nft_set_ops *
-nft_select_set_ops(const struct nft_ctx *ctx,
- const struct nlattr * const nla[],
+nft_select_set_ops(const struct nft_ctx *ctx, u32 flags,
const struct nft_set_desc *desc)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_set_ops *ops, *bops;
struct nft_set_estimate est, best;
const struct nft_set_type *type;
- u32 flags = 0;
int i;
lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
- if (nla[NFTA_SET_FLAGS] != NULL)
- flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
-
bops = NULL;
best.size = ~0;
best.lookup = ~0;
@@ -5146,7 +5153,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
- ops = nft_select_set_ops(&ctx, nla, &desc);
+ ops = nft_select_set_ops(&ctx, flags, &desc);
if (IS_ERR(ops))
return PTR_ERR(ops);
@@ -10435,6 +10442,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
} else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
}
+ if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
+ trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER;
+ trans->ctx.table->nlpid = 0;
+ }
trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
@@ -11361,6 +11372,10 @@ again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
+ if (table->flags & NFT_TABLE_F_PERSIST) {
+ table->flags &= ~NFT_TABLE_F_OWNER;
+ continue;
+ }
__nft_release_hook(net, table);
list_del_rcu(&table->list);
to_delete[deleted++] = table;
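
The orphan handling above depends on a small predicate not visible in this diff; consistent with the update/abort/release paths here (a table whose owner went away keeps NFT_TABLE_F_PERSIST but loses NFT_TABLE_F_OWNER), it presumably reads:

static inline bool nft_table_is_orphan(const struct nft_table *table)
{
	return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
	       NFT_TABLE_F_PERSIST;
}

On re-attach, nf_tables_updtable() then restores NFT_TABLE_F_OWNER and records the new owner's portid in table->nlpid, with __NFT_TABLE_F_WAS_ORPHAN letting an aborted transaction undo exactly that.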
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5cf38fc0a366..00f4bd21c59b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -225,6 +225,148 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
return entry;
}
+static unsigned int nf_iterate(struct sk_buff *skb,
+ struct nf_hook_state *state,
+ const struct nf_hook_entries *hooks,
+ unsigned int *index)
+{
+ const struct nf_hook_entry *hook;
+ unsigned int verdict, i = *index;
+
+ while (i < hooks->num_hook_entries) {
+ hook = &hooks->hooks[i];
+repeat:
+ verdict = nf_hook_entry_hookfn(hook, skb, state);
+ if (verdict != NF_ACCEPT) {
+ *index = i;
+ if (verdict != NF_REPEAT)
+ return verdict;
+ goto repeat;
+ }
+ i++;
+ }
+
+ *index = i;
+ return NF_ACCEPT;
+}
+
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+ switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ case NFPROTO_BRIDGE:
+ return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+ case NFPROTO_IPV4:
+ return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+ case NFPROTO_IPV6:
+ return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+ const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
+ return ip_route_me_harder(entry->state.net, entry->state.sk,
+ skb, RTN_UNSPEC);
+ }
+#endif
+ return 0;
+}
+
+static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+ const struct nf_ipv6_ops *v6ops;
+ int ret = 0;
+
+ switch (entry->state.pf) {
+ case AF_INET:
+ ret = nf_ip_reroute(skb, entry);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ ret = v6ops->reroute(skb, entry);
+ break;
+ }
+ return ret;
+}
+
+/* caller must hold rcu read-side lock */
+static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+{
+ const struct nf_hook_entry *hook_entry;
+ const struct nf_hook_entries *hooks;
+ struct sk_buff *skb = entry->skb;
+ const struct net *net;
+ unsigned int i;
+ int err;
+ u8 pf;
+
+ net = entry->state.net;
+ pf = entry->state.pf;
+
+ hooks = nf_hook_entries_head(net, pf, entry->state.hook);
+
+ i = entry->hook_index;
+ if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
+ nf_queue_entry_free(entry);
+ return;
+ }
+
+ hook_entry = &hooks->hooks[i];
+
+ /* Continue traversal iff userspace said ok... */
+ if (verdict == NF_REPEAT)
+ verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
+
+ if (verdict == NF_ACCEPT) {
+ if (nf_reroute(skb, entry) < 0)
+ verdict = NF_DROP;
+ }
+
+ if (verdict == NF_ACCEPT) {
+next_hook:
+ ++i;
+ verdict = nf_iterate(skb, &entry->state, hooks, &i);
+ }
+
+ switch (verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ case NF_STOP:
+ local_bh_disable();
+ entry->state.okfn(entry->state.net, entry->state.sk, skb);
+ local_bh_enable();
+ break;
+ case NF_QUEUE:
+ err = nf_queue(skb, &entry->state, i, verdict);
+ if (err == 1)
+ goto next_hook;
+ break;
+ case NF_STOLEN:
+ break;
+ default:
+ kfree_skb(skb);
+ }
+
+ nf_queue_entry_free(entry);
+}
+
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_ct_hook *ct_hook;
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 7f61506e5b44..7fec57ff736f 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -63,7 +63,6 @@ static int nft_osf_init(const struct nft_ctx *ctx,
{
struct nft_osf *priv = nft_expr_priv(expr);
u32 flags;
- int err;
u8 ttl;
if (!tb[NFTA_OSF_DREG])
@@ -83,13 +82,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
priv->flags = flags;
}
- err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
- NULL, NFT_DATA_VALUE,
- NFT_OSF_MAXGENRELEN);
- if (err < 0)
- return err;
-
- return 0;
+ return nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE,
+ NFT_OSF_MAXGENRELEN);
}
static int nft_osf_dump(struct sk_buff *skb,
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index aa1d9e93a9a0..c0ceea068936 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -359,11 +359,13 @@
*
* Return: -1 on no match, bit position on 'match_only', 0 otherwise.
*/
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only)
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only)
{
unsigned long bitset;
- int k, ret = -1;
+ unsigned int k;
+ int ret = -1;
for (k = 0; k < len; k++) {
bitset = map[k];
@@ -412,9 +414,9 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
struct nft_pipapo_scratch *scratch;
unsigned long *res_map, *fill_map;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
bool map_index;
int i;
@@ -505,6 +507,7 @@ out:
* @data: Key data to be matched against existing elements
* @genmask: If set, check that element is active in given genmask
* @tstamp: timestamp to check for expired elements
+ * @gfp: the type of memory to allocate (see kmalloc).
*
* This is essentially the same as the lookup function, except that it matches
* key data against the uncommitted copy and doesn't use preallocated maps for
@@ -515,22 +518,26 @@ out:
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
const struct nft_set *set,
const u8 *data, u8 genmask,
- u64 tstamp)
+ u64 tstamp, gfp_t gfp)
{
struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *m = priv->clone;
unsigned long *res_map, *fill_map = NULL;
- struct nft_pipapo_field *f;
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
int i;
- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ m = priv->clone;
+ if (m->bsize_max == 0)
+ return ret;
+
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
if (!res_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
if (!fill_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
@@ -608,7 +615,8 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
struct nft_pipapo_elem *e;
e = pipapo_get(net, set, (const u8 *)elem->key.val.data,
- nft_genmask_cur(net), get_jiffies_64());
+ nft_genmask_cur(net), get_jiffies_64(),
+ GFP_ATOMIC);
if (IS_ERR(e))
return ERR_CAST(e);
@@ -616,6 +624,65 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
}
/**
+ * pipapo_realloc_mt() - Reallocate mapping table if needed upon resize
+ * @f: Field containing mapping table
+ * @old_rules: Amount of existing mapped rules
+ * @rules: Amount of new rules to map
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int pipapo_realloc_mt(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
+{
+ union nft_pipapo_map_bucket *new_mt = NULL, *old_mt = f->mt;
+ const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
+ unsigned int rules_alloc = rules;
+
+ might_sleep();
+
+ if (unlikely(rules == 0))
+ goto out_free;
+
+ /* growing and enough space left, no action needed */
+ if (rules > old_rules && f->rules_alloc > rules)
+ return 0;
+
+ /* downsizing, and the extra slack has not grown too large */
+ if (rules < old_rules) {
+ unsigned int remove = f->rules_alloc - rules;
+
+ if (remove < (2u * extra))
+ return 0;
+ }
+
+ /* If the set needs more than one page of memory for rules, then
+ * allocate another extra page to avoid frequent reallocation.
+ */
+ if (rules > extra &&
+ check_add_overflow(rules, extra, &rules_alloc))
+ return -EOVERFLOW;
+
+ new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL);
+ if (!new_mt)
+ return -ENOMEM;
+
+ if (old_mt)
+ memcpy(new_mt, old_mt, min(old_rules, rules) * sizeof(*new_mt));
+
+ if (rules > old_rules) {
+ memset(new_mt + old_rules, 0,
+ (rules - old_rules) * sizeof(*new_mt));
+ }
+out_free:
+ f->rules_alloc = rules_alloc;
+ f->mt = new_mt;
+
+ kvfree(old_mt);
+
+ return 0;
+}
+
+/**
* pipapo_resize() - Resize lookup or mapping table, or both
* @f: Field containing lookup and mapping tables
* @old_rules: Previous amount of rules in field
@@ -627,12 +694,15 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
*
* Return: 0 on success, -ENOMEM on allocation failure.
*/
-static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
+static int pipapo_resize(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
{
long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
- union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
- size_t new_bucket_size, copy;
- int group, bucket;
+ unsigned int new_bucket_size, copy;
+ int group, bucket, err;
+
+ if (rules >= NFT_PIPAPO_RULE0_MAX)
+ return -ENOSPC;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
@@ -672,27 +742,18 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
}
mt:
- new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
- if (!new_mt) {
+ err = pipapo_realloc_mt(f, old_rules, rules);
+ if (err) {
kvfree(new_lt);
- return -ENOMEM;
- }
-
- memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
- if (rules > old_rules) {
- memset(new_mt + old_rules, 0,
- (rules - old_rules) * sizeof(*new_mt));
+ return err;
}
if (new_lt) {
f->bsize = new_bucket_size;
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
kvfree(old_lt);
}
- f->mt = new_mt;
- kvfree(old_mt);
-
return 0;
}
@@ -843,8 +904,8 @@ static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
*/
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
+ unsigned int groups, bb;
unsigned long *new_lt;
- int groups, bb;
size_t lt_size;
lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
@@ -894,7 +955,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
f->groups = groups;
f->bb = bb;
kvfree(f->lt);
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
}
/**
@@ -911,7 +972,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int mask_bits)
{
- int rule = f->rules, group, ret, bit_offset = 0;
+ unsigned int rule = f->rules, group, ret, bit_offset = 0;
ret = pipapo_resize(f, f->rules, f->rules + 1);
if (ret)
@@ -1216,7 +1277,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
else
end = start;
- dup = pipapo_get(net, set, start, genmask, tstamp);
+ dup = pipapo_get(net, set, start, genmask, tstamp, GFP_KERNEL);
if (!IS_ERR(dup)) {
/* Check if we already have the same exact entry */
const struct nft_data *dup_key, *dup_end;
@@ -1238,7 +1299,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
if (PTR_ERR(dup) == -ENOENT) {
/* Look for partially overlapping entries */
- dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp);
+ dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp,
+ GFP_KERNEL);
}
if (PTR_ERR(dup) != -ENOENT) {
@@ -1251,8 +1313,14 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
/* Validate */
start_p = start;
end_p = end;
+
+ /* some helpers return -1, or >= 0 for a valid rule position,
+ * so we cannot support more than INT_MAX rules at this time.
+ */
+ BUILD_BUG_ON(NFT_PIPAPO_RULE0_MAX > INT_MAX);
+
nft_pipapo_for_each_field(f, i, m) {
- if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
+ if (f->rules >= NFT_PIPAPO_RULE0_MAX)
return -ENOSPC;
if (memcmp(start_p, end_p,
@@ -1358,18 +1426,25 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (!new_lt)
goto out_lt;
- NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
+ dst->lt = new_lt;
memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
NFT_PIPAPO_LT_ALIGN(src->lt),
src->bsize * sizeof(*dst->lt) *
src->groups * NFT_PIPAPO_BUCKETS(src->bb));
- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
- if (!dst->mt)
- goto out_mt;
+ if (src->rules > 0) {
+ dst->mt = kvmalloc_array(src->rules_alloc,
+ sizeof(*src->mt), GFP_KERNEL);
+ if (!dst->mt)
+ goto out_mt;
+
+ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ } else {
+ dst->mt = NULL;
+ dst->rules_alloc = 0;
+ }
- memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
src++;
dst++;
}
@@ -1423,10 +1498,10 @@ out_scratch:
*
* Return: Number of rules that originated from the same entry as @first.
*/
-static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
+static unsigned int pipapo_rules_same_key(struct nft_pipapo_field *f, unsigned int first)
{
struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
- int r;
+ unsigned int r;
for (r = first; r < f->rules; r++) {
if (r != first && e != f->mt[r].e)
@@ -1479,8 +1554,9 @@ static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
* 0 1 2
* element pointers: 0x42 0x42 0x44
*/
-static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
- int start, int n, int to_offset, bool is_last)
+static void pipapo_unmap(union nft_pipapo_map_bucket *mt, unsigned int rules,
+ unsigned int start, unsigned int n,
+ unsigned int to_offset, bool is_last)
{
int i;
@@ -1586,8 +1662,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
+ unsigned int rules_f0, first_rule = 0;
u64 tstamp = nft_net_tstamp(net);
- int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
struct nft_trans_gc *gc;
@@ -1597,8 +1673,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
- struct nft_pipapo_field *f;
- int i, start, rules_fx;
+ const struct nft_pipapo_field *f;
+ unsigned int i, start, rules_fx;
start = first_rule;
rules_fx = rules_f0;
@@ -1792,7 +1868,8 @@ static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo_elem *e;
- e = pipapo_get(net, set, data, nft_genmask_next(net), nft_net_tstamp(net));
+ e = pipapo_get(net, set, data, nft_genmask_next(net),
+ nft_net_tstamp(net), GFP_KERNEL);
if (IS_ERR(e))
return NULL;
@@ -1976,7 +2053,7 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
- int rules_f0, first_rule = 0;
+ unsigned int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
const u8 *data;
@@ -2039,9 +2116,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
- int i, r;
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ unsigned int i, r;
rcu_read_lock();
if (iter->genmask == nft_genmask_cur(net))
@@ -2145,6 +2222,9 @@ static int nft_pipapo_init(const struct nft_set *set,
field_count = desc->field_count ? : 1;
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS > 255);
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS != NFT_REG32_COUNT);
+
if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
@@ -2166,7 +2246,11 @@ static int nft_pipapo_init(const struct nft_set *set,
rcu_head_init(&m->rcu);
nft_pipapo_for_each_field(f, i, m) {
- int len = desc->field_len[i] ? : set->klen;
+ unsigned int len = desc->field_len[i] ? : set->klen;
+
+ /* f->groups is u8 */
+ BUILD_BUG_ON((NFT_PIPAPO_MAX_BYTES *
+ BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS_LARGE_SET) >= 256);
f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
@@ -2175,7 +2259,8 @@ static int nft_pipapo_init(const struct nft_set *set,
f->bsize = 0;
f->rules = 0;
- NFT_PIPAPO_LT_ASSIGN(f, NULL);
+ f->rules_alloc = 0;
+ f->lt = NULL;
f->mt = NULL;
}
@@ -2211,7 +2296,7 @@ static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
struct nft_pipapo_match *m)
{
struct nft_pipapo_field *f;
- int i, r;
+ unsigned int i, r;
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 3842c7341a9f..24cd1ff73f98 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -70,15 +70,9 @@
#define NFT_PIPAPO_ALIGN_HEADROOM \
(NFT_PIPAPO_ALIGN - ARCH_KMALLOC_MINALIGN)
#define NFT_PIPAPO_LT_ALIGN(lt) (PTR_ALIGN((lt), NFT_PIPAPO_ALIGN))
-#define NFT_PIPAPO_LT_ASSIGN(field, x) \
- do { \
- (field)->lt_aligned = NFT_PIPAPO_LT_ALIGN(x); \
- (field)->lt = (x); \
- } while (0)
#else
#define NFT_PIPAPO_ALIGN_HEADROOM 0
#define NFT_PIPAPO_LT_ALIGN(lt) (lt)
-#define NFT_PIPAPO_LT_ASSIGN(field, x) ((field)->lt = (x))
#endif /* NFT_PIPAPO_ALIGN */
#define nft_pipapo_for_each_field(field, index, match) \
@@ -110,22 +104,20 @@ union nft_pipapo_map_bucket {
/**
* struct nft_pipapo_field - Lookup, mapping tables and related data for a field
- * @groups: Amount of bit groups
* @rules: Number of inserted rules
* @bsize: Size of each bucket in lookup table, in longs
+ * @rules_alloc: Number of allocated rules, always >= rules
+ * @groups: Amount of bit groups
* @bb: Number of bits grouped together in lookup table buckets
* @lt: Lookup table: 'groups' rows of buckets
- * @lt_aligned: Version of @lt aligned to NFT_PIPAPO_ALIGN bytes
* @mt: Mapping table: one bucket per rule
*/
struct nft_pipapo_field {
- int groups;
- unsigned long rules;
- size_t bsize;
- int bb;
-#ifdef NFT_PIPAPO_ALIGN
- unsigned long *lt_aligned;
-#endif
+ unsigned int rules;
+ unsigned int bsize;
+ unsigned int rules_alloc;
+ u8 groups;
+ u8 bb;
unsigned long *lt;
union nft_pipapo_map_bucket *mt;
};
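
Reordering and narrowing the fields shrinks struct nft_pipapo_field so that the per-field metadata and both table pointers fit in a single 64-byte cache line. The effect can be checked with a stand-alone sketch (layouts assume LP64; field names abbreviated):

	#include <stdint.h>
	#include <stdio.h>

	struct field_old {		/* mixed widths force padding */
		int groups;
		unsigned long rules;
		size_t bsize;
		int bb;
		unsigned long *lt;
		void *mt;
	};

	struct field_new {		/* packed: 3 x u32 + 2 x u8 */
		unsigned int rules, bsize, rules_alloc;
		uint8_t groups, bb;
		unsigned long *lt;
		void *mt;
	};

	int main(void)
	{
		/* Typically prints "48 32" on x86-64. */
		printf("%zu %zu\n", sizeof(struct field_old),
		       sizeof(struct field_new));
		return 0;
	}
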
@@ -145,15 +137,15 @@ struct nft_pipapo_scratch {
/**
* struct nft_pipapo_match - Data used for lookup and matching
* @field_count: Amount of fields in set
- * @scratch: Preallocated per-CPU maps for partial matching results
* @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @scratch: Preallocated per-CPU maps for partial matching results
* @rcu: Matching data is swapped on commits
* @f: Fields, with lookup and mapping tables
*/
struct nft_pipapo_match {
- int field_count;
+ u8 field_count;
+ unsigned int bsize_max;
struct nft_pipapo_scratch * __percpu *scratch;
- size_t bsize_max;
struct rcu_head rcu;
struct nft_pipapo_field f[] __counted_by(field_count);
};
@@ -186,8 +178,9 @@ struct nft_pipapo_elem {
struct nft_set_ext ext;
};
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only);
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only);
/**
* pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
@@ -195,7 +188,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
@@ -223,7 +216,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index a3a8ddca9918..d08407d589ea 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -212,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
@@ -274,8 +275,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
@@ -350,8 +352,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -445,8 +448,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -534,8 +538,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -669,8 +674,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -726,8 +732,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -790,8 +797,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -865,8 +873,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -950,8 +959,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -1042,8 +1052,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
unsigned long bsize = f->bsize;
int i, ret = -1, b;
@@ -1119,9 +1130,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_scratch *scratch;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
unsigned long *res, *fill;
bool map_index;
int i, ret = 0;
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index acef4155f0da..008419db815a 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -179,43 +179,6 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
}
EXPORT_SYMBOL_GPL(nf_route);
-static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
-#ifdef CONFIG_INET
- const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- if (!(iph->tos == rt_info->tos &&
- skb->mark == rt_info->mark &&
- iph->daddr == rt_info->daddr &&
- iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, entry->state.sk,
- skb, RTN_UNSPEC);
- }
-#endif
- return 0;
-}
-
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
-{
- const struct nf_ipv6_ops *v6ops;
- int ret = 0;
-
- switch (entry->state.pf) {
- case AF_INET:
- ret = nf_ip_reroute(skb, entry);
- break;
- case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- ret = v6ops->reroute(skb, entry);
- break;
- }
- return ret;
-}
-
/* Only get and check the lengths, not do any hop-by-hop stuff. */
int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen)
{
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 21624d68314f..da5d929c7c85 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1142,7 +1142,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
- memcpy(t->data, ct->data, tsize - sizeof(*ct));
+ unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
+ /* UAPI 0-sized destination */);
tsize += off;
t->u.user.target_size = tsize;
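
unsafe_memcpy() behaves exactly like memcpy() but opts out of FORTIFY_SOURCE's compile-time bounds checking; the mandatory justification comment records why the check cannot apply. Here the UAPI struct ends in an unbounded array, so a fortified memcpy() has no destination size to verify. The shape in question, roughly:

	/* Hypothetical UAPI-style layout: data[] carries no declared
	 * bound, so FORTIFY cannot prove any copy into it fits. */
	struct legacy_blob {
		unsigned short size;
		unsigned char data[0];	/* old-style flexible array */
	};
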
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 27511c90a26f..1ba4f58e1d35 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -610,7 +610,7 @@ int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *iter;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -666,8 +666,8 @@ int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *prev = NULL;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmask;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmask;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -857,7 +857,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
- iter->bitmap[idx] |= (NETLBL_CATMAP_MAPTYPE)bitmap
+ iter->bitmap[idx] |= (u64)bitmap
<< (offset % NETLBL_CATMAP_MAPSIZE);
return 0;
@@ -876,7 +876,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
* Description:
* Starting at @offset, walk the bitmap from left to right until either the
* desired bit is found or we reach the end. Return the bit offset, -1 if
- * not found, or -2 if error.
+ * not found.
*/
int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
u32 offset, u8 state)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ff315351269f..7554803218a2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-MAX_LINKS"
};
-static int netlink_dump(struct sock *sk);
+static int netlink_dump(struct sock *sk, bool lock_taken);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
@@ -636,7 +636,7 @@ static struct proto netlink_proto = {
};
static int __netlink_create(struct net *net, struct socket *sock,
- struct mutex *cb_mutex, int protocol,
+ struct mutex *dump_cb_mutex, int protocol,
int kern)
{
struct sock *sk;
@@ -651,15 +651,11 @@ static int __netlink_create(struct net *net, struct socket *sock,
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
- if (cb_mutex) {
- nlk->cb_mutex = cb_mutex;
- } else {
- nlk->cb_mutex = &nlk->cb_def_mutex;
- mutex_init(nlk->cb_mutex);
- lockdep_set_class_and_name(nlk->cb_mutex,
+ mutex_init(&nlk->nl_cb_mutex);
+ lockdep_set_class_and_name(&nlk->nl_cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
- }
+ nlk->dump_cb_mutex = dump_cb_mutex;
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
@@ -1206,23 +1202,21 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
{
+ size_t head_size = SKB_HEAD_ALIGN(size);
struct sk_buff *skb;
void *data;
- if (size <= NLMSG_GOODSIZE || broadcast)
+ if (head_size <= PAGE_SIZE || broadcast)
return alloc_skb(size, GFP_KERNEL);
- size = SKB_DATA_ALIGN(size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- data = vmalloc(size);
- if (data == NULL)
+ data = kvmalloc(head_size, GFP_KERNEL);
+ if (!data)
return NULL;
- skb = __build_skb(data, size);
- if (skb == NULL)
- vfree(data);
- else
+ skb = __build_skb(data, head_size);
+ if (!skb)
+ kvfree(data);
+ else if (is_vmalloc_addr(data))
skb->destructor = netlink_skb_destructor;
return skb;
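
netlink_alloc_large_skb() now defers the kmalloc-vs-vmalloc decision to kvmalloc(), and only installs the vmalloc-aware destructor when is_vmalloc_addr() says the head really came from vmalloc. The underlying pattern, as a hedged sketch (alloc_head() is hypothetical):

	/* kvmalloc() tries kmalloc first and falls back to vmalloc;
	 * either way the buffer is released with kvfree(). */
	static void *alloc_head(size_t head_size, bool *from_vmalloc)
	{
		void *p = kvmalloc(head_size, GFP_KERNEL);

		if (p)
			*from_vmalloc = is_vmalloc_addr(p);
		return p;
	}
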
@@ -1779,6 +1773,9 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
netlink_unlock_table();
return err;
}
+ case NETLINK_LISTEN_ALL_NSID:
+ flag = NETLINK_F_LISTEN_ALL_NSID;
+ break;
case NETLINK_CAP_ACK:
flag = NETLINK_F_CAP_ACK;
break;
@@ -1987,7 +1984,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (READ_ONCE(nlk->cb_running) &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, false);
if (ret) {
WRITE_ONCE(sk->sk_err, -ret);
sk_error_report(sk);
@@ -2196,7 +2193,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
return 0;
}
-static int netlink_dump(struct sock *sk)
+static int netlink_dump(struct sock *sk, bool lock_taken)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ext_ack extack = {};
@@ -2208,7 +2205,8 @@ static int netlink_dump(struct sock *sk)
int alloc_min_size;
int alloc_size;
- mutex_lock(nlk->cb_mutex);
+ if (!lock_taken)
+ mutex_lock(&nlk->nl_cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
@@ -2260,14 +2258,33 @@ static int netlink_dump(struct sock *sk)
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
+ struct mutex *extra_mutex = nlk->dump_cb_mutex;
+
cb->extack = &extack;
+
+ if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED)
+ extra_mutex = NULL;
+ if (extra_mutex)
+ mutex_lock(extra_mutex);
nlk->dump_done_errno = cb->dump(skb, cb);
+ if (extra_mutex)
+ mutex_unlock(extra_mutex);
+
+ /* EMSGSIZE plus something already in the skb means
+ * that there's more to dump but the current skb has filled up.
+ * If the callback really wants to return EMSGSIZE to user space
+ * it needs to do so again, on the next cb->dump() call,
+ * without putting data in the skb.
+ */
+ if (nlk->dump_done_errno == -EMSGSIZE && skb->len)
+ nlk->dump_done_errno = skb->len;
+
cb->extack = NULL;
}
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
@@ -2301,13 +2318,13 @@ static int netlink_dump(struct sock *sk)
WRITE_ONCE(nlk->cb_running, false);
module = cb->module;
skb = cb->skb;
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
module_put(module);
consume_skb(skb);
return 0;
errout_skb:
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
kfree_skb(skb);
return err;
}
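
The new -EMSGSIZE handling gives dump callbacks a simpler contract: returning -EMSGSIZE with data already in the skb now means "buffer full, call me again", and only an -EMSGSIZE from an empty skb propagates to user space as an error. A callback written against that contract might look like this (have_more() and fill_one() are hypothetical):

	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		int err = 0;

		while (have_more(cb)) {
			err = fill_one(skb, cb);	/* -EMSGSIZE when full */
			if (err)
				break;
		}
		/* With data in the skb, the core converts -EMSGSIZE into
		 * "more to dump"; no need to return skb->len by hand. */
		return err;
	}
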
@@ -2330,7 +2347,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
}
nlk = nlk_sk(sk);
- mutex_lock(nlk->cb_mutex);
+ mutex_lock(&nlk->nl_cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
@@ -2350,6 +2367,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
+ cb->flags = control->flags;
cb->skb = skb;
cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
@@ -2365,9 +2383,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
WRITE_ONCE(nlk->cb_running, true);
nlk->dump_done_errno = INT_MAX;
- mutex_unlock(nlk->cb_mutex);
-
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, true);
sock_put(sk);
@@ -2383,7 +2399,7 @@ error_put:
module_put(control->module);
error_unlock:
sock_put(sk);
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
error_free:
kfree_skb(skb);
return ret;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 2145979b9986..9751e29d4bbb 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -39,8 +39,9 @@ struct netlink_sock {
bool cb_running;
int dump_done_errno;
struct netlink_callback cb;
- struct mutex *cb_mutex;
- struct mutex cb_def_mutex;
+ struct mutex nl_cb_mutex;
+
+ struct mutex *dump_cb_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
int (*netlink_bind)(struct net *net, int group);
void (*netlink_unbind)(struct net *net, int group);
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 1eeff9422856..61981e01fd6f 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -207,7 +207,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
- return err < 0 ? err : skb->len;
+ return err <= 0 ? err : skb->len;
}
static int netlink_diag_dump_done(struct netlink_callback *cb)
@@ -241,6 +241,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler netlink_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_NETLINK,
.dump = netlink_diag_handler_dump,
};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8c7af02f8454..3b7666944b11 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1232,7 +1232,7 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
if (hdr == NULL)
- return -1;
+ return -EMSGSIZE;
if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
@@ -1355,6 +1355,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int fams_to_skip = cb->args[0];
unsigned int id;
+ int err = 0;
idr_for_each_entry(&genl_fam_idr, rt, id) {
if (!rt->netnsok && !net_eq(net, &init_net))
@@ -1363,16 +1364,17 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (n++ < fams_to_skip)
continue;
- if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- skb, CTRL_CMD_NEWFAMILY) < 0) {
+ err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ skb, CTRL_CMD_NEWFAMILY);
+ if (err) {
n--;
break;
}
}
cb->args[0] = n;
- return skb->len;
+ return err;
}
static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
@@ -1836,6 +1838,9 @@ static int genl_bind(struct net *net, int group)
!ns_capable(net->user_ns, CAP_SYS_ADMIN))
ret = -EPERM;
+ if (family->bind)
+ family->bind(i);
+
break;
}
@@ -1843,12 +1848,39 @@ static int genl_bind(struct net *net, int group)
return ret;
}
+static void genl_unbind(struct net *net, int group)
+{
+ const struct genl_family *family;
+ unsigned int id;
+
+ down_read(&cb_lock);
+
+ idr_for_each_entry(&genl_fam_idr, family, id) {
+ int i;
+
+ if (family->n_mcgrps == 0)
+ continue;
+
+ i = group - family->mcgrp_offset;
+ if (i < 0 || i >= family->n_mcgrps)
+ continue;
+
+ if (family->unbind)
+ family->unbind(i);
+
+ break;
+ }
+
+ up_read(&cb_lock);
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = genl_bind,
+ .unbind = genl_unbind,
.release = genl_release,
};
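
With genl_bind()/genl_unbind() plugged into the netlink_kernel_cfg, a generic netlink family can now observe user space joining and leaving its multicast groups. A sketch of a family using the hooks, assuming the void (*)(int) callback signature implied by the calls above (the argument is the group's index within the family):

	static void my_bind(int mcgrp)
	{
		/* e.g. start generating events once a listener exists */
	}

	static void my_unbind(int mcgrp)
	{
		/* e.g. quiesce when the last listener leaves */
	}

	static struct genl_family my_family = {
		.name	= "my_family",	/* hypothetical */
		.bind	= my_bind,
		.unbind	= my_unbind,
	};
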
diff --git a/net/nfc/core.c b/net/nfc/core.c
index eb2c0959e5b6..e58dc6405054 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1015,7 +1015,7 @@ static void nfc_check_pres_timeout(struct timer_list *t)
schedule_work(&dev->check_pres_work);
}
-struct class nfc_class = {
+const struct class nfc_class = {
.name = "nfc",
.dev_release = nfc_release,
};
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index 2140f6724644..ba91284f4086 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -30,15 +30,19 @@ exit:
return r;
}
+static void nfc_llc_del_engine(struct nfc_llc_engine *llc_engine)
+{
+ list_del(&llc_engine->entry);
+ kfree_const(llc_engine->name);
+ kfree(llc_engine);
+}
+
void nfc_llc_exit(void)
{
struct nfc_llc_engine *llc_engine, *n;
- list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
- }
+ list_for_each_entry_safe(llc_engine, n, &llc_engines, entry)
+ nfc_llc_del_engine(llc_engine);
}
int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
@@ -49,7 +53,7 @@ int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
if (llc_engine == NULL)
return -ENOMEM;
- llc_engine->name = kstrdup(name, GFP_KERNEL);
+ llc_engine->name = kstrdup_const(name, GFP_KERNEL);
if (llc_engine->name == NULL) {
kfree(llc_engine);
return -ENOMEM;
@@ -82,9 +86,7 @@ void nfc_llc_unregister(const char *name)
if (llc_engine == NULL)
return;
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
+ nfc_llc_del_engine(llc_engine);
}
struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
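
kstrdup_const() skips the copy entirely when the source lives in .rodata (i.e. callers passing string literals) and just returns the same pointer; kfree_const() then knows whether there is anything to free. Since LLC engines typically register with literal names, the duplication usually costs nothing:

	/* For a literal the pointer is reused with no allocation; for a
	 * runtime-built name this degrades to kstrdup()/kfree(). */
	const char *name = kstrdup_const("shdlc", GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	/* ... use name ... */
	kfree_const(name);
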
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9bbc2686690..61270826b9ac 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2057,7 +2057,7 @@ retry:
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
@@ -2318,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
- if (po->copy_thresh &&
+ if (READ_ONCE(po->copy_thresh) &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
@@ -2586,6 +2586,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->priority = READ_ONCE(po->sk.sk_priority);
skb->mark = READ_ONCE(po->sk.sk_mark);
skb->tstamp = sockc->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
skb_setup_tx_timestamp(skb, sockc->tsflags);
skb_zcopy_set_nouarg(skb, ph.raw);
@@ -3064,6 +3065,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -3834,7 +3836,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
- pkt_sk(sk)->copy_thresh = val;
+ WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);
return 0;
}
case PACKET_VERSION:
@@ -4088,6 +4090,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_VNET_HDR_SZ:
val = READ_ONCE(po->vnet_hdr_sz);
break;
+ case PACKET_COPY_THRESH:
+ val = READ_ONCE(pkt_sk(sk)->copy_thresh);
+ break;
case PACKET_VERSION:
val = po->tp_version;
break;
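
copy_thresh is now read from the receive fast path without a lock, so every access is annotated: WRITE_ONCE() in setsockopt, READ_ONCE() in tpacket_rcv(), getsockopt and the diag dumper. The annotations guarantee untorn single accesses and mark the data race as intentional for KCSAN. The pairing in isolation:

	/* writer (setsockopt): */
	WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);

	/* reader (rx path, no lock held): single, untorn load */
	val = READ_ONCE(pkt_sk(sk)->copy_thresh);
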
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 9a7980e3309d..47f69f3dbf73 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -17,7 +17,7 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
pinfo.pdi_index = po->ifindex;
pinfo.pdi_version = po->tp_version;
pinfo.pdi_reserve = po->tp_reserve;
- pinfo.pdi_copy_thresh = po->copy_thresh;
+ pinfo.pdi_copy_thresh = READ_ONCE(po->copy_thresh);
pinfo.pdi_tstamp = READ_ONCE(po->tp_tstamp);
pinfo.pdi_flags = 0;
@@ -245,6 +245,7 @@ static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler packet_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_PACKET,
.dump = packet_diag_handler_dump,
};
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b4cc699c5fad..c749c5525b40 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -829,9 +829,7 @@ int rds_conn_init(void)
if (ret)
return ret;
- rds_conn_slab = kmem_cache_create("rds_connection",
- sizeof(struct rds_connection),
- 0, 0, NULL);
+ rds_conn_slab = KMEM_CACHE(rds_connection, 0);
if (!rds_conn_slab) {
rds_loop_net_exit();
return -ENOMEM;
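
KMEM_CACHE() derives the cache name, object size and alignment from the struct itself, so the string and the sizeof() can never drift apart. The macro expands to approximately:

	/* KMEM_CACHE(rds_connection, 0) is roughly: */
	kmem_cache_create("rds_connection",
			  sizeof(struct rds_connection),
			  __alignof__(struct rds_connection),
			  0, NULL);
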
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 465bfe5eb061..5222bc97d192 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
* rxrpc_kernel_set_max_life - Set maximum lifespan on a call
* @sock: The socket the call is on
* @call: The call to configure
- * @hard_timeout: The maximum lifespan of the call in jiffies
+ * @hard_timeout: The maximum lifespan of the call in ms
*
* Set the maximum lifespan of a call. The call will end with ETIME or
* ETIMEDOUT if it takes longer than this.
@@ -495,14 +495,14 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
unsigned long hard_timeout)
{
- unsigned long now;
+ ktime_t delay = ms_to_ktime(hard_timeout), expect_term_by;
mutex_lock(&call->user_mutex);
- now = jiffies;
- hard_timeout += now;
- WRITE_ONCE(call->expect_term_by, hard_timeout);
- rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);
+ expect_term_by = ktime_add(ktime_get_real(), delay);
+ WRITE_ONCE(call->expect_term_by, expect_term_by);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
mutex_unlock(&call->user_mutex);
}
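
Note the unit change in the exported API: the hard timeout handed to rxrpc_kernel_set_max_life() is now interpreted as milliseconds rather than jiffies, which makes callers independent of CONFIG_HZ. A caller sketch:

	/* Give the call at most 30 seconds to complete (value in ms). */
	rxrpc_kernel_set_max_life(sock, call, 30 * MSEC_PER_SEC);
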
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7818aae1be8e..08c0a32db8c7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -198,8 +198,8 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
union {
+ struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
struct {
u16 offset; /* Offset of data */
u16 len; /* Length of data */
@@ -208,9 +208,12 @@ struct rxrpc_skb_priv {
};
struct {
rxrpc_seq_t first_ack; /* First packet in acks table */
+ rxrpc_seq_t prev_ack; /* Highest seq seen */
+ rxrpc_serial_t acked_serial; /* Packet in response to (or 0) */
+ u8 reason; /* Reason for ack */
u8 nr_acks; /* Number of acks+nacks */
u8 nr_nacks; /* Number of nacks */
- };
+ } ack;
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};
@@ -248,10 +251,9 @@ struct rxrpc_security {
struct rxrpc_key_token *);
/* Work out how much data we can store in a packet, given an estimate
- * of the amount of data remaining.
+ * of the amount of data remaining, and allocate a data buffer.
*/
- int (*how_much_data)(struct rxrpc_call *, size_t,
- size_t *, size_t *, size_t *);
+ struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call, size_t remaining, gfp_t gfp);
/* impose security on a packet */
int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);
@@ -292,6 +294,7 @@ struct rxrpc_local {
struct socket *socket; /* my UDP socket */
struct task_struct *io_thread;
struct completion io_thread_ready; /* Indication that the I/O thread started */
+ struct page_frag_cache tx_alloc; /* Tx control packet allocation (I/O thread only) */
struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
struct sk_buff_head rx_delay_queue; /* Delay injection queue */
@@ -352,8 +355,8 @@ struct rxrpc_peer {
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
- u32 rto_j; /* Retransmission timeout in jiffies */
- u8 backoff; /* Backoff timeout */
+ u32 rto_us; /* Retransmission timeout in usec */
+ u8 backoff; /* Backoff timeout (as shift) */
u8 cong_ssthresh; /* Congestion slow-start threshold */
};
@@ -500,6 +503,8 @@ struct rxrpc_connection {
struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
+ struct page_frag_cache tx_data_alloc; /* Tx DATA packet allocation */
+ struct mutex tx_data_alloc_lock;
struct mutex security_lock; /* Lock for security management */
const struct rxrpc_security *security; /* applied security module */
@@ -618,17 +623,17 @@ struct rxrpc_call {
const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
struct sockaddr_rxrpc dest_srx; /* Destination address */
- unsigned long delay_ack_at; /* When DELAY ACK needs to happen */
- unsigned long ack_lost_at; /* When ACK is figured as lost */
- unsigned long resend_at; /* When next resend needs to happen */
- unsigned long ping_at; /* When next to send a ping */
- unsigned long keepalive_at; /* When next to send a keepalive ping */
- unsigned long expect_rx_by; /* When we expect to get a packet by */
- unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
- unsigned long expect_term_by; /* When we expect call termination by */
- u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
- u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
- u32 hard_timo; /* Maximum lifetime or 0 (jif) */
+ ktime_t delay_ack_at; /* When DELAY ACK needs to happen */
+ ktime_t ack_lost_at; /* When ACK is figured as lost */
+ ktime_t resend_at; /* When next resend needs to happen */
+ ktime_t ping_at; /* When next to send a ping */
+ ktime_t keepalive_at; /* When next to send a keepalive ping */
+ ktime_t expect_rx_by; /* When we expect to get a packet by */
+ ktime_t expect_req_by; /* When we expect to get a request DATA packet by */
+ ktime_t expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (ms) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (ms) */
+ u32 hard_timo; /* Maximum lifetime or 0 (s) */
struct timer_list timer; /* Combined event timer */
struct work_struct destroyer; /* In-process-context destroyer */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -673,7 +678,7 @@ struct rxrpc_call {
rxrpc_seq_t tx_transmitted; /* Highest packet transmitted */
rxrpc_seq_t tx_prepared; /* Highest Tx slot prepared. */
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
- u16 tx_backoff; /* Delay to insert due to Tx failure */
+ u16 tx_backoff; /* Delay to insert due to Tx failure (ms) */
u8 tx_winsize; /* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW 128
ktime_t tx_last_sent; /* Last time a transmission occurred */
@@ -788,40 +793,30 @@ struct rxrpc_send_params {
* Buffer of data to be output as a packet.
*/
struct rxrpc_txbuf {
- struct rcu_head rcu;
struct list_head call_link; /* Link in call->tx_sendmsg/tx_buffer */
struct list_head tx_link; /* Link in live Enc queue or Tx queue */
ktime_t last_sent; /* Time at which last transmitted */
refcount_t ref;
rxrpc_seq_t seq; /* Sequence number of this packet */
+ rxrpc_serial_t serial; /* Last serial number transmitted with */
unsigned int call_debug_id;
unsigned int debug_id;
unsigned int len; /* Amount of data in buffer */
unsigned int space; /* Remaining data space */
unsigned int offset; /* Offset of fill point */
- unsigned long flags;
-#define RXRPC_TXBUF_LAST 0 /* Set if last packet in Tx phase */
-#define RXRPC_TXBUF_RESENT 1 /* Set if has been resent */
+ unsigned int flags;
+#define RXRPC_TXBUF_WIRE_FLAGS 0xff /* The wire protocol flags */
+#define RXRPC_TXBUF_RESENT 0x100 /* Set if has been resent */
+ __be16 cksum; /* Checksum to go in header */
+ unsigned short ack_rwind; /* ACK receive window */
u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
- struct {
- /* The packet for encrypting and DMA'ing. We align it such
- * that data[] aligns correctly for any crypto blocksize.
- */
- u8 pad[64 - sizeof(struct rxrpc_wire_header)];
- struct rxrpc_wire_header wire; /* Network-ready header */
- union {
- u8 data[RXRPC_JUMBO_DATALEN]; /* Data packet */
- struct {
- struct rxrpc_ackpacket ack;
- DECLARE_FLEX_ARRAY(u8, acks);
- };
- };
- } __aligned(64);
+ u8 nr_kvec; /* Amount of kvec[] used */
+ struct kvec kvec[3];
};
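
Replacing the inline, 64-byte-aligned packet blob with a small kvec array lets a txbuf describe header and payload fragments that live in separately allocated memory (e.g. the new page-frag caches) and hand them to the UDP socket in one call. The general kvec transmission pattern:

	/* Sketch: send header + payload from two buffers in one sendmsg. */
	struct kvec iov[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = data, .iov_len = data_len },
	};
	struct msghdr msg = { };
	int ret = kernel_sendmsg(sock, &msg, iov, 2, hdr_len + data_len);
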
static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
- return txb->wire.flags & RXRPC_CLIENT_INITIATED;
+ return txb->flags & RXRPC_CLIENT_INITIATED;
}
static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
@@ -869,17 +864,11 @@ int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
*/
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why);
-void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
enum rxrpc_propose_ack_trace);
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why);
-
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
/*
@@ -1160,9 +1149,9 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why);
int rxrpc_send_abort_packet(struct rxrpc_call *);
-int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
@@ -1223,7 +1212,7 @@ static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
/*
@@ -1295,8 +1284,9 @@ static inline void rxrpc_sysctl_exit(void) {}
* txbuf.c
*/
extern atomic_t rxrpc_nr_txbuf;
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 0f78544d043b..7bbb68504766 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -23,14 +23,14 @@
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long now = jiffies;
- unsigned long ping_at = now + rxrpc_idle_ack_delay;
-
- if (time_before(ping_at, call->ping_at)) {
- WRITE_ONCE(call->ping_at, ping_at);
- rxrpc_reduce_call_timer(call, ping_at, now,
- rxrpc_timer_set_for_ping);
- trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ ktime_t delay = ms_to_ktime(READ_ONCE(rxrpc_idle_ack_delay));
+ ktime_t now = ktime_get_real();
+ ktime_t ping_at = ktime_add(now, delay);
+
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ if (ktime_before(ping_at, call->ping_at)) {
+ call->ping_at = ping_at;
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_ping);
}
}
@@ -40,62 +40,18 @@ void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long expiry = rxrpc_soft_ack_delay;
- unsigned long now = jiffies, ack_at;
-
- if (rxrpc_soft_ack_delay < expiry)
- expiry = rxrpc_soft_ack_delay;
- if (call->peer->srtt_us != 0)
- ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
- else
- ack_at = expiry;
-
- ack_at += READ_ONCE(call->tx_backoff);
- ack_at += now;
- if (time_before(ack_at, call->delay_ack_at)) {
- WRITE_ONCE(call->delay_ack_at, ack_at);
- rxrpc_reduce_call_timer(call, ack_at, now,
- rxrpc_timer_set_for_ack);
- }
+ ktime_t now = ktime_get_real(), delay;
trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
-}
-
-/*
- * Queue an ACK for immediate transmission.
- */
-void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
- rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
-{
- struct rxrpc_txbuf *txb;
-
- if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return;
-
- rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
- rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
- if (!txb) {
- kleave(" = -ENOMEM");
- return;
- }
+ if (call->peer->srtt_us)
+ delay = (call->peer->srtt_us >> 3) * NSEC_PER_USEC;
+ else
+ delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay));
+ delay = ktime_add_ms(delay, call->tx_backoff);
- txb->ack_why = why;
- txb->wire.seq = 0;
- txb->wire.type = RXRPC_PACKET_TYPE_ACK;
- txb->wire.flags |= RXRPC_SLOW_START_OK;
- txb->ack.bufferSpace = 0;
- txb->ack.maxSkew = 0;
- txb->ack.firstPacket = 0;
- txb->ack.previousPacket = 0;
- txb->ack.serial = htonl(serial);
- txb->ack.reason = ack_reason;
- txb->ack.nAcks = 0;
-
- trace_rxrpc_send_ack(call, why, ack_reason, serial);
- rxrpc_send_ack_packet(call, txb);
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
+ call->delay_ack_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -114,25 +70,19 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
struct rxrpc_ackpacket *ack = NULL;
struct rxrpc_skb_priv *sp;
struct rxrpc_txbuf *txb;
- unsigned long resend_at;
- rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
- ktime_t now, max_age, oldest, ack_ts;
- bool unacked = false;
+ rxrpc_seq_t transmitted = call->tx_transmitted;
+ ktime_t next_resend = KTIME_MAX, rto = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
+ ktime_t resend_at = KTIME_MAX, now, delay;
+ bool unacked = false, did_send = false;
unsigned int i;
- LIST_HEAD(retrans_queue);
_enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
now = ktime_get_real();
- max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
- oldest = now;
if (list_empty(&call->tx_buffer))
goto no_resend;
- if (list_empty(&call->tx_buffer))
- goto no_further_resend;
-
trace_rxrpc_resend(call, ack_skb);
txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
@@ -143,12 +93,12 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
sp = rxrpc_skb(ack_skb);
ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
- for (i = 0; i < sp->nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
rxrpc_seq_t seq;
if (ack->acks[i] & 1)
continue;
- seq = sp->first_ack + i;
+ seq = sp->ack.first_ack + i;
if (after(txb->seq, transmitted))
break;
if (after(txb->seq, seq))
@@ -160,19 +110,23 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
goto no_further_resend;
found_txb:
- if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
+ resend_at = ktime_add(txb->last_sent, rto);
+ if (after(txb->serial, call->acks_highest_serial)) {
+ if (ktime_after(resend_at, now) &&
+ ktime_before(resend_at, next_resend))
+ next_resend = resend_at;
continue; /* Ack point not yet reached */
+ }
rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- }
+ trace_rxrpc_retransmit(call, txb->seq, txb->serial,
+ ktime_sub(resend_at, now));
- trace_rxrpc_retransmit(call, txb->seq,
- ktime_to_ns(ktime_sub(txb->last_sent,
- max_age)));
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_transmit_one(call, txb);
+ did_send = true;
+ now = ktime_get_real();
if (list_is_last(&txb->call_link, &call->tx_buffer))
goto no_further_resend;
@@ -184,43 +138,46 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
* seen. Anything between the soft-ACK table and that point will get
* ACK'd or NACK'd in due course, so don't worry about it here; here we
* need to consider retransmitting anything beyond that point.
- *
- * Note that ACK for a packet can beat the update of tx_transmitted.
*/
- if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
+ if (after_eq(call->acks_prev_seq, call->tx_transmitted))
goto no_further_resend;
list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
- if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
+ resend_at = ktime_add(txb->last_sent, rto);
+
+ if (before_eq(txb->seq, call->acks_prev_seq))
continue;
- if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
+ if (after(txb->seq, call->tx_transmitted))
break; /* Not transmitted yet */
if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
- before(ntohl(txb->wire.serial), ntohl(ack->serial)))
+ before(txb->serial, ntohl(ack->serial)))
goto do_resend; /* Wasn't accounted for by a more recent ping. */
- if (ktime_after(txb->last_sent, max_age)) {
- if (ktime_before(txb->last_sent, oldest))
- oldest = txb->last_sent;
+ if (ktime_after(resend_at, now)) {
+ if (ktime_before(resend_at, next_resend))
+ next_resend = resend_at;
continue;
}
do_resend:
unacked = true;
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
- }
+
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_transmit_one(call, txb);
+ did_send = true;
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
+ now = ktime_get_real();
}
no_further_resend:
no_resend:
- resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
- resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
- !list_empty(&retrans_queue));
- WRITE_ONCE(call->resend_at, resend_at);
+ if (resend_at < KTIME_MAX) {
+ delay = rxrpc_get_rto_backoff(call->peer, did_send);
+ resend_at = ktime_add(resend_at, delay);
+ trace_rxrpc_timer_set(call, resend_at - now, rxrpc_timer_trace_resend_reset);
+ }
+ call->resend_at = resend_at;
if (unacked)
rxrpc_congestion_timeout(call);
@@ -229,25 +186,15 @@ no_resend:
* that an ACK got lost somewhere. Send a ping to find out instead of
* retransmitting data.
*/
- if (list_empty(&retrans_queue)) {
- rxrpc_reduce_call_timer(call, resend_at, jiffies,
- rxrpc_timer_set_for_resend);
- ack_ts = ktime_sub(now, call->acks_latest_ts);
- if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
- goto out;
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
- goto out;
- }
+ if (!did_send) {
+ ktime_t next_ping = ktime_add_us(call->acks_latest_ts,
+ call->peer->srtt_us >> 3);
- /* Retransmit the queue */
- while ((txb = list_first_entry_or_null(&retrans_queue,
- struct rxrpc_txbuf, tx_link))) {
- list_del_init(&txb->tx_link);
- rxrpc_transmit_one(call, txb);
+ if (ktime_sub(next_ping, now) <= 0)
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_0_retrans);
}
-out:
_leave("");
}
@@ -257,13 +204,11 @@ out:
*/
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
-
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
- WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -320,7 +265,7 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
call->tx_top = txb->seq;
list_add_tail(&txb->call_link, &call->tx_buffer);
- if (txb->wire.flags & RXRPC_LAST_PACKET)
+ if (txb->flags & RXRPC_LAST_PACKET)
rxrpc_close_tx_phase(call);
rxrpc_transmit_one(call, txb);
@@ -372,8 +317,8 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
*/
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
{
- unsigned long now, next, t;
- bool resend = false, expired = false;
+ ktime_t now, t;
+ bool resend = false;
s32 abort_code;
rxrpc_see_call(call, rxrpc_call_see_input);
@@ -397,70 +342,73 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
goto out;
+ if (skb)
+ rxrpc_input_call_packet(call, skb);
+
/* If we see our async-event poke, check for timeout trippage. */
- now = jiffies;
- t = READ_ONCE(call->expect_rx_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
- expired = true;
+ now = ktime_get_real();
+ t = ktime_sub(call->expect_rx_by, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_expect_rx);
+ goto expired;
}
- t = READ_ONCE(call->expect_req_by);
- if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
- time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
- expired = true;
+ t = ktime_sub(call->expect_req_by, now);
+ if (t <= 0) {
+ call->expect_req_by = KTIME_MAX;
+ if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_idle);
+ goto expired;
+ }
}
- t = READ_ONCE(call->expect_term_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
- expired = true;
+ t = ktime_sub(READ_ONCE(call->expect_term_by), now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_hard);
+ goto expired;
}
- t = READ_ONCE(call->delay_ack_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
- cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->delay_ack_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_delayed_ack);
+ call->delay_ack_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
+ rxrpc_propose_ack_delayed_ack);
}
- t = READ_ONCE(call->ack_lost_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
- cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ack_lost_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_lost_ack);
+ call->ack_lost_at = KTIME_MAX;
set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
}
- t = READ_ONCE(call->keepalive_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
- cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ping_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_ping);
+ call->ping_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
- t = READ_ONCE(call->ping_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
- cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_keepalive);
- }
-
- t = READ_ONCE(call->resend_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
- cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->resend_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_resend);
+ call->resend_at = KTIME_MAX;
resend = true;
}
- if (skb)
- rxrpc_input_call_packet(call, skb);
-
rxrpc_transmit_some_data(call);
+ now = ktime_get_real();
+ t = ktime_sub(call->keepalive_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_keepalive);
+ call->keepalive_at = KTIME_MAX;
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_keepalive);
+ }
+
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -472,24 +420,13 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_send_initial_ping(call);
/* Process events */
- if (expired) {
- if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
- (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
- trace_rxrpc_call_reset(call);
- rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
- rxrpc_abort_call_reset);
- } else {
- rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
- rxrpc_abort_call_timeout);
- }
- goto out;
- }
-
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
- if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
+ if (resend &&
+ __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY &&
+ !test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
rxrpc_resend(call, NULL);
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
@@ -511,23 +448,33 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
/* Make sure the timer is restarted */
if (!__rxrpc_call_is_complete(call)) {
- next = call->expect_rx_by;
+ ktime_t next = READ_ONCE(call->expect_term_by), delay;
-#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+#define set(T) { ktime_t _t = (T); if (ktime_before(_t, next)) next = _t; }
set(call->expect_req_by);
- set(call->expect_term_by);
+ set(call->expect_rx_by);
set(call->delay_ack_at);
set(call->ack_lost_at);
set(call->resend_at);
set(call->keepalive_at);
set(call->ping_at);
- now = jiffies;
- if (time_after_eq(now, next))
+ now = ktime_get_real();
+ delay = ktime_sub(next, now);
+ if (delay <= 0) {
rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
-
- rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+ } else {
+ unsigned long nowj = jiffies, delayj, nextj;
+
+ delayj = max(nsecs_to_jiffies(delay), 1);
+ nextj = nowj + delayj;
+ if (time_before(nextj, call->timer.expires) ||
+ !timer_pending(&call->timer)) {
+ trace_rxrpc_timer_restart(call, delay, delayj);
+ timer_reduce(&call->timer, nextj);
+ }
+ }
}
out:
@@ -542,4 +489,16 @@ out:
rxrpc_shrink_call_tx_buffer(call);
_leave("");
return true;
+
+expired:
+ if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
+ (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
+ trace_rxrpc_call_reset(call);
+ rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_reset);
+ } else {
+ rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+ rxrpc_abort_call_timeout);
+ }
+ goto out;
}
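As a compact sketch of the deadline idiom this function now uses (illustrative only, not the kernel code): each ktime_t field holds an absolute deadline, KTIME_MAX means "disarmed", and a single signed comparison both tests the timer and, for one-shot timers, disarms it.

    /* Sketch: test-and-disarm for a one-shot ktime deadline. */
    static bool deadline_tripped(ktime_t *deadline, ktime_t now)
    {
            if (ktime_sub(*deadline, now) > 0)
                    return false;           /* not yet due */
            *deadline = KTIME_MAX;          /* disarm until rearmed */
            return true;
    }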
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9fc9a6c3f685..01fa71e8b1f7 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -70,20 +70,11 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
_enter("%d", call->debug_id);
if (!__rxrpc_call_is_complete(call)) {
- trace_rxrpc_timer_expired(call, jiffies);
+ trace_rxrpc_timer_expired(call);
rxrpc_poke_call(call, rxrpc_call_poke_timer);
}
}
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why)
-{
- trace_rxrpc_timer(call, why, now);
- timer_reduce(&call->timer, expire_at);
-}
-
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
static void rxrpc_destroy_call(struct work_struct *);
@@ -163,12 +154,20 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
spin_lock_init(&call->notify_lock);
spin_lock_init(&call->tx_lock);
refcount_set(&call->ref, 1);
- call->debug_id = debug_id;
- call->tx_total_len = -1;
- call->next_rx_timo = 20 * HZ;
- call->next_req_timo = 1 * HZ;
- call->ackr_window = 1;
- call->ackr_wtop = 1;
+ call->debug_id = debug_id;
+ call->tx_total_len = -1;
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
+ call->ackr_window = 1;
+ call->ackr_wtop = 1;
+ call->delay_ack_at = KTIME_MAX;
+ call->ack_lost_at = KTIME_MAX;
+ call->resend_at = KTIME_MAX;
+ call->ping_at = KTIME_MAX;
+ call->keepalive_at = KTIME_MAX;
+ call->expect_rx_by = KTIME_MAX;
+ call->expect_req_by = KTIME_MAX;
+ call->expect_term_by = KTIME_MAX;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -226,11 +225,11 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
if (p->timeouts.normal)
- call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
+ call->next_rx_timo = min(p->timeouts.normal, 1);
if (p->timeouts.idle)
- call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
+ call->next_req_timo = min(p->timeouts.idle, 1);
if (p->timeouts.hard)
- call->hard_timo = p->timeouts.hard * HZ;
+ call->hard_timo = p->timeouts.hard;
ret = rxrpc_init_client_call_security(call);
if (ret < 0) {
@@ -253,18 +252,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
*/
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
- unsigned long j = now + MAX_JIFFY_OFFSET;
-
- call->delay_ack_at = j;
- call->ack_lost_at = j;
- call->resend_at = j;
- call->ping_at = j;
- call->keepalive_at = j;
- call->expect_rx_by = j;
- call->expect_req_by = j;
- call->expect_term_by = j + call->hard_timo;
- call->timer.expires = now;
+ if (call->hard_timo) {
+ ktime_t delay = ms_to_ktime(call->hard_timo * 1000);
+
+ call->expect_term_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ }
+ call->timer.expires = jiffies;
}
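A minimal sketch of the conversion rxrpc_start_call_timer() now performs (names assumed, not verbatim): the userspace hard timeout, given in seconds, becomes an absolute wall-clock expiry.

    /* Sketch: seconds -> absolute ktime_t expiry. */
    static ktime_t hard_expiry(u32 hard_timo_secs)
    {
            ktime_t delay = ms_to_ktime((u64)hard_timo_secs * MSEC_PER_SEC);
            return ktime_add(ktime_get_real(), delay);
    }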
/*
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3b9b267a4431..d25bf1cf3670 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -636,7 +636,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
unsigned long final_ack_at = jiffies + 2;
- WRITE_ONCE(chan->final_ack_at, final_ack_at);
+ chan->final_ack_at = final_ack_at;
smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
rxrpc_reduce_conn_timer(conn, final_ack_at);
@@ -770,7 +770,7 @@ next:
conn_expires_at = conn->idle_timestamp + expiry;
- now = READ_ONCE(jiffies);
+ now = jiffies;
if (time_after(conn_expires_at, now))
goto not_yet_expired;
}
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 1f251d758cb9..598b4ee389fc 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -88,7 +88,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
struct rxrpc_ackpacket ack;
};
} __attribute__((packed)) pkt;
- struct rxrpc_ackinfo ack_info;
+ struct rxrpc_acktrailer trailer;
size_t len;
int ret, ioc;
u32 serial, mtu, call_id, padding;
@@ -122,8 +122,8 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
iov[0].iov_len = sizeof(pkt.whdr);
iov[1].iov_base = &padding;
iov[1].iov_len = 3;
- iov[2].iov_base = &ack_info;
- iov[2].iov_len = sizeof(ack_info);
+ iov[2].iov_base = &trailer;
+ iov[2].iov_len = sizeof(trailer);
serial = rxrpc_get_next_serial(conn);
@@ -158,14 +158,14 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
- ack_info.rxMTU = htonl(rxrpc_rx_mtu);
- ack_info.maxMTU = htonl(mtu);
- ack_info.rwind = htonl(rxrpc_rx_window_size);
- ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ trailer.maxMTU = htonl(rxrpc_rx_mtu);
+ trailer.ifMTU = htonl(mtu);
+ trailer.rwind = htonl(rxrpc_rx_window_size);
+ trailer.jumbo_max = htonl(rxrpc_rx_jumbo_max);
pkt.whdr.flags |= RXRPC_SLOW_START_OK;
padding = 0;
iov[0].iov_len += sizeof(pkt.ack);
- len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+ len += sizeof(pkt.ack) + 3 + sizeof(trailer);
ioc = 3;
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
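For clarity, the iovec layout this hunk preserves, sketched with the renamed trailer (the rename changes no bytes on the wire); iov[0] covers the ACK body only when one is being retransmitted:

    /* Sketch of the retransmitted-ACK layout used above. */
    struct kvec iov[3] = {
            { &pkt,     sizeof(pkt.whdr) + sizeof(pkt.ack) }, /* header + ack */
            { &padding, 3 },                                  /* alignment */
            { &trailer, sizeof(trailer) },                    /* MTU/rwind hints */
    };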
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index df8a271948a1..0af4642aeec4 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -68,6 +68,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
mutex_init(&conn->security_lock);
+ mutex_init(&conn->tx_data_alloc_lock);
skb_queue_head_init(&conn->rx_queue);
conn->rxnet = rxnet;
conn->security = &rxrpc_no_security;
@@ -341,6 +342,9 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
*/
rxrpc_purge_queue(&conn->rx_queue);
+ if (conn->tx_data_alloc.va)
+ __page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
+ conn->tx_data_alloc.pagecnt_bias);
call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
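The drain added above follows the standard page-frag teardown rule; a self-contained sketch, assuming a struct page_frag_cache owned by the dying object:

    /* Sketch: return the bias still held on the cache's current page. */
    static void drain_frag_cache(struct page_frag_cache *pfc)
    {
            if (pfc->va)
                    __page_frag_cache_drain(virt_to_page(pfc->va),
                                            pfc->pagecnt_bias);
    }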
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9691de00ade7..3dedb8c0618c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -212,7 +212,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
if (before_eq(txb->seq, call->acks_hard_ack))
continue;
- if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
+ if (txb->flags & RXRPC_LAST_PACKET) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
@@ -252,6 +252,9 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
{
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
+ call->resend_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_resend);
+
if (unlikely(call->cong_last_nack)) {
rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
call->cong_last_nack = NULL;
@@ -288,15 +291,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
- unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
- now = jiffies;
- timo = now + MAX_JIFFY_OFFSET;
-
- WRITE_ONCE(call->delay_ack_at, timo);
- trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
@@ -329,7 +328,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
case RXRPC_CALL_SERVER_RECV_REQUEST:
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
- call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+ call->expect_req_by = KTIME_MAX;
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
break;
@@ -589,14 +588,12 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
case RXRPC_CALL_SERVER_RECV_REQUEST: {
unsigned long timo = READ_ONCE(call->next_req_timo);
- unsigned long now, expect_req_by;
if (timo) {
- now = jiffies;
- expect_req_by = now + timo;
- WRITE_ONCE(call->expect_req_by, expect_req_by);
- rxrpc_reduce_call_timer(call, expect_req_by, now,
- rxrpc_timer_set_for_idle);
+ ktime_t delay = ms_to_ktime(timo);
+
+ call->expect_req_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
}
break;
}
@@ -670,14 +667,14 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
/*
* Process the extra information that may be appended to an ACK packet
*/
-static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- struct rxrpc_ackinfo *ackinfo)
+static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_acktrailer *trailer)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
unsigned int mtu;
bool wake = false;
- u32 rwind = ntohl(ackinfo->rwind);
+ u32 rwind = ntohl(trailer->rwind);
if (rwind > RXRPC_TX_MAX_WINDOW)
rwind = RXRPC_TX_MAX_WINDOW;
@@ -691,7 +688,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
- mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
+ mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
peer = call->peer;
if (mtu < peer->maxdata) {
@@ -713,20 +710,19 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
rxrpc_seq_t seq)
{
struct sk_buff *skb = call->cong_last_nack;
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int i, new_acks = 0, retained_nacks = 0;
- rxrpc_seq_t old_seq = sp->first_ack;
- u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack);
+ rxrpc_seq_t old_seq = sp->ack.first_ack;
+ u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- if (after_eq(seq, old_seq + sp->nr_acks)) {
- summary->nr_new_acks += sp->nr_nacks;
- summary->nr_new_acks += seq - (old_seq + sp->nr_acks);
+ if (after_eq(seq, old_seq + sp->ack.nr_acks)) {
+ summary->nr_new_acks += sp->ack.nr_nacks;
+ summary->nr_new_acks += seq - (old_seq + sp->ack.nr_acks);
summary->nr_retained_nacks = 0;
} else if (seq == old_seq) {
- summary->nr_retained_nacks = sp->nr_nacks;
+ summary->nr_retained_nacks = sp->ack.nr_nacks;
} else {
- for (i = 0; i < sp->nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
if (acks[i] == RXRPC_ACK_TYPE_NACK) {
if (before(old_seq + i, seq))
new_acks++;
@@ -739,7 +735,7 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
summary->nr_retained_nacks = retained_nacks;
}
- return old_seq + sp->nr_acks;
+ return old_seq + sp->ack.nr_acks;
}
/*
@@ -759,10 +755,10 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int i, old_nacks = 0;
- rxrpc_seq_t lowest_nak = seq + sp->nr_acks;
+ rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- for (i = 0; i < sp->nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
if (acks[i] == RXRPC_ACK_TYPE_ACK) {
summary->nr_acks++;
if (after_eq(seq, since))
@@ -774,7 +770,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
old_nacks++;
} else {
summary->nr_new_nacks++;
- sp->nr_nacks++;
+ sp->ack.nr_nacks++;
}
if (before(seq, lowest_nak))
@@ -835,38 +831,32 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_ackinfo info;
+ struct rxrpc_acktrailer trailer;
rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
int nr_acks, offset, ioffset;
_enter("");
- offset = sizeof(struct rxrpc_wire_header);
- if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
- offset += sizeof(ack);
-
- ack_serial = sp->hdr.serial;
- acked_serial = ntohl(ack.serial);
- first_soft_ack = ntohl(ack.firstPacket);
- prev_pkt = ntohl(ack.previousPacket);
- hard_ack = first_soft_ack - 1;
- nr_acks = ack.nAcks;
- sp->first_ack = first_soft_ack;
- sp->nr_acks = nr_acks;
- summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
- ack.reason : RXRPC_ACK__INVALID);
+ offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
+
+ ack_serial = sp->hdr.serial;
+ acked_serial = sp->ack.acked_serial;
+ first_soft_ack = sp->ack.first_ack;
+ prev_pkt = sp->ack.prev_ack;
+ nr_acks = sp->ack.nr_acks;
+ hard_ack = first_soft_ack - 1;
+ summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
+ sp->ack.reason : RXRPC_ACK__INVALID);
trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
if (acked_serial != 0) {
- switch (ack.reason) {
+ switch (summary.ack_reason) {
case RXRPC_ACK_PING_RESPONSE:
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_ping_response);
@@ -886,7 +876,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicates that the client address changed due to NAT. The server
* lost the call because it switched to a different peer.
*/
- if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
rxrpc_is_client_call(call)) {
@@ -899,7 +889,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicate a change of address. However, we can retransmit the call
* if we still have it buffered to the beginning.
*/
- if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
call->acks_hard_ack == 0 &&
@@ -917,11 +907,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto send_response;
}
- info.rxMTU = 0;
+ trailer.maxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(info) &&
- skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
+ if (skb->len >= ioffset + sizeof(trailer) &&
+ skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
+ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);
if (nr_acks > 0)
skb_condense(skb);
@@ -940,7 +930,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
call->acks_first_seq = first_soft_ack;
call->acks_prev_seq = prev_pkt;
- switch (ack.reason) {
+ switch (summary.ack_reason) {
case RXRPC_ACK_PING:
break;
default:
@@ -950,8 +940,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
/* Parse rwind and mtu sizes if provided. */
- if (info.rxMTU)
- rxrpc_input_ackinfo(call, skb, &info);
+ if (trailer.maxMTU)
+ rxrpc_input_ack_trailer(call, skb, &trailer);
if (first_soft_ack == 0)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
@@ -997,7 +987,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_congestion_management(call, skb, &summary, acked_serial);
send_response:
- if (ack.reason == RXRPC_ACK_PING)
+ if (summary.ack_reason == RXRPC_ACK_PING)
rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
rxrpc_propose_ack_respond_to_ping);
else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
@@ -1048,12 +1038,10 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
timo = READ_ONCE(call->next_rx_timo);
if (timo) {
- unsigned long now = jiffies, expect_rx_by;
+ ktime_t delay = ms_to_ktime(timo);
- expect_rx_by = now + timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, now,
- rxrpc_timer_set_for_normal);
+ call->expect_rx_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
}
switch (sp->hdr.type) {
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index 34353b6e584b..f2701068ed9e 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -15,14 +15,11 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
}
/*
- * Work out how much data we can put in an unsecured packet.
+ * Allocate an appropriately sized buffer for the amount of data remaining.
*/
-static int none_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- *_buf_size = *_data_size = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- *_offset = 0;
- return 0;
+ return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 0, gfp);
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
@@ -79,7 +76,7 @@ const struct rxrpc_security rxrpc_no_security = {
.exit = none_exit,
.init_connection_security = none_init_connection_security,
.free_call_crypto = none_free_call_crypto,
- .how_much_data = none_how_much_data,
+ .alloc_txbuf = none_alloc_txbuf,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
.respond_to_challenge = none_respond_to_challenge,
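Judging from the two implementations in this patch, the hook being swapped here has the following shape: the security class allocates and pre-sizes the txbuf itself rather than reporting buffer/data/offset sizes for the caller to apply.

    /* Assumed shape of the new security op (see insecure.c and rxkad.c). */
    struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call,
                                       size_t remain, gfp_t gfp);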
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 4a3a08a0e2cd..0300baa9afcd 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -124,6 +124,7 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
struct sk_buff *skb)
{
struct rxrpc_wire_header whdr;
+ struct rxrpc_ackpacket ack;
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
@@ -141,6 +142,16 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
+
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+ if (skb_copy_bits(skb, sizeof(whdr), &ack, sizeof(ack)) < 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_short_ack);
+ sp->ack.first_ack = ntohl(ack.firstPacket);
+ sp->ack.prev_ack = ntohl(ack.previousPacket);
+ sp->ack.acked_serial = ntohl(ack.serial);
+ sp->ack.reason = ack.reason;
+ sp->ack.nr_acks = ack.nAcks;
+ }
return true;
}
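With the ACK header captured into the skb private data at extraction time, later consumers can read host-order fields without a second skb_copy_bits(); a usage sketch:

    struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
    rxrpc_seq_t first = sp->ack.first_ack;  /* already byte-swapped */
    u8 nr_acks = sp->ack.nr_acks;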
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 34d307368135..504453c688d7 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -452,6 +452,9 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
+ if (local->tx_alloc.va)
+ __page_frag_cache_drain(virt_to_page(local->tx_alloc.va),
+ local->tx_alloc.pagecnt_bias);
}
/*
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 825b81183046..657cf35089a6 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -17,22 +17,22 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned long rxrpc_soft_ack_delay = HZ;
+unsigned long rxrpc_soft_ack_delay = 1000;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
* ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned long rxrpc_idle_ack_delay = HZ / 2;
+unsigned long rxrpc_idle_ack_delay = 500;
/*
* Receive window size in packets. This indicates the maximum number of
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 4a292f860ae3..5ea9601efd05 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -48,12 +48,10 @@ static const char rxrpc_keepalive_string[] = "";
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
if (ret < 0) {
- u16 tx_backoff = READ_ONCE(call->tx_backoff);
-
- if (tx_backoff < HZ)
- WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ if (call->tx_backoff < 1000)
+ call->tx_backoff += 100;
} else {
- WRITE_ONCE(call->tx_backoff, 0);
+ call->tx_backoff = 0;
}
}
@@ -65,84 +63,92 @@ static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
* Receiving a response to the ping will prevent the ->expect_rx_by timer from
* expiring.
*/
-static void rxrpc_set_keepalive(struct rxrpc_call *call)
+static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now)
{
- unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);
- keepalive_at += now;
- WRITE_ONCE(call->keepalive_at, keepalive_at);
- rxrpc_reduce_call_timer(call, keepalive_at, now,
- rxrpc_timer_set_for_keepalive);
+ call->keepalive_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);
}
/*
* Fill out an ACK packet.
*/
-static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
- struct rxrpc_call *call,
- struct rxrpc_txbuf *txb,
- u16 *_rwind)
+static void rxrpc_fill_out_ack(struct rxrpc_call *call,
+ struct rxrpc_txbuf *txb,
+ u8 ack_reason,
+ rxrpc_serial_t serial)
{
- struct rxrpc_ackinfo ackinfo;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxrpc_acktrailer *trailer = txb->kvec[2].iov_base + 3;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
unsigned int qsize, sack, wrap, to;
rxrpc_seq_t window, wtop;
int rsize;
u32 mtu, jmax;
- u8 *ackp = txb->acks;
+ u8 *filler = txb->kvec[2].iov_base;
+ u8 *sackp = txb->kvec[1].iov_base;
- call->ackr_nr_unacked = 0;
- atomic_set(&call->ackr_nr_consumed, 0);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
- clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
window = call->ackr_window;
wtop = call->ackr_wtop;
sack = call->ackr_sack_base % RXRPC_SACK_SIZE;
- txb->ack.firstPacket = htonl(window);
- txb->ack.nAcks = wtop - window;
+
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ txb->flags |= RXRPC_SLOW_START_OK;
+ ack->bufferSpace = 0;
+ ack->maxSkew = 0;
+ ack->firstPacket = htonl(window);
+ ack->previousPacket = htonl(call->rx_highest_seq);
+ ack->serial = htonl(serial);
+ ack->reason = ack_reason;
+ ack->nAcks = wtop - window;
+ filler[0] = 0;
+ filler[1] = 0;
+ filler[2] = 0;
+
+ if (ack_reason == RXRPC_ACK_PING)
+ txb->flags |= RXRPC_REQUEST_ACK;
if (after(wtop, window)) {
+ txb->len += ack->nAcks;
+ txb->kvec[1].iov_base = sackp;
+ txb->kvec[1].iov_len = ack->nAcks;
+
wrap = RXRPC_SACK_SIZE - sack;
- to = min_t(unsigned int, txb->ack.nAcks, RXRPC_SACK_SIZE);
+ to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);
- if (sack + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
- memcpy(txb->acks, call->ackr_sack_table + sack, txb->ack.nAcks);
+ if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
+ memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
} else {
- memcpy(txb->acks, call->ackr_sack_table + sack, wrap);
- memcpy(txb->acks + wrap, call->ackr_sack_table,
- to - wrap);
+ memcpy(sackp, call->ackr_sack_table + sack, wrap);
+ memcpy(sackp + wrap, call->ackr_sack_table, to - wrap);
}
-
- ackp += to;
} else if (before(wtop, window)) {
pr_warn("ack window backward %x %x", window, wtop);
- } else if (txb->ack.reason == RXRPC_ACK_DELAY) {
- txb->ack.reason = RXRPC_ACK_IDLE;
+ } else if (ack->reason == RXRPC_ACK_DELAY) {
+ ack->reason = RXRPC_ACK_IDLE;
}
- mtu = conn->peer->if_mtu;
- mtu -= conn->peer->hdrsize;
+ mtu = call->peer->if_mtu;
+ mtu -= call->peer->hdrsize;
jmax = rxrpc_rx_jumbo_max;
qsize = (window - 1) - call->rx_consumed;
rsize = max_t(int, call->rx_winsize - qsize, 0);
- *_rwind = rsize;
- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- ackinfo.maxMTU = htonl(mtu);
- ackinfo.rwind = htonl(rsize);
- ackinfo.jumbo_max = htonl(jmax);
-
- *ackp++ = 0;
- *ackp++ = 0;
- *ackp++ = 0;
- memcpy(ackp, &ackinfo, sizeof(ackinfo));
- return txb->ack.nAcks + 3 + sizeof(ackinfo);
+ txb->ack_rwind = rsize;
+ trailer->maxMTU = htonl(rxrpc_rx_mtu);
+ trailer->ifMTU = htonl(mtu);
+ trailer->rwind = htonl(rsize);
+ trailer->jumbo_max = htonl(jmax);
}
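The kvec indices used in rxrpc_fill_out_ack() imply the following ACK txbuf layout (inferred from this patch, not stated explicitly):

    /* kvec[0]: wire header + rxrpc_ackpacket
     * kvec[1]: soft-ACK table (ack->nAcks bytes; empty when window == wtop)
     * kvec[2]: 3 filler bytes + rxrpc_acktrailer
     */
    struct kvec kv[3] = {
            { whdr,   sizeof(*whdr) + sizeof(*ack) },
            { sackp,  ack->nAcks },
            { filler, 3 + sizeof(*trailer) },
    };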
/*
* Record the beginning of an RTT probe.
*/
-static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
- enum rxrpc_rtt_tx_trace why)
+static void rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ ktime_t now, enum rxrpc_rtt_tx_trace why)
{
unsigned long avail = call->rtt_avail;
int rtt_slot = 9;
@@ -155,47 +161,31 @@ static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
goto no_slot;
call->rtt_serial[rtt_slot] = serial;
- call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ call->rtt_sent_at[rtt_slot] = now;
smp_wmb(); /* Write data before avail bit */
set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
- return rtt_slot;
+ return;
no_slot:
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
- return -1;
-}
-
-/*
- * Cancel an RTT probe.
- */
-static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
- rxrpc_serial_t serial, int rtt_slot)
-{
- if (rtt_slot != -1) {
- clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
- smp_wmb(); /* Clear pending bit before setting slot */
- set_bit(rtt_slot, &call->rtt_avail);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
- }
}
/*
* Transmit an ACK packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
struct rxrpc_connection *conn;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len, n;
- int ret, rtt_slot = -1;
- u16 rwind;
+ ktime_t now;
+ int ret;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return -ECONNRESET;
+ return;
conn = call->conn;
@@ -203,55 +193,68 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- if (txb->ack.reason == RXRPC_ACK_PING)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
-
- n = rxrpc_fill_out_ack(conn, call, txb, &rwind);
- if (n == 0)
- return 0;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
- len = iov[0].iov_len;
+ msg.msg_flags = MSG_SPLICE_PAGES;
- serial = rxrpc_get_next_serial(conn);
- txb->wire.serial = htonl(serial);
- trace_rxrpc_tx_ack(call->debug_id, serial,
- ntohl(txb->ack.firstPacket),
- ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks,
- rwind);
+ whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
- if (txb->ack.reason == RXRPC_ACK_PING)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
+ txb->serial = rxrpc_get_next_serial(conn);
+ whdr->serial = htonl(txb->serial);
+ trace_rxrpc_tx_ack(call->debug_id, txb->serial,
+ ntohl(ack->firstPacket),
+ ntohl(ack->serial), ack->reason, ack->nAcks,
+ txb->ack_rwind);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);
- /* Grab the highest received seq as late as possible */
- txb->ack.previousPacket = htonl(call->rx_highest_seq);
-
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, txb->len);
+ rxrpc_local_dont_fragment(conn->local, false);
+ ret = do_udp_sendmsg(conn->local->socket, &msg, txb->len);
call->peer->last_tx_at = ktime_get_seconds();
if (ret < 0) {
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret,
rxrpc_tx_point_call_ack);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
+ trace_rxrpc_tx_packet(call->debug_id, whdr,
rxrpc_tx_point_call_ack);
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- call->peer->rtt_last_req = ktime_get_real();
+ now = ktime_get_real();
+ if (ack->reason == RXRPC_ACK_PING)
+ rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_ping);
+ if (txb->flags & RXRPC_REQUEST_ACK)
+ call->peer->rtt_last_req = now;
+ rxrpc_set_keepalive(call, now);
}
rxrpc_tx_backoff(call, ret);
+}
- if (!__rxrpc_call_is_complete(call)) {
- if (ret < 0)
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- rxrpc_set_keepalive(call);
+/*
+ * Queue an ACK for immediate transmission.
+ */
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
+{
+ struct rxrpc_txbuf *txb;
+
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+ return;
+
+ rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
+
+ txb = rxrpc_alloc_ack_txbuf(call, call->ackr_wtop - call->ackr_window);
+ if (!txb) {
+ kleave(" = -ENOMEM");
+ return;
}
- return ret;
+ txb->ack_why = why;
+
+ rxrpc_fill_out_ack(call, txb, ack_reason, serial);
+ call->ackr_nr_unacked = 0;
+ atomic_set(&call->ackr_nr_consumed, 0);
+ clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
+
+ trace_rxrpc_send_ack(call, why, ack_reason, serial);
+ rxrpc_send_ack_packet(call, txb);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
}
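The transmit path above now hands the txbuf's kvec array straight to UDP with MSG_SPLICE_PAGES, so the page-frag data can be spliced rather than copied; the core of that call sequence, as a sketch (msg_name/namelen are set to the peer as in the code above):

    struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES, };
    iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, txb->len);
    ret = do_udp_sendmsg(conn->local->socket, &msg, txb->len);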
/*
@@ -319,38 +322,22 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
}
/*
- * send a packet through the transport endpoint
+ * Prepare a (sub)packet for transmission.
*/
-int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_prepare_data_subpacket(struct rxrpc_call *call, struct rxrpc_txbuf *txb,
+ rxrpc_serial_t serial)
{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
enum rxrpc_req_ack_trace why;
struct rxrpc_connection *conn = call->conn;
- struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len;
- int ret, rtt_slot = -1;
_enter("%x,{%d}", txb->seq, txb->len);
- /* Each transmission of a Tx packet needs a new serial number */
- serial = rxrpc_get_next_serial(conn);
- txb->wire.serial = htonl(serial);
+ txb->serial = serial;
if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
txb->seq == 1)
- txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + txb->len;
- len = iov[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
-
- msg.msg_name = &call->peer->srx.transport;
- msg.msg_namelen = call->peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ whdr->userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
@@ -359,13 +346,13 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
* service call, lest OpenAFS incorrectly send us an ACK with some
* soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
+ if (txb->flags & RXRPC_REQUEST_ACK)
why = rxrpc_reqack_already_on;
- else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
+ else if ((txb->flags & RXRPC_LAST_PACKET) && rxrpc_sending_to_client(txb))
why = rxrpc_reqack_no_srv_last;
else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
why = rxrpc_reqack_ack_lost;
- else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
+ else if (txb->flags & RXRPC_TXBUF_RESENT)
why = rxrpc_reqack_retrans;
else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
why = rxrpc_reqack_slow_start;
@@ -381,42 +368,116 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
if (why != rxrpc_reqack_no_srv_last)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
+ txb->flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:
+ whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
+ whdr->serial = htonl(txb->serial);
+ whdr->cksum = txb->cksum;
+
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags, false);
+}
+
+/*
+ * Prepare a packet for transmission.
+ */
+static size_t rxrpc_prepare_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ rxrpc_serial_t serial;
+
+ /* Each transmission of a Tx packet needs a new serial number */
+ serial = rxrpc_get_next_serial(call->conn);
+
+ rxrpc_prepare_data_subpacket(call, txb, serial);
+
+ return txb->len;
+}
+
+/*
+ * Set timeouts after transmitting a packet.
+ */
+static void rxrpc_tstamp_data_packets(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ ktime_t now = ktime_get_real();
+ bool ack_requested = txb->flags & RXRPC_REQUEST_ACK;
+
+ call->tx_last_sent = now;
+ txb->last_sent = now;
+
+ if (ack_requested) {
+ rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_data);
+
+ call->peer->rtt_last_req = now;
+ if (call->peer->rtt_count > 1) {
+ ktime_t delay = rxrpc_get_rto_backoff(call->peer, false);
+
+ call->ack_lost_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_lost_ack);
+ }
+ }
+
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags)) {
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));
+
+ call->expect_rx_by = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
+ }
+
+ rxrpc_set_keepalive(call, now);
+}
+
+/*
+ * send a packet through the transport endpoint
+ */
+static int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxrpc_connection *conn = call->conn;
+ enum rxrpc_tx_point frag;
+ struct msghdr msg;
+ size_t len;
+ int ret;
+
+ _enter("%x,{%d}", txb->seq, txb->len);
+
+ len = rxrpc_prepare_data_packet(call, txb);
+
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
static int lose;
if ((lose++ & 7) == 7) {
ret = 0;
- trace_rxrpc_tx_data(call, txb->seq, serial,
- txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
- true);
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial,
+ txb->flags, true);
goto done;
}
}
- trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);
+ iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, len);
+
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = MSG_SPLICE_PAGES;
/* Track what we've attempted to transmit at least once so that the
* retransmission algorithm doesn't try to resend what we haven't sent
- * yet. However, this can race as we can receive an ACK before we get
- * to this point. But, OTOH, if we won't get an ACK mentioning this
- * packet unless the far side received it (though it could have
- * discarded it anyway and NAK'd it).
+ * yet.
*/
- cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);
+ if (txb->seq == call->tx_transmitted + 1)
+ call->tx_transmitted = txb->seq;
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
- if (txb->len >= call->peer->maxdata)
- goto send_fragmentable;
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
+ if (txb->len >= call->peer->maxdata) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ } else {
+ rxrpc_local_dont_fragment(conn->local, true);
+ frag = rxrpc_tx_point_call_data_nofrag;
+ }
+retry:
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
* to go out of the interface
@@ -429,46 +490,21 @@ dont_set_request_ack:
if (ret < 0) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret, frag);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
}
rxrpc_tx_backoff(call, ret);
- if (ret == -EMSGSIZE)
- goto send_fragmentable;
+ if (ret == -EMSGSIZE && frag == rxrpc_tx_point_call_data_nofrag) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ goto retry;
+ }
done:
if (ret >= 0) {
- call->tx_last_sent = txb->last_sent;
- if (txb->wire.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = txb->last_sent;
- if (call->peer->rtt_count > 1) {
- unsigned long nowj = jiffies, ack_lost_at;
-
- ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
- ack_lost_at += nowj;
- WRITE_ONCE(call->ack_lost_at, ack_lost_at);
- rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
- rxrpc_timer_set_for_lost_ack);
- }
- }
-
- if (txb->seq == 1 &&
- !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
- &call->flags)) {
- unsigned long nowj = jiffies, expect_rx_by;
-
- expect_rx_by = nowj + call->next_rx_timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
- rxrpc_timer_set_for_normal);
- }
-
- rxrpc_set_keepalive(call);
+ rxrpc_tstamp_data_packets(call, txb);
} else {
/* Cancel the call if the initial transmission fails,
* particularly if that's due to network routing issues that
@@ -482,41 +518,6 @@ done:
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
-
-send_fragmentable:
- /* attempt to send this message with fragmentation enabled */
- _debug("send fragment");
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
-
- switch (conn->local->srx.transport.family) {
- case AF_INET6:
- case AF_INET:
- rxrpc_local_dont_fragment(conn->local, false);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
- conn->peer->last_tx_at = ktime_get_seconds();
-
- rxrpc_local_dont_fragment(conn->local, true);
- break;
-
- default:
- BUG();
- }
-
- if (ret < 0) {
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_frag);
- } else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_frag);
- }
- rxrpc_tx_backoff(call, ret);
- goto done;
}
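The retry logic replaces the old send_fragmentable tail: DATA is first sent with DF set when it looks small enough, and only on -EMSGSIZE is DF cleared for a single resend. Distilled (illustrative, not verbatim):

    bool df = txb->len < call->peer->maxdata;
    retry:
            rxrpc_local_dont_fragment(conn->local, df);
            ret = do_udp_sendmsg(conn->local->socket, &msg, len);
            if (ret == -EMSGSIZE && df) {
                    df = false;     /* allow fragmentation and resend once */
                    goto retry;
            }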
/*
@@ -723,11 +724,9 @@ void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_instant_resend(call, txb);
}
} else {
- unsigned long now = jiffies;
- unsigned long resend_at = now + call->peer->rto_j;
+ ktime_t delay = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
- WRITE_ONCE(call->resend_at, resend_at);
- rxrpc_reduce_call_timer(call, resend_at, now,
- rxrpc_timer_set_for_send);
+ call->resend_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend_tx);
}
}
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 26dc2f26d92d..263a2251e3d2 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -52,9 +52,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
enum rxrpc_call_state state;
- unsigned long timeout = 0;
rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
+ long timeout = 0;
if (v == &rxnet->calls) {
seq_puts(seq,
@@ -76,10 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
state = rxrpc_call_state(call);
- if (state != RXRPC_CALL_SERVER_PREALLOC) {
- timeout = READ_ONCE(call->expect_rx_by);
- timeout -= jiffies;
- }
+ if (state != RXRPC_CALL_SERVER_PREALLOC)
+ timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());
acks_hard_ack = READ_ONCE(call->acks_hard_ack);
seq_printf(seq,
@@ -309,7 +307,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
peer->mtu,
now - peer->last_tx_at,
peer->srtt_us >> 3,
- jiffies_to_usecs(peer->rto_j));
+ peer->rto_us);
return 0;
}
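The timeout column in /proc is now a signed millisecond delta, which simply goes negative once the deadline has passed instead of wrapping as the old jiffies subtraction could:

    long timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by),
                                  ktime_get_real());   /* <0 if overdue */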
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index e8ee4af43ca8..4fe6b4d20ada 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -135,9 +135,9 @@ struct rxrpc_ackpacket {
/*
* ACK packets can have a further piece of information tagged on the end
*/
-struct rxrpc_ackinfo {
- __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
- __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
+struct rxrpc_acktrailer {
+ __be32 maxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
+ __be32 ifMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
__be32 rwind; /* Rx window size (packets) [AFS 3.4] */
__be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */
};
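Note that the rename is a shuffle, not just a new struct tag: the name maxMTU moves from the interface-MTU field to the Rx-MTU field (previously rxMTU), and the interface MTU becomes ifMTU; the on-wire layout is unchanged. Consumers still take the usable MTU as the smaller of the two hints:

    u32 mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));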
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index be61d6f5be8d..cdab7b7d08a0 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -11,8 +11,8 @@
#include <linux/net.h>
#include "ar-internal.h"
-#define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
-#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define RXRPC_RTO_MAX (120 * USEC_PER_SEC)
+#define RXRPC_TIMEOUT_INIT ((unsigned int)(1 * MSEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */
static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
@@ -22,7 +22,7 @@ static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
{
- return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+ return (peer->srtt_us >> 3) + peer->rttvar_us;
}
static u32 rxrpc_bound_rto(u32 rto)
@@ -124,7 +124,7 @@ static void rxrpc_set_rto(struct rxrpc_peer *peer)
/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
- peer->rto_j = rxrpc_bound_rto(rto);
+ peer->rto_us = rxrpc_bound_rto(rto);
}
static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
@@ -163,33 +163,33 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
spin_unlock(&peer->rtt_input_lock);
trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
- peer->srtt_us >> 3, peer->rto_j);
+ peer->srtt_us >> 3, peer->rto_us);
}
/*
- * Get the retransmission timeout to set in jiffies, backing it off each time
- * we retransmit.
+ * Get the retransmission timeout to set in nanoseconds, backing it off each
+ * time we retransmit.
*/
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
{
- u64 timo_j;
- u8 backoff = READ_ONCE(peer->backoff);
+ u64 timo_us;
+ u32 backoff = READ_ONCE(peer->backoff);
- timo_j = peer->rto_j;
- timo_j <<= backoff;
- if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
+ timo_us = peer->rto_us;
+ timo_us <<= backoff;
+ if (retrans && timo_us * 2 <= RXRPC_RTO_MAX)
WRITE_ONCE(peer->backoff, backoff + 1);
- if (timo_j < 1)
- timo_j = 1;
+ if (timo_us < 1)
+ timo_us = 1;
- return timo_j;
+ return ns_to_ktime(timo_us * NSEC_PER_USEC);
}
void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
{
- peer->rto_j = RXRPC_TIMEOUT_INIT;
- peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
+ peer->rto_us = RXRPC_TIMEOUT_INIT;
+ peer->mdev_us = RXRPC_TIMEOUT_INIT;
peer->backoff = 0;
//minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
}
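A worked pass through rxrpc_get_rto_backoff(), all in microseconds: with rto_us = 500000 (500ms) and backoff = 2, the timeout is 500000 << 2 = 2000000us (2s); since 2 * 2s is still within RXRPC_RTO_MAX (120s), a retransmission bumps backoff to 3 for next time, and the result is handed back as a ktime_t:

    u64 timo_us = 500000ULL << 2;                   /* 2s */
    ktime_t rto = ns_to_ktime(timo_us * NSEC_PER_USEC);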
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6b32d61d4cdc..f1a68270862d 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -145,16 +145,17 @@ error:
/*
* Work out how much data we can put in a packet.
*/
-static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- size_t shdr, buf_size, chunk;
+ struct rxrpc_txbuf *txb;
+ size_t shdr, space;
+
+ remain = min(remain, 65535 - sizeof(struct rxrpc_wire_header));
switch (call->conn->security_level) {
default:
- buf_size = chunk = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- shdr = 0;
- goto out;
+ space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
+ return rxrpc_alloc_data_txbuf(call, space, 0, gfp);
case RXRPC_SECURITY_AUTH:
shdr = sizeof(struct rxkad_level1_hdr);
break;
@@ -163,17 +164,16 @@ static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
break;
}
- buf_size = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN);
+ space = min_t(size_t, round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
+ space = round_up(space, RXKAD_ALIGN);
- chunk = buf_size - shdr;
- if (remain < chunk)
- buf_size = round_up(shdr + remain, RXKAD_ALIGN);
+ txb = rxrpc_alloc_data_txbuf(call, space, RXKAD_ALIGN, gfp);
+ if (!txb)
+ return NULL;
-out:
- *_buf_size = buf_size;
- *_data_size = chunk;
- *_offset = shdr;
- return 0;
+ txb->offset += shdr;
+ txb->space -= shdr;
+ return txb;
}
/*
@@ -251,7 +251,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
struct skcipher_request *req)
{
- struct rxkad_level1_hdr *hdr = (void *)txb->data;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxkad_level1_hdr *hdr = (void *)(whdr + 1);
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -259,7 +260,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
hdr->data_size = htonl((u32)check << 16 | txb->len);
txb->len += sizeof(struct rxkad_level1_hdr);
@@ -267,14 +268,14 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
- memset(txb->data + txb->offset, 0, pad);
+ memset(txb->kvec[0].iov_base + txb->offset, 0, pad);
txb->len += pad;
}
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
- sg_init_one(&sg, txb->data, 8);
+ sg_init_one(&sg, hdr, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
@@ -293,7 +294,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
- struct rxkad_level2_hdr *rxkhdr = (void *)txb->data;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxkad_level2_hdr *rxkhdr = (void *)(whdr + 1);
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -302,7 +304,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
rxkhdr->data_size = htonl(txb->len | (u32)check << 16);
rxkhdr->checksum = 0;
@@ -312,7 +314,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
- memset(txb->data + txb->offset, 0, pad);
+ memset(txb->kvec[0].iov_base + txb->offset, 0, pad);
txb->len += pad;
}
@@ -320,7 +322,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
token = call->conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- sg_init_one(&sg, txb->data, txb->len);
+ sg_init_one(&sg, rxkhdr, txb->len);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, txb->len, iv.x);
@@ -362,9 +364,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* calculate the security checksum */
- x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= txb->seq & 0x3fffffff;
- crypto.buf[0] = txb->wire.callNumber;
+ crypto.buf[0] = htonl(call->call_id);
crypto.buf[1] = htonl(x);
sg_init_one(&sg, crypto.buf, 8);
@@ -378,7 +380,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
y = (y >> 16) & 0xffff;
if (y == 0)
y = 1; /* zero checksums are not permitted */
- txb->wire.cksum = htons(y);
+ txb->cksum = htons(y);
switch (call->conn->security_level) {
case RXRPC_SECURITY_PLAIN:
@@ -726,7 +728,6 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
rxrpc_local_dont_fragment(conn->local, false);
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
- rxrpc_local_dont_fragment(conn->local, true);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
rxrpc_tx_point_rxkad_response);
@@ -1256,7 +1257,7 @@ const struct rxrpc_security rxkad = {
.free_preparse_server_key = rxkad_free_preparse_server_key,
.destroy_server_key = rxkad_destroy_server_key,
.init_connection_security = rxkad_init_connection_security,
- .how_much_data = rxkad_how_much_data,
+ .alloc_txbuf = rxkad_alloc_txbuf,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
.free_call_crypto = rxkad_free_call_crypto,
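A worked example of the new sizing for RXRPC_SECURITY_AUTH, using values assumed from the protocol headers (RXRPC_JUMBO_DATALEN = 1412, RXKAD_ALIGN = 8, sizeof(struct rxkad_level1_hdr) = 4):

    /* remain = 100:
     *   space = min(round_down(1412, 8), 100 + 4) = 104
     *   space = round_up(104, 8)                  = 104
     *   then txb->offset += 4 and txb->space -= 4, leaving exactly
     *   100 bytes of payload space padded only to the cipher block.
     */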
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5677d5690a02..6f765768c49c 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -240,7 +240,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_notify_end_tx_t notify_end_tx)
{
rxrpc_seq_t seq = txb->seq;
- bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
+ bool poke, last = txb->flags & RXRPC_LAST_PACKET;
rxrpc_inc_stat(call->rxnet, stat_tx_data);
@@ -336,7 +336,7 @@ reload:
do {
if (!txb) {
- size_t remain, bufsize, chunk, offset;
+ size_t remain;
_debug("alloc");
@@ -348,23 +348,11 @@ reload:
* region (enc blocksize), but the trailer is not.
*/
remain = more ? INT_MAX : msg_data_left(msg);
- ret = call->conn->security->how_much_data(call, remain,
- &bufsize, &chunk, &offset);
- if (ret < 0)
+ txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
+ if (IS_ERR(txb)) {
+ ret = PTR_ERR(txb);
goto maybe_error;
-
- _debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
-
- /* create a buffer that we can retain until it's ACK'd */
- ret = -ENOMEM;
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
- GFP_KERNEL);
- if (!txb)
- goto maybe_error;
-
- txb->offset = offset;
- txb->space -= offset;
- txb->space = min_t(size_t, chunk, txb->space);
+ }
}
_debug("append");
@@ -374,8 +362,8 @@ reload:
size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
_debug("add %zu", copy);
- if (!copy_from_iter_full(txb->data + txb->offset, copy,
- &msg->msg_iter))
+ if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
+ copy, &msg->msg_iter))
goto efault;
_debug("added");
txb->space -= copy;
@@ -394,18 +382,18 @@ reload:
/* add the packet to the send queue if it's now full */
if (!txb->space ||
(msg_data_left(msg) == 0 && !more)) {
- if (msg_data_left(msg) == 0 && !more) {
- txb->wire.flags |= RXRPC_LAST_PACKET;
- __set_bit(RXRPC_TXBUF_LAST, &txb->flags);
- }
+ if (msg_data_left(msg) == 0 && !more)
+ txb->flags |= RXRPC_LAST_PACKET;
else if (call->tx_top - call->acks_hard_ack <
call->tx_winsize)
- txb->wire.flags |= RXRPC_MORE_PACKETS;
+ txb->flags |= RXRPC_MORE_PACKETS;
ret = call->security->secure_packet(call, txb);
if (ret < 0)
goto out;
+ txb->kvec[0].iov_len += txb->len;
+ txb->len = txb->kvec[0].iov_len;
rxrpc_queue_packet(rx, call, txb, notify_end_tx);
txb = NULL;
}
@@ -621,7 +609,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call;
- unsigned long now, j;
bool dropped_lock = false;
int ret;
@@ -699,25 +686,21 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
switch (p.call.nr_timeouts) {
case 3:
- j = msecs_to_jiffies(p.call.timeouts.normal);
- if (p.call.timeouts.normal > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_rx_timo, j);
+ WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
fallthrough;
case 2:
- j = msecs_to_jiffies(p.call.timeouts.idle);
- if (p.call.timeouts.idle > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_req_timo, j);
+ WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
- j = p.call.timeouts.hard * HZ;
- now = jiffies;
- j += now;
- WRITE_ONCE(call->expect_term_by, j);
- rxrpc_reduce_call_timer(call, j, now,
- rxrpc_timer_set_for_hard);
+ ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);
+
+ WRITE_ONCE(call->expect_term_by,
+ ktime_add(ktime_get_real(), delay));
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
+
}
break;
}
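From userspace these three values arrive on the sendmsg() that starts the call; a hedged sketch of the assumed uapi, where RXRPC_SET_CALL_TIMEOUT carries up to three u32s in the order hard (seconds), idle (ms), normal (ms):

    u32 timeouts[3] = { 30, 1000, 20000 };  /* hard=30s, idle=1s, rx=20s */
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_RXRPC;
    cmsg->cmsg_type  = RXRPC_SET_CALL_TIMEOUT;
    cmsg->cmsg_len   = CMSG_LEN(sizeof(timeouts));
    memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));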
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index ecaeb4ecfb58..c9bedd0e2d86 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -15,6 +15,8 @@ static const unsigned int four = 4;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = 255;
+static const unsigned long one_ms = 1;
+static const unsigned long max_ms = 1000;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
@@ -28,24 +30,24 @@ static const unsigned long max_500 = 500;
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds but used in jiffies */
+ /* Values measured in milliseconds */
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_conn_expiry",
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index d43be8512386..b2a82ab756c2 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -14,45 +14,146 @@ static atomic_t rxrpc_txbuf_debug_ids;
atomic_t rxrpc_nr_txbuf;
/*
- * Allocate and partially initialise an I/O request structure.
+ * Allocate and partially initialise a data transmission buffer.
*/
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp)
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp)
{
+ struct rxrpc_wire_header *whdr;
struct rxrpc_txbuf *txb;
+ size_t total, hoff = 0;
+ void *buf;
txb = kmalloc(sizeof(*txb), gfp);
- if (txb) {
- INIT_LIST_HEAD(&txb->call_link);
- INIT_LIST_HEAD(&txb->tx_link);
- refcount_set(&txb->ref, 1);
- txb->call_debug_id = call->debug_id;
- txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
- txb->space = sizeof(txb->data);
- txb->len = 0;
- txb->offset = 0;
- txb->flags = 0;
- txb->ack_why = 0;
- txb->seq = call->tx_prepared + 1;
- txb->wire.epoch = htonl(call->conn->proto.epoch);
- txb->wire.cid = htonl(call->cid);
- txb->wire.callNumber = htonl(call->call_id);
- txb->wire.seq = htonl(txb->seq);
- txb->wire.type = packet_type;
- txb->wire.flags = call->conn->out_clientflag;
- txb->wire.userStatus = 0;
- txb->wire.securityIndex = call->security_ix;
- txb->wire._rsvd = 0;
- txb->wire.serviceId = htons(call->dest_srx.srx_service);
-
- trace_rxrpc_txbuf(txb->debug_id,
- txb->call_debug_id, txb->seq, 1,
- packet_type == RXRPC_PACKET_TYPE_DATA ?
- rxrpc_txbuf_alloc_data :
- rxrpc_txbuf_alloc_ack);
- atomic_inc(&rxrpc_nr_txbuf);
+ if (!txb)
+ return NULL;
+
+ if (data_align)
+ hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
+ total = hoff + sizeof(*whdr) + data_size;
+
+ mutex_lock(&call->conn->tx_data_alloc_lock);
+ buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+ mutex_unlock(&call->conn->tx_data_alloc_lock);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
+ }
+
+ whdr = buf + hoff;
+
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->last_sent = KTIME_MIN;
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = data_size;
+ txb->len = 0;
+ txb->offset = sizeof(*whdr);
+ txb->flags = call->conn->out_clientflag;
+ txb->ack_why = 0;
+ txb->seq = call->tx_prepared + 1;
+ txb->serial = 0;
+ txb->cksum = 0;
+ txb->nr_kvec = 1;
+ txb->kvec[0].iov_base = whdr;
+ txb->kvec[0].iov_len = sizeof(*whdr);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = htonl(txb->seq);
+ whdr->type = RXRPC_PACKET_TYPE_DATA;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_data);
+
+ atomic_inc(&rxrpc_nr_txbuf);
+ return txb;
+}
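
A worked example of the header-offset arithmetic above, runnable standalone; the 28-byte header size is my assumption for sizeof(struct rxrpc_wire_header):

#include <stdio.h>
#include <stddef.h>

#define ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	size_t whdr = 28;	/* assumed sizeof(struct rxrpc_wire_header) */
	size_t aligns[] = { 0, 8, 16, 64 };

	for (int i = 0; i < 4; i++) {
		size_t a = aligns[i];
		/* hoff pads the front so the data following the wire
		 * header lands on a data_align boundary */
		size_t hoff = a ? ROUND_UP(whdr, a) - whdr : 0;
		printf("data_align %2zu -> hoff %2zu, data starts at %zu\n",
		       a, hoff, hoff + whdr);
	}
	return 0;
}

For data_align == 16, this prints hoff 4 and a data offset of 32, i.e. the header is deliberately misplaced by 4 bytes so the payload after it is aligned.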
+
+/*
+ * Allocate and partially initialise an ACK packet.
+ */
+struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size)
+{
+ struct rxrpc_wire_header *whdr;
+ struct rxrpc_acktrailer *trailer;
+ struct rxrpc_ackpacket *ack;
+ struct rxrpc_txbuf *txb;
+ gfp_t gfp = rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS;
+ void *buf, *buf2 = NULL;
+ u8 *filler;
+
+ txb = kmalloc(sizeof(*txb), gfp);
+ if (!txb)
+ return NULL;
+
+ buf = page_frag_alloc(&call->local->tx_alloc,
+ sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
+ }
+
+ if (sack_size) {
+ buf2 = page_frag_alloc(&call->local->tx_alloc, sack_size, gfp);
+ if (!buf2) {
+ page_frag_free(buf);
+ kfree(txb);
+ return NULL;
+ }
}
+ whdr = buf;
+ ack = buf + sizeof(*whdr);
+ filler = buf + sizeof(*whdr) + sizeof(*ack) + 1;
+ trailer = buf + sizeof(*whdr) + sizeof(*ack) + 1 + 3;
+
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = 0;
+ txb->len = sizeof(*whdr) + sizeof(*ack) + 3 + sizeof(*trailer);
+ txb->offset = 0;
+ txb->flags = call->conn->out_clientflag;
+ txb->ack_rwind = 0;
+ txb->seq = 0;
+ txb->serial = 0;
+ txb->cksum = 0;
+ txb->nr_kvec = 3;
+ txb->kvec[0].iov_base = whdr;
+ txb->kvec[0].iov_len = sizeof(*whdr) + sizeof(*ack);
+ txb->kvec[1].iov_base = buf2;
+ txb->kvec[1].iov_len = sack_size;
+ txb->kvec[2].iov_base = filler;
+ txb->kvec[2].iov_len = 3 + sizeof(*trailer);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ get_page(virt_to_head_page(trailer));
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_ack);
+ atomic_inc(&rxrpc_nr_txbuf);
return txb;
}
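
The three-kvec layout assembled above, sketched with assumed struct sizes (28-byte wire header, 18-byte ack header, 16-byte trailer and a 64-byte SACK table are illustrative guesses, not measured values):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* All sizes below are assumptions for illustration only. */
	size_t whdr = 28, ack = 18, trailer = 16, sack_size = 64;

	printf("kvec[0]: buf  + 0,   len %zu  (wire header + ack header)\n",
	       whdr + ack);
	printf("kvec[1]: buf2 + 0,   len %zu  (SACK table, own frag)\n",
	       sack_size);
	/* the single byte at buf + whdr + ack is left out of every kvec,
	 * matching the "+ 1" in the allocation above */
	printf("kvec[2]: buf  + %zu, len %zu  (3 pad bytes + trailer)\n",
	       whdr + ack + 1, 3 + trailer);
	return 0;
}

Keeping the variable-length SACK table in its own page frag (kvec[1]) is what lets the fixed header and trailer share one allocation while the table is sized per call.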
@@ -71,12 +172,15 @@ void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
}
-static void rxrpc_free_txbuf(struct rcu_head *rcu)
+static void rxrpc_free_txbuf(struct rxrpc_txbuf *txb)
{
- struct rxrpc_txbuf *txb = container_of(rcu, struct rxrpc_txbuf, rcu);
+ int i;
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
rxrpc_txbuf_free);
+ for (i = 0; i < txb->nr_kvec; i++)
+ if (txb->kvec[i].iov_base)
+ page_frag_free(txb->kvec[i].iov_base);
kfree(txb);
atomic_dec(&rxrpc_nr_txbuf);
}
@@ -95,7 +199,7 @@ void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
dead = __refcount_dec_and_test(&txb->ref, &r);
trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
if (dead)
- call_rcu(&txb->rcu, rxrpc_free_txbuf);
+ rxrpc_free_txbuf(txb);
}
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 470c70deffe2..8180d0c12fce 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -737,16 +737,6 @@ config NET_ACT_SAMPLE
To compile this code as a module, choose M here: the
module will be called act_sample.
-config NET_ACT_IPT
- tristate "IPtables targets"
- depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES
- help
- Say Y here to be able to invoke iptables targets after successful
- classification.
-
- To compile this code as a module, choose M here: the
- module will be called act_ipt.
-
config NET_ACT_NAT
tristate "Stateless NAT"
depends on NET_CLS_ACT
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3e30d7260493..9ee622fb1160 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1363,7 +1363,7 @@ struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
if (rtnl_held)
rtnl_unlock();
- request_module("act_%s", act_name);
+ request_module(NET_ACT_ALIAS_PREFIX "%s", act_name);
if (rtnl_held)
rtnl_lock();
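
A sketch of the alias scheme this change relies on. The prefix string below is my assumption about NET_ACT_ALIAS_PREFIX (with matching NET_CLS_/NET_SCH_ prefixes), which is defined in a header outside this diff:

#include <stdio.h>

#define NET_ACT_ALIAS_PREFIX	"net-act-"	/* assumed value */
#define NET_SCH_ALIAS_PREFIX	"net-sch-"	/* assumed value */

int main(void)
{
	char alias[64];

	/* What request_module(NET_ACT_ALIAS_PREFIX "%s", "bpf") asks for: */
	snprintf(alias, sizeof(alias), NET_ACT_ALIAS_PREFIX "%s", "bpf");
	printf("%s\n", alias);		/* net-act-bpf */

	/* versus the old request_module("act_%s", ...), which would load
	 * any module that happened to be named act_<kind> */
	snprintf(alias, sizeof(alias), "act_%s", "bpf");
	printf("%s\n", alias);		/* act_bpf */
	return 0;
}

Each action module then declares the matching alias via MODULE_ALIAS_NET_ACT("bpf") and friends, as the per-module hunks below show, so only modules that opted in can be auto-loaded by kind.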
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 6cfee6658103..0e3cf11ae5fc 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -401,6 +401,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.init = tcf_bpf_init,
.size = sizeof(struct tcf_bpf),
};
+MODULE_ALIAS_NET_ACT("bpf");
static __net_init int bpf_init_net(struct net *net)
{
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index f8762756657d..0fce631e7c91 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -242,6 +242,7 @@ static struct tc_action_ops act_connmark_ops = {
.cleanup = tcf_connmark_cleanup,
.size = sizeof(struct tcf_connmark_info),
};
+MODULE_ALIAS_NET_ACT("connmark");
static __net_init int connmark_init_net(struct net *net)
{
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 7f8b1f2f2ed9..5cc8e407e791 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -709,6 +709,7 @@ static struct tc_action_ops act_csum_ops = {
.offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
};
+MODULE_ALIAS_NET_ACT("csum");
static __net_init int csum_init_net(struct net *net)
{
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6124d8b128d1..baac083fd8f1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1600,6 +1600,7 @@ static struct tc_action_ops act_ct_ops = {
.offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
};
+MODULE_ALIAS_NET_ACT("ct");
static __net_init int ct_init_net(struct net *net)
{
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index e620f9a84afe..5dd41a012110 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -363,6 +363,7 @@ static struct tc_action_ops act_ctinfo_ops = {
.cleanup= tcf_ctinfo_cleanup,
.size = sizeof(struct tcf_ctinfo),
};
+MODULE_ALIAS_NET_ACT("ctinfo");
static __net_init int ctinfo_init_net(struct net *net)
{
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 4af3b7ec249f..e949280eb800 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -296,6 +296,7 @@ static struct tc_action_ops act_gact_ops = {
.offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
};
+MODULE_ALIAS_NET_ACT("gact");
static __net_init int gact_init_net(struct net *net)
{
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index c681cd011afd..1dd74125398a 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -645,6 +645,7 @@ static struct tc_action_ops act_gate_ops = {
.offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
+MODULE_ALIAS_NET_ACT("gate");
static __net_init int gate_init_net(struct net *net)
{
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 0e867d13beb5..107c6d83dc5c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -889,6 +889,7 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.size = sizeof(struct tcf_ife_info),
};
+MODULE_ALIAS_NET_ACT("ife");
static __net_init int ife_init_net(struct net *net)
{
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6faa7d00da09..5b3814365924 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -635,6 +635,7 @@ static struct tc_action_ops act_mirred_ops = {
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
};
+MODULE_ALIAS_NET_ACT("mirred");
static __net_init int mirred_init_net(struct net *net)
{
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 34b8edb6cc77..44a37a71ae92 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -452,6 +452,7 @@ static struct tc_action_ops act_mpls_ops = {
.offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
+MODULE_ALIAS_NET_ACT("mpls");
static __net_init int mpls_init_net(struct net *net)
{
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index a180e724634e..d541f553805f 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -324,6 +324,7 @@ static struct tc_action_ops act_nat_ops = {
.cleanup = tcf_nat_cleanup,
.size = sizeof(struct tcf_nat),
};
+MODULE_ALIAS_NET_ACT("nat");
static __net_init int nat_init_net(struct net *net)
{
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 2ef22969f274..fc0a35a7b62a 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -515,11 +515,11 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
spin_unlock_bh(&p->tcf_lock);
return -ENOBUFS;
}
+ opt->nkeys = parms->tcfp_nkeys;
memcpy(opt->keys, parms->tcfp_keys,
flex_array_size(opt, keys, parms->tcfp_nkeys));
opt->index = p->tcf_index;
- opt->nkeys = parms->tcfp_nkeys;
opt->flags = parms->tcfp_flags;
opt->action = p->tcf_action;
opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
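
Why the reordering above matters, as far as I can tell: if the keys[] flexible array is annotated with __counted_by(nkeys) (an assumption about the UAPI struct, not visible in this hunk), a fortified memcpy() into it is bounds-checked against the current value of nkeys, so the count must be written before the array is filled. A minimal shape of that pattern:

#include <string.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(m)		/* no-op where the attribute is unsupported */
#endif

struct pedit_like {
	int nkeys;
	struct { int off, val; } keys[] __counted_by(nkeys);
};

static void fill(struct pedit_like *dst, const void *src, int n, size_t sz)
{
	dst->nkeys = n;		/* must precede the copy under __counted_by */
	memcpy(dst->keys, src, sz);
}

int main(void)
{
	struct { int off, val; } src[2] = { {0, 1}, {4, 2} };
	struct pedit_like *p = malloc(sizeof(*p) + sizeof(src));

	if (p)
		fill(p, src, 2, sizeof(src));
	free(p);
	return 0;
}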
@@ -620,6 +620,7 @@ static struct tc_action_ops act_pedit_ops = {
.offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
+MODULE_ALIAS_NET_ACT("pedit");
static __net_init int pedit_init_net(struct net *net)
{
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e119b4a3db9f..8555125ed34d 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -502,6 +502,7 @@ static struct tc_action_ops act_police_ops = {
.offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
};
+MODULE_ALIAS_NET_ACT("police");
static __net_init int police_init_net(struct net *net)
{
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index c5c61efe6db4..a69b53d54039 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -316,6 +316,7 @@ static struct tc_action_ops act_sample_ops = {
.offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
};
+MODULE_ALIAS_NET_ACT("sample");
static __net_init int sample_init_net(struct net *net)
{
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 0a3e92888295..f3abe0545989 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -209,6 +209,7 @@ static struct tc_action_ops act_simp_ops = {
.init = tcf_simp_init,
.size = sizeof(struct tcf_defact),
};
+MODULE_ALIAS_NET_ACT("simple");
static __net_init int simp_init_net(struct net *net)
{
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 754f78b35bb8..1f1d9ce3e968 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -426,6 +426,7 @@ static struct tc_action_ops act_skbedit_ops = {
.offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
+MODULE_ALIAS_NET_ACT("skbedit");
static __net_init int skbedit_init_net(struct net *net)
{
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index bcb673ab0008..39945b139c48 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -287,6 +287,7 @@ static struct tc_action_ops act_skbmod_ops = {
.cleanup = tcf_skbmod_cleanup,
.size = sizeof(struct tcf_skbmod),
};
+MODULE_ALIAS_NET_ACT("skbmod");
static __net_init int skbmod_init_net(struct net *net)
{
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 300b08aa8283..1536f8b16f1b 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -842,6 +842,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
+MODULE_ALIAS_NET_ACT("tunnel_key");
static __net_init int tunnel_key_init_net(struct net *net)
{
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 836183011a7c..22f4b1e8ade9 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -427,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = {
.offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
+MODULE_ALIAS_NET_ACT("vlan");
static __net_init int vlan_init_net(struct net *net)
{
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ff3d396a65aa..ca5676b2668e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -257,7 +257,7 @@ tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
#ifdef CONFIG_MODULES
if (rtnl_held)
rtnl_unlock();
- request_module("cls_%s", kind);
+ request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
if (rtnl_held)
rtnl_lock();
ops = __tcf_proto_lookup_ops(kind);
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index a1f56931330c..ecfaa4f9a04e 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -328,6 +328,7 @@ static struct tcf_proto_ops cls_basic_ops __read_mostly = {
.bind_class = basic_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("basic");
static int __init init_basic(void)
{
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 382c7a71f81f..5e83e890f6a4 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -693,6 +693,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.dump = cls_bpf_dump,
.bind_class = cls_bpf_bind_class,
};
+MODULE_ALIAS_NET_CLS("bpf");
static int __init cls_bpf_init_mod(void)
{
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 7ee8dbf49ed0..424252982d6a 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -209,6 +209,7 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
.dump = cls_cgroup_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("cgroup");
static int __init init_cgroup_cls(void)
{
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6ab317b48d6c..5502998aace7 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -702,6 +702,7 @@ static struct tcf_proto_ops cls_flow_ops __read_mostly = {
.walk = flow_walk,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("flow");
static int __init cls_flow_init(void)
{
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6ee7064c82fc..e1314674b4a9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -3659,6 +3659,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.owner = THIS_MODULE,
.flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
};
+MODULE_ALIAS_NET_CLS("flower");
static int __init cls_fl_init(void)
{
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index afc534ee0a18..cdddc8695228 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -433,6 +433,7 @@ static struct tcf_proto_ops cls_fw_ops __read_mostly = {
.bind_class = fw_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("fw");
static int __init init_fw(void)
{
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index c4ed11df6254..9f1e62ca508d 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -398,6 +398,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
.bind_class = mall_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("matchall");
static int __init cls_mall_init(void)
{
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 12a505db4183..b9c58c040c30 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -671,6 +671,7 @@ static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.bind_class = route4_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("route");
static int __init init_route4(void)
{
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 289e1755c26b..9412d88a99bc 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1453,6 +1453,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.bind_class = u32_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("u32");
static int __init init_u32(void)
{
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 87f6e3c6daa8..65e05b0c98e4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -228,7 +228,7 @@ int qdisc_set_default(const char *name)
if (!ops) {
/* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
@@ -1275,7 +1275,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
* go away in the mean time.
*/
rtnl_unlock();
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
rtnl_lock();
ops = qdisc_lookup_ops(kind);
if (ops != NULL) {
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9cff99558694..edee926ccde8 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -3103,6 +3103,7 @@ static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
.dump_stats = cake_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cake");
static int __init cake_module_init(void)
{
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index beece8e82c23..69001eff0315 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -546,6 +546,7 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
.dump = cbs_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cbs");
static struct notifier_block cbs_device_notifier = {
.notifier_call = cbs_dev_notifier,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ae1da08e268f..ea108030c6b4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -498,6 +498,7 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.dump_stats = choke_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("choke");
static int __init choke_module_init(void)
{
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index d7a4874543de..ecb3f164bb25 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Codel - The Controlled-Delay Active Queue Management algorithm
*
@@ -7,37 +8,6 @@
* Implemented on linux by :
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
* Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the authors may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, provided that this notice is retained in full, this
- * software may be distributed under the terms of the GNU General
- * Public License ("GPL") version 2, in which case the provisions of the
- * GPL apply INSTEAD OF those given above.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
*/
#include <linux/module.h>
@@ -287,6 +257,7 @@ static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
.dump_stats = codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("codel");
static int __init codel_module_init(void)
{
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 097740a9afea..c69b999fae17 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -481,6 +481,7 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
.destroy = drr_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("drr");
static int __init drr_init(void)
{
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 4808159a5466..2e4bef713b6a 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -500,6 +500,7 @@ static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
.dump = etf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("etf");
static int __init etf_module_init(void)
{
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index f7c88495946b..835b4460b448 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -812,6 +812,7 @@ static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
.dump = ets_qdisc_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ets");
static int __init ets_init(void)
{
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3a31c47fea9b..cdf23ff16f40 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1264,6 +1264,7 @@ static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
.dump_stats = fq_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq");
static int __init fq_module_init(void)
{
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 8c4fee063436..79f9d6de6c85 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -717,6 +717,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq_codel");
static int __init fq_codel_module_init(void)
{
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9b3e9262040b..ff5336493777 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -27,6 +27,7 @@
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
+#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>
@@ -409,7 +410,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
- int quota = READ_ONCE(dev_tx_weight);
+ int quota = READ_ONCE(net_hotdata.dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8c61eb3dc943..79ba9dc70254 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -930,6 +930,7 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.dump = gred_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("gred");
static int __init gred_module_init(void)
{
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 16c45da4036a..4e626df742d7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1679,6 +1679,7 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct hfsc_sched),
.owner = THIS_MODULE
};
+MODULE_ALIAS_NET_SCH("hfsc");
static int __init
hfsc_init(void)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d26cd436cbe3..3f906df1435b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -702,6 +702,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.dump_stats = hhf_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("hhf");
static int __init hhf_module_init(void)
{
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 7349233eaa9b..93e6fb56f3b5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -2166,6 +2166,7 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.dump = htb_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("htb");
static int __init htb_module_init(void)
{
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 5fa9eaa79bfc..c2ef9dcf91d2 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -168,6 +168,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.ingress_block_get = ingress_ingress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ingress");
struct clsact_sched_data {
struct tcf_block *ingress_block;
@@ -344,6 +345,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.egress_block_get = clsact_egress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("clsact");
static int __init ingress_module_init(void)
{
@@ -368,6 +370,5 @@ static void __exit ingress_module_exit(void)
module_init(ingress_module_init);
module_exit(ingress_module_exit);
-MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 43e53ee00a56..225353fbb3f1 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -774,6 +774,7 @@ static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
.dump = mqprio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("mqprio");
static int __init mqprio_module_init(void)
{
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index d66d5f0ec080..79e93a19d5fa 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -395,6 +395,7 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.dump = multiq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("multiq");
static int __init multiq_module_init(void)
{
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fa678eb88528..edc72962ae63 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -1293,6 +1293,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.dump = netem_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("netem");
static int __init netem_module_init(void)
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 2da6250ec346..1764059b0635 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -556,6 +556,7 @@ static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
.dump_stats = pie_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("pie");
static int __init pie_module_init(void)
{
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 992f0c8d7988..cefb65201e17 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -213,6 +213,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.reset = qdisc_reset_queue,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("plug");
static int __init plug_module_init(void)
{
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8ecdd3ef6f8e..cc30f7a32f1a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -418,6 +418,7 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
.dump = prio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("prio");
static int __init prio_module_init(void)
{
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 48a604c320c7..d584c0c25899 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1521,6 +1521,7 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.destroy = qfq_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("qfq");
static int __init qfq_init(void)
{
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 607b6c8b3a9b..b5f096588fae 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -548,6 +548,7 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
.dump_stats = red_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("red");
static int __init red_module_init(void)
{
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 1871a1c0224d..b717e15a3a17 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -709,6 +709,7 @@ static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
.dump_stats = sfb_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfb");
static int __init sfb_module_init(void)
{
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index eb77558fa367..e66f4afb920d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -925,6 +925,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.dump = sfq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfq");
static int __init sfq_module_init(void)
{
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 28beb11762d8..b4dd626c309c 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -292,6 +292,7 @@ static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
.destroy = skbprio_destroy,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("skbprio");
static int __init skbprio_module_init(void)
{
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd09c..c5de70efdc86 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -40,6 +40,8 @@ static struct static_key_false taprio_have_working_mqprio;
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_SUPPORTED_FLAGS \
+ (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
@@ -408,19 +410,6 @@ static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
return entry;
}
-static bool taprio_flags_valid(u32 flags)
-{
- /* Make sure no other flag bits are set. */
- if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
- TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- /* txtime-assist and full offload are mutually exclusive */
- if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
- (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- return true;
-}
-
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
@@ -1031,7 +1020,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] =
NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
- [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_FLAGS] =
+ NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};
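
The split of validation duties after this change, modelled standalone. The flag values are my reading of the TCA_TAPRIO_ATTR_FLAG_* UAPI bits (0x1 and 0x2):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_TXTIME_ASSIST	0x1u	/* assumed TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST */
#define FLAG_FULL_OFFLOAD	0x2u	/* assumed TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD */
#define SUPPORTED_FLAGS		(FLAG_TXTIME_ASSIST | FLAG_FULL_OFFLOAD)

/* What NLA_POLICY_MASK() now rejects in the netlink core, before
 * taprio_change() ever runs. */
static bool policy_ok(uint32_t flags)
{
	return !(flags & ~SUPPORTED_FLAGS);
}

/* The semantic check that remains open-coded in taprio_change(). */
static bool excl_ok(uint32_t flags)
{
	return (flags & SUPPORTED_FLAGS) != SUPPORTED_FLAGS;
}

int main(void)
{
	for (uint32_t f = 0; f < 8; f++)
		printf("flags=%#x policy_ok=%d excl_ok=%d\n",
		       f, policy_ok(f), excl_ok(f));
	return 0;
}

Unknown bits are now refused with attribute-level extack by the policy, which is why taprio_flags_valid() and taprio_new_flags() can be deleted further down.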
@@ -1815,33 +1805,6 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
return 0;
}
-/* The semantics of the 'flags' argument in relation to 'change()'
- * requests, are interpreted following two rules (which are applied in
- * this order): (1) an omitted 'flags' argument is interpreted as
- * zero; (2) the 'flags' of a "running" taprio instance cannot be
- * changed.
- */
-static int taprio_new_flags(const struct nlattr *attr, u32 old,
- struct netlink_ext_ack *extack)
-{
- u32 new = 0;
-
- if (attr)
- new = nla_get_u32(attr);
-
- if (old != TAPRIO_FLAGS_INVALID && old != new) {
- NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!taprio_flags_valid(new)) {
- NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
- return -EINVAL;
- }
-
- return new;
-}
-
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -1852,6 +1815,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
unsigned long flags;
+ u32 taprio_flags;
ktime_t start;
int i, err;
@@ -1863,12 +1827,28 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
- err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
- q->flags, extack);
- if (err < 0)
- return err;
+ /* The semantics of the 'flags' argument in relation to 'change()'
+ * requests, are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+ taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;
- q->flags = err;
+ /* txtime-assist and full offload are mutually exclusive */
+ if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
+ (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS],
+ "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive");
+ return -EINVAL;
+ }
+
+ if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing 'flags' of a running schedule is not supported");
+ return -EOPNOTSUPP;
+ }
+ q->flags = taprio_flags;
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
@@ -2548,6 +2528,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.dump_stats = taprio_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("taprio");
static struct notifier_block taprio_device_notifier = {
.notifier_call = taprio_dev_notifier,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index dd6b1a723bf7..f1d09183ae63 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -608,6 +608,7 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
.dump = tbf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("tbf");
static int __init tbf_module_init(void)
{
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index eb05131ff1dd..23359e522273 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -507,6 +507,7 @@ done:
}
static const struct inet_diag_handler sctp_diag_handler = {
+ .owner = THIS_MODULE,
.dump = sctp_diag_dump,
.dump_one = sctp_diag_dump_one,
.idiag_get_info = sctp_diag_get_info,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 94c6dd53cd62..e849f368ed91 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1495,17 +1495,11 @@ static __init int sctp_init(void)
/* Allocate bind_bucket and chunk caches. */
status = -ENOBUFS;
- sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
- sizeof(struct sctp_bind_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_bucket_cachep = KMEM_CACHE(sctp_bind_bucket, SLAB_HWCACHE_ALIGN);
if (!sctp_bucket_cachep)
goto out;
- sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
- sizeof(struct sctp_chunk),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_chunk_cachep = KMEM_CACHE(sctp_chunk, SLAB_HWCACHE_ALIGN);
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6b9fcdb0952a..c67679a41044 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
+#include <net/rps.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 0f53a5c6fd9d..4b52b3b159c0 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1046,7 +1046,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
int rc = SMC_CLC_DECL_NOSMCDDEV;
struct smcd_dev *smcd;
int i = 1, entry = 1;
- bool is_virtual;
+ bool is_emulated;
u16 chid;
if (smcd_indicated(ini->smc_type_v1))
@@ -1058,12 +1058,12 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
chid = smc_ism_get_chid(smcd);
if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
continue;
- is_virtual = __smc_ism_is_virtual(chid);
+ is_emulated = __smc_ism_is_emulated(chid);
if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
- if (is_virtual && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
+ if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
/* It's the last GID-CHID entry left in CLC
- * Proposal SMC-Dv2 extension, but a virtual
+ * Proposal SMC-Dv2 extension, but an Emulated-
* ISM device will take two entries. So give
* it up and try the next potential ISM device.
*/
@@ -1073,7 +1073,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
ini->is_smcd = true;
rc = 0;
i++;
- entry = is_virtual ? entry + 2 : entry + 1;
+ entry = is_emulated ? entry + 2 : entry + 1;
if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
break;
}
@@ -1414,10 +1414,10 @@ static int smc_connect_ism(struct smc_sock *smc,
if (rc)
return rc;
- if (__smc_ism_is_virtual(ini->ism_chid[ini->ism_selected]))
+ if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
ini->ism_peer_gid[ini->ism_selected].gid_ext =
ntohll(aclc->d1.gid_ext);
- /* for non-virtual ISM devices, peer gid_ext remains 0. */
+ /* for non-Emulated-ISM devices, peer gid_ext remains 0. */
}
ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
@@ -2118,10 +2118,10 @@ static void smc_check_ism_v2_match(struct smc_init_info *ini,
if (smc_ism_get_chid(smcd) == proposed_chid &&
!smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
- if (__smc_ism_is_virtual(proposed_chid))
+ if (__smc_ism_is_emulated(proposed_chid))
ini->ism_peer_gid[*matches].gid_ext =
proposed_gid->gid_ext;
- /* non-virtual ISM's peer gid_ext remains 0. */
+ /* non-Emulated-ISM's peer gid_ext remains 0. */
ini->ism_dev[*matches] = smcd;
(*matches)++;
break;
@@ -2171,10 +2171,10 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
smcd_gid.gid_ext = 0;
chid = ntohs(smcd_v2_ext->gidchid[i].chid);
- if (__smc_ism_is_virtual(chid)) {
+ if (__smc_ism_is_emulated(chid)) {
if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
- /* each virtual ISM device takes two GID-CHID
+ /* each Emulated-ISM device takes two GID-CHID
* entries and CHID of the second entry repeats
* that of the first entry.
*
diff --git a/net/smc/smc.h b/net/smc/smc.h
index df64efd2dee8..18c8b7870198 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -56,11 +56,11 @@ enum smc_state { /* possible states of an SMC socket */
};
enum smc_supplemental_features {
- SMC_SPF_VIRT_ISM_DEV = 0,
+ SMC_SPF_EMULATED_ISM_DEV = 0,
};
#define SMC_FEATURE_MASK \
- (BIT(SMC_SPF_VIRT_ISM_DEV))
+ (BIT(SMC_SPF_EMULATED_ISM_DEV))
struct smc_link_group;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 9a13709bea1c..e55026c7529c 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -952,8 +952,8 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
gidchids[entry].chid =
htons(smc_ism_get_chid(ini->ism_dev[i]));
gidchids[entry].gid = htonll(smcd_gid.gid);
- if (smc_ism_is_virtual(smcd)) {
- /* a virtual ISM device takes two
+ if (smc_ism_is_emulated(smcd)) {
+ /* an Emulated-ISM device takes two
* entries. CHID of the second entry
* repeats that of the first entry.
*/
@@ -1055,7 +1055,7 @@ smcd_clc_prep_confirm_accept(struct smc_connection *conn,
clc->d1.chid = htons(chid);
if (eid && eid[0])
memcpy(clc->d1.eid, eid, SMC_MAX_EID_LEN);
- if (__smc_ism_is_virtual(chid))
+ if (__smc_ism_is_emulated(chid))
clc->d1.gid_ext = htonll(smcd_gid.gid_ext);
len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
if (first_contact) {
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index a9f9bdd26dcd..7cc7070b9772 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -175,7 +175,7 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
#define SMCD_CLC_MAX_V2_GID_ENTRIES 8 /* max # of CHID-GID entries in CLC
* proposal SMC-Dv2 extension.
* each ISM device takes one entry and
- * each virtual ISM takes two entries.
+ * each Emulated-ISM takes two entries
*/
struct smc_clc_msg_proposal_area {
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index e4c858411207..9b84d5897aa5 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1535,7 +1535,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
if ((!peer_gid->gid ||
(lgr->peer_gid.gid == peer_gid->gid &&
- !smc_ism_is_virtual(dev) ? 1 :
+ !smc_ism_is_emulated(dev) ? 1 :
lgr->peer_gid.gid_ext == peer_gid->gid_ext)) &&
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
if (peer_gid->gid) /* peer triggered termination */
@@ -1881,7 +1881,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
lgr->smcd != smcismdev)
return false;
- if (smc_ism_is_virtual(smcismdev) &&
+ if (smc_ism_is_emulated(smcismdev) &&
lgr->peer_gid.gid_ext != peer_gid->gid_ext)
return false;
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 5a33908015f3..6fdb2d96777a 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -255,6 +255,7 @@ static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler smc_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_SMC,
.dump = smc_diag_handler_dump,
};
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index ffff40c30a06..165cd013404b 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -15,7 +15,7 @@
#include "smc.h"
-#define SMC_VIRTUAL_ISM_CHID_MASK 0xFF00
+#define SMC_EMULATED_ISM_CHID_MASK 0xFF00
#define SMC_ISM_IDENT_MASK 0x00FFFF
struct smcd_dev_list { /* List of SMCD devices */
@@ -66,10 +66,10 @@ static inline int smc_ism_write(struct smcd_dev *smcd, u64 dmb_tok,
return rc < 0 ? rc : 0;
}
-static inline bool __smc_ism_is_virtual(u16 chid)
+static inline bool __smc_ism_is_emulated(u16 chid)
{
/* CHIDs in range of 0xFF00 to 0xFFFF are reserved
- * for virtual ISM device.
+ * for Emulated-ISM device.
*
* loopback-ism: 0xFFFF
* virtio-ism: 0xFF00 ~ 0xFFFE
@@ -77,11 +77,11 @@ static inline bool __smc_ism_is_virtual(u16 chid)
return ((chid & 0xFF00) == 0xFF00);
}
-static inline bool smc_ism_is_virtual(struct smcd_dev *smcd)
+static inline bool smc_ism_is_emulated(struct smcd_dev *smcd)
{
u16 chid = smcd->ops->get_chid(smcd);
- return __smc_ism_is_virtual(chid);
+ return __smc_ism_is_emulated(chid);
}
#endif
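
The CHID range test above, exercised standalone (the 0xFFFF loopback-ism and 0xFF00 virtio-ism examples come from the comment in the source; 0x1234 is a made-up native CHID):

#include <stdint.h>
#include <stdio.h>

/* CHIDs 0xFF00..0xFFFF are reserved for Emulated-ISM devices. */
static int is_emulated(uint16_t chid)
{
	return (chid & 0xFF00) == 0xFF00;
}

int main(void)
{
	uint16_t samples[] = { 0xFFFF,	/* loopback-ism */
			       0xFF00,	/* virtio-ism, low end */
			       0x1234 };/* a plain ISM device */

	for (int i = 0; i < 3; i++)
		printf("chid %#06x -> %s\n", samples[i],
		       is_emulated(samples[i]) ? "emulated" : "native");
	return 0;
}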
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 9f2c58c5a86b..2adb92b8c469 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -806,6 +806,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
struct net_device *dev;
+ /* Newly created netns do not have devices.
+ * Do not even acquire rtnl.
+ */
+ if (list_empty(&net->dev_base_head))
+ return;
+
+ /* Note: this might not be needed, because smc_pnet_netdev_event()
+ * also calls smc_pnet_add_base_pnetid() when handling the
+ * NETDEV_UP event.
+ */
rtnl_lock();
for_each_netdev(net, dev)
smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
diff --git a/net/socket.c b/net/socket.c
index ed3df2f749bf..7e9c8fc9a5b4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -343,7 +343,7 @@ static void init_inodecache(void)
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
BUG_ON(sock_inode_cachep == NULL);
}
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index be1c4003d67d..bb0d71eb02a6 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -32,16 +32,17 @@ config TIPC_MEDIA_UDP
bool "IP/UDP media type support"
depends on TIPC
select NET_UDP_TUNNEL
+ default y
help
Saying Y here will enable support for running TIPC over IP/UDP
- bool
- default y
+
config TIPC_CRYPTO
bool "TIPC encryption support"
depends on TIPC
select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
+ default y
help
Saying Y here will enable support for TIPC encryption.
All TIPC messages will be encrypted/decrypted by using the currently most
@@ -49,8 +50,6 @@ config TIPC_CRYPTO
entering the TIPC stack.
Key setting from user-space is performed via netlink by a user program
(e.g. the iproute2 'tipc' tool).
- bool
- default y
config TIPC_DIAG
tristate "TIPC: socket monitoring interface"
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index ee49a9f1dd4f..18e1636aa036 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -18,5 +18,5 @@ tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
-
-obj-$(CONFIG_TIPC_DIAG) += diag.o
+obj-$(CONFIG_TIPC_DIAG) += tipc_diag.o
+tipc_diag-y += diag.o
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 878415c43527..5a526ebafeb4 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1079,30 +1079,27 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
b = tipc_bearer_find(net, name);
if (!b) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "Bearer not found");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
err = tipc_udp_nl_bearer_add(b,
attrs[TIPC_NLA_BEARER_UDP_OPTS]);
- if (err) {
- rtnl_unlock();
- return err;
- }
}
#endif
+out:
rtnl_unlock();
- return 0;
+ return err;
}
int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
index 18733451c9e0..54dde8c4e4d4 100644
--- a/net/tipc/diag.c
+++ b/net/tipc/diag.c
@@ -95,6 +95,7 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
}
static const struct sock_diag_handler tipc_sock_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_TIPC,
.dump = tipc_sock_diag_handler_dump,
};
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3105abe97bb9..c1e890a82434 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -86,8 +86,6 @@ struct tipc_bclink_entry {
* @lock: rwlock governing access to structure
* @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
- * @inputq: pointer to input queue containing messages for msg event
- * @namedq: pointer to name table input queue with name table messages
* @active_links: bearer ids of active links, used as index into links[] array
* @links: array containing references to all links to node
* @bc_entry: broadcast link entry
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index bb1118d02f95..7e4135db5816 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -80,7 +80,6 @@ struct sockaddr_pair {
* @phdr: preformatted message header used when sending messages
* @cong_links: list of congested links
* @publications: list of publications for port
- * @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
* @conn_timeout: the time we can wait for an unresponded setup request
* @probe_unacked: probe has not received ack yet
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 28b232f281ab..8b5d04210d7c 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -16,11 +16,6 @@ config UNIX
Say Y unless you know what you are doing.
-config UNIX_SCM
- bool
- depends on UNIX
- default y
-
config AF_UNIX_OOB
bool
depends on UNIX
diff --git a/net/unix/Makefile b/net/unix/Makefile
index 20491825b4d0..4ddd125c4642 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -11,5 +11,3 @@ unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o
obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
unix_diag-y := diag.o
-
-obj-$(CONFIG_UNIX_SCM) += scm.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0748e7ea5210..5b41e2321209 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,8 +118,6 @@
#include <linux/btf_ids.h>
#include <linux/bpf-cgroup.h>
-#include "scm.h"
-
static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
@@ -980,11 +978,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
- u = unix_sk(sk);
+ u = unix_sk(sk);
+ u->inflight = 0;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
- atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
@@ -1775,6 +1773,52 @@ out:
return err;
}
+/* The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+ struct user_struct *user = current_user();
+
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ return false;
+}
+
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ if (too_many_unix_fds(current))
+ return -ETOOMANYREFS;
+
+ /* Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
+
+ return 0;
+}
+
+static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ scm->fp = UNIXCB(skb).fp;
+ UNIXCB(skb).fp = NULL;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+}
+
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->fp = scm_fp_dup(UNIXCB(skb).fp);
@@ -1822,6 +1866,21 @@ static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
spin_unlock(&unix_gc_lock);
}
+static void unix_destruct_scm(struct sk_buff *skb)
+{
+ struct scm_cookie scm;
+
+ memset(&scm, 0, sizeof(scm));
+ scm.pid = UNIXCB(skb).pid;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
+
+ /* Alas, it calls VFS */
+ /* So fscking what? fput() had been SMP-safe since the last Summer */
+ scm_destroy(&scm);
+ sock_wfree(skb);
+}
+
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
@@ -1908,11 +1967,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
long timeo;
int err;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
+ wait_for_unix_gc(scm.fp);
+
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
@@ -2184,11 +2244,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
bool fds_sent = false;
int data_len;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
+ wait_for_unix_gc(scm.fp);
+
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
diff --git a/net/unix/diag.c b/net/unix/diag.c
index be19827eca36..ae39538c5042 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -322,6 +322,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler unix_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_UNIX,
.dump = unix_diag_handler_dump,
};
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a81880dac7b..fa39b6265238 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -81,12 +81,80 @@
#include <net/scm.h>
#include <net/tcp_states.h>
-#include "scm.h"
+struct unix_sock *unix_get_socket(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+
+ /* Socket ? */
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ const struct proto_ops *ops;
+ struct sock *sk = sock->sk;
+
+ ops = READ_ONCE(sock->ops);
-/* Internal data structures and random procedures: */
+ /* PF_UNIX ? */
+ if (sk && ops && ops->family == PF_UNIX)
+ return unix_sk(sk);
+ }
+
+ return NULL;
+}
+DEFINE_SPINLOCK(unix_gc_lock);
+unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
-static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+static LIST_HEAD(gc_inflight_list);
+
+/* Keep the number of times in flight count for the file
+ * descriptor if it is for an AF_UNIX socket.
+ */
+void unix_inflight(struct user_struct *user, struct file *filp)
+{
+ struct unix_sock *u = unix_get_socket(filp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (u) {
+ if (!u->inflight) {
+ WARN_ON_ONCE(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ WARN_ON_ONCE(list_empty(&u->link));
+ }
+ u->inflight++;
+
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ }
+
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+
+ spin_unlock(&unix_gc_lock);
+}
+
+void unix_notinflight(struct user_struct *user, struct file *filp)
+{
+ struct unix_sock *u = unix_get_socket(filp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (u) {
+ WARN_ON_ONCE(!u->inflight);
+ WARN_ON_ONCE(list_empty(&u->link));
+
+ u->inflight--;
+ if (!u->inflight)
+ list_del_init(&u->link);
+
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+ }
+
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+
+ spin_unlock(&unix_gc_lock);
+}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
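unix_inflight()/unix_notinflight() count, per socket and per user, how many times an AF_UNIX socket's file sits unreceived inside some socket's queue. The garbage collector exists because such in-flight references can form cycles that ordinary refcounting never frees. A minimal userspace reproducer of such a cycle, using only standard calls — after the two close() calls, sp[1]'s file is referenced only by the skb parked in its own receive queue:

/* Create the reference cycle the unix GC exists to reclaim: a socket
 * queued in its own receive queue.  sendmsg() on sp[0] delivers to the
 * peer sp[1], so sending sp[1]'s own fd makes it hold itself; after
 * both fds are closed, only the GC can free it.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        int sp[2];
        char dummy = 'x';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg;

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sp))
                return 1;

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &sp[1], sizeof(int)); /* sp[1] into its own queue */

        if (sendmsg(sp[0], &msg, 0) != 1)
                perror("sendmsg");

        close(sp[0]);
        close(sp[1]);
        return 0;
}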
@@ -105,20 +173,15 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
while (nfd--) {
/* Get the socket the fd matches if it indeed does so */
- struct sock *sk = unix_get_socket(*fp++);
-
- if (sk) {
- struct unix_sock *u = unix_sk(sk);
+ struct unix_sock *u = unix_get_socket(*fp++);

- /* Ignore non-candidates, they could
- * have been added to the queues after
- * starting the garbage collection
- */
- if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
- hit = true;
- func(u);
- }
+ /* Ignore non-candidates, they could have been added
+ * to the queues after starting the garbage collection
+ */
+ if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
+ hit = true;
+ func(u);
}
if (hit && hitlist != NULL) {
@@ -151,7 +214,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
/* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
- BUG_ON(!list_empty(&u->link));
+ WARN_ON_ONCE(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
@@ -166,17 +229,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
static void dec_inflight(struct unix_sock *usk)
{
- atomic_long_dec(&usk->inflight);
+ usk->inflight--;
}
static void inc_inflight(struct unix_sock *usk)
{
- atomic_long_inc(&usk->inflight);
+ usk->inflight++;
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
- atomic_long_inc(&u->inflight);
+ u->inflight++;
+
/* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
@@ -186,40 +250,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
}
static bool gc_in_progress;
-#define UNIX_INFLIGHT_TRIGGER_GC 16000
-
-void wait_for_unix_gc(void)
-{
- /* If number of inflight sockets is insane,
- * force a garbage collect right now.
- * Paired with the WRITE_ONCE() in unix_inflight(),
- * unix_notinflight() and gc_in_progress().
- */
- if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
- !READ_ONCE(gc_in_progress))
- unix_gc();
- wait_event(unix_gc_wait, gc_in_progress == false);
-}
-/* The external entry point: unix_gc() */
-void unix_gc(void)
+static void __unix_gc(struct work_struct *work)
{
- struct sk_buff *next_skb, *skb;
- struct unix_sock *u;
- struct unix_sock *next;
struct sk_buff_head hitlist;
- struct list_head cursor;
+ struct unix_sock *u, *next;
LIST_HEAD(not_cycle_list);
+ struct list_head cursor;
spin_lock(&unix_gc_lock);
- /* Avoid a recursive GC. */
- if (gc_in_progress)
- goto out;
-
- /* Paired with READ_ONCE() in wait_for_unix_gc(). */
- WRITE_ONCE(gc_in_progress, true);
-
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
@@ -237,14 +277,12 @@ void unix_gc(void)
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
- long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
- inflight_refs = atomic_long_read(&u->inflight);
- BUG_ON(inflight_refs < 1);
- BUG_ON(total_refs < inflight_refs);
- if (total_refs == inflight_refs) {
+ WARN_ON_ONCE(!u->inflight);
+ WARN_ON_ONCE(total_refs < u->inflight);
+ if (total_refs == u->inflight) {
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
@@ -271,7 +309,7 @@ void unix_gc(void)
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
- if (atomic_long_read(&u->inflight) > 0) {
+ if (u->inflight) {
list_move_tail(&u->link, &not_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL);
@@ -306,38 +344,50 @@ void unix_gc(void)
spin_unlock(&unix_gc_lock);
- /* We need io_uring to clean its registered files, ignore all io_uring
- * originated skbs. It's fine as io_uring doesn't keep references to
- * other io_uring instances and so killing all other files in the cycle
- * will put all io_uring references forcing it to go through normal
- * release.path eventually putting registered files.
- */
- skb_queue_walk_safe(&hitlist, skb, next_skb) {
- if (skb->destructor == io_uring_destruct_scm) {
- __skb_unlink(skb, &hitlist);
- skb_queue_tail(&skb->sk->sk_receive_queue, skb);
- }
- }
-
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
- /* There could be io_uring registered files, just push them back to
- * the inflight list
- */
- list_for_each_entry_safe(u, next, &gc_candidates, link)
- list_move_tail(&u->link, &gc_inflight_list);
-
/* All candidates should have been detached by now. */
- BUG_ON(!list_empty(&gc_candidates));
+ WARN_ON_ONCE(!list_empty(&gc_candidates));
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, false);
- wake_up(&unix_gc_wait);
-
- out:
spin_unlock(&unix_gc_lock);
}
+
+static DECLARE_WORK(unix_gc_work, __unix_gc);
+
+void unix_gc(void)
+{
+ WRITE_ONCE(gc_in_progress, true);
+ queue_work(system_unbound_wq, &unix_gc_work);
+}
+
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
+#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
+
+void wait_for_unix_gc(struct scm_fp_list *fpl)
+{
+ /* If number of inflight sockets is insane,
+ * force a garbage collect right now.
+ *
+ * Paired with the WRITE_ONCE() in unix_inflight(),
+ * unix_notinflight(), and __unix_gc().
+ */
+ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ !READ_ONCE(gc_in_progress))
+ unix_gc();
+
+ /* Penalise users who want to send AF_UNIX sockets
+ * but whose sockets have not been received yet.
+ */
+ if (!fpl || !fpl->count_unix ||
+ READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+ return;
+
+ if (READ_ONCE(gc_in_progress))
+ flush_work(&unix_gc_work);
+}
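Taken together, the rework above replaces the old synchronous unix_gc() with a workqueue item, and wait_for_unix_gc() now throttles selectively: anyone may trigger an async collection once more than 16000 sockets are in flight, but only senders who are themselves passing AF_UNIX sockets while already holding an outsized in-flight count are made to wait for it. A sketch of just that decision logic, with the kernel machinery reduced to plain integers (SCM_MAX_FD is 253 in <net/scm.h>, so the per-user threshold works out to 2024):

/* Sketch of the wait_for_unix_gc() policy with kernel machinery
 * stubbed out: whether the caller would (a) kick an async GC and
 * (b) block until the running GC finishes.
 */
#include <stdbool.h>

#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define SCM_MAX_FD               253
#define UNIX_INFLIGHT_SANE_USER  (SCM_MAX_FD * 8)

struct gc_decision {
        bool kick_async_gc; /* queue_work(system_unbound_wq, ...) */
        bool wait_for_gc;   /* flush_work(&unix_gc_work) */
};

static struct gc_decision gc_policy(unsigned int tot_inflight,
                                    unsigned int user_inflight,
                                    unsigned int unix_fds_in_msg,
                                    bool gc_in_progress)
{
        struct gc_decision d = { 0 };

        /* Global trigger: too many sockets in flight system-wide. */
        if (tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
                d.kick_async_gc = true;

        /* Per-user penalty: only senders who both pass AF_UNIX sockets
         * and already have an unreasonable number in flight must wait
         * for the collection that is (now) running.
         */
        if (unix_fds_in_msg && user_inflight >= UNIX_INFLIGHT_SANE_USER)
                d.wait_for_gc = gc_in_progress || d.kick_async_gc;

        return d;
}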
diff --git a/net/unix/scm.c b/net/unix/scm.c
deleted file mode 100644
index 822ce0d0d791..000000000000
--- a/net/unix/scm.c
+++ /dev/null
@@ -1,159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/net.h>
-#include <linux/fs.h>
-#include <net/af_unix.h>
-#include <net/scm.h>
-#include <linux/init.h>
-#include <linux/io_uring.h>
-
-#include "scm.h"
-
-unsigned int unix_tot_inflight;
-EXPORT_SYMBOL(unix_tot_inflight);
-
-LIST_HEAD(gc_inflight_list);
-EXPORT_SYMBOL(gc_inflight_list);
-
-DEFINE_SPINLOCK(unix_gc_lock);
-EXPORT_SYMBOL(unix_gc_lock);
-
-struct sock *unix_get_socket(struct file *filp)
-{
- struct sock *u_sock = NULL;
- struct inode *inode = file_inode(filp);
-
- /* Socket ? */
- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
- struct socket *sock = SOCKET_I(inode);
- const struct proto_ops *ops = READ_ONCE(sock->ops);
- struct sock *s = sock->sk;
-
- /* PF_UNIX ? */
- if (s && ops && ops->family == PF_UNIX)
- u_sock = s;
- }
-
- return u_sock;
-}
-EXPORT_SYMBOL(unix_get_socket);
-
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-void unix_inflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- if (atomic_long_inc_return(&u->inflight) == 1) {
- BUG_ON(!list_empty(&u->link));
- list_add_tail(&u->link, &gc_inflight_list);
- } else {
- BUG_ON(list_empty(&u->link));
- }
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
- spin_unlock(&unix_gc_lock);
-}
-
-void unix_notinflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- BUG_ON(!atomic_long_read(&u->inflight));
- BUG_ON(list_empty(&u->link));
-
- if (atomic_long_dec_and_test(&u->inflight))
- list_del_init(&u->link);
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
- spin_unlock(&unix_gc_lock);
-}
-
-/*
- * The "user->unix_inflight" variable is protected by the garbage
- * collection lock, and we just read it locklessly here. If you go
- * over the limit, there might be a tiny race in actually noticing
- * it across threads. Tough.
- */
-static inline bool too_many_unix_fds(struct task_struct *p)
-{
- struct user_struct *user = current_user();
-
- if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
- return false;
-}
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- if (too_many_unix_fds(current))
- return -ETOOMANYREFS;
-
- /*
- * Need to duplicate file references for the sake of garbage
- * collection. Otherwise a socket in the fps might become a
- * candidate for GC while the skb is not yet queued.
- */
- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
- if (!UNIXCB(skb).fp)
- return -ENOMEM;
-
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->user, scm->fp->fp[i]);
- return 0;
-}
-EXPORT_SYMBOL(unix_attach_fds);
-
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- scm->fp = UNIXCB(skb).fp;
- UNIXCB(skb).fp = NULL;
-
- for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
-}
-EXPORT_SYMBOL(unix_detach_fds);
-
-void unix_destruct_scm(struct sk_buff *skb)
-{
- struct scm_cookie scm;
-
- memset(&scm, 0, sizeof(scm));
- scm.pid = UNIXCB(skb).pid;
- if (UNIXCB(skb).fp)
- unix_detach_fds(&scm, skb);
-
- /* Alas, it calls VFS */
- /* So fscking what? fput() had been SMP-safe since the last Summer */
- scm_destroy(&scm);
- sock_wfree(skb);
-}
-EXPORT_SYMBOL(unix_destruct_scm);
-
-void io_uring_destruct_scm(struct sk_buff *skb)
-{
- unix_destruct_scm(skb);
-}
-EXPORT_SYMBOL(io_uring_destruct_scm);
diff --git a/net/unix/scm.h b/net/unix/scm.h
deleted file mode 100644
index 5a255a477f16..000000000000
--- a/net/unix/scm.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef NET_UNIX_SCM_H
-#define NET_UNIX_SCM_H
-
-extern struct list_head gc_inflight_list;
-extern spinlock_t unix_gc_lock;
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-
-#endif
diff --git a/net/vmw_vsock/diag.c b/net/vmw_vsock/diag.c
index 2e29994f92ff..ab87ef66c1e8 100644
--- a/net/vmw_vsock/diag.c
+++ b/net/vmw_vsock/diag.c
@@ -157,6 +157,7 @@ static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
}
static const struct sock_diag_handler vsock_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_VSOCK,
.dump = vsock_diag_handler_dump,
};
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index ceb9174c5c3d..3414b2c3abcc 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,7 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2018-2023 Intel Corporation
+ * Copyright 2018-2024 Intel Corporation
*/
#include <linux/export.h>
@@ -27,11 +27,10 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
if (WARN_ON(!chan))
return;
- chandef->chan = chan;
- chandef->freq1_offset = chan->freq_offset;
- chandef->center_freq2 = 0;
- chandef->edmg.bw_config = 0;
- chandef->edmg.channels = 0;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = chan,
+ .freq1_offset = chan->freq_offset,
+ };
switch (chan_type) {
case NL80211_CHAN_NO_HT:
@@ -56,6 +55,73 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(cfg80211_chandef_create);
+struct cfg80211_per_bw_puncturing_values {
+ u8 len;
+ const u16 *valid_values;
+};
+
+static const u16 puncturing_values_80mhz[] = {
+ 0x8, 0x4, 0x2, 0x1
+};
+
+static const u16 puncturing_values_160mhz[] = {
+ 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
+};
+
+static const u16 puncturing_values_320mhz[] = {
+ 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
+ 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
+ 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
+};
+
+#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
+ { \
+ .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
+ .valid_values = puncturing_values_ ## _bw ## mhz \
+ }
+
+static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
+};
+
+static bool valid_puncturing_bitmap(const struct cfg80211_chan_def *chandef)
+{
+ u32 idx, i, start_freq, primary_center = chandef->chan->center_freq;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ idx = 0;
+ start_freq = chandef->center_freq1 - 40;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ idx = 1;
+ start_freq = chandef->center_freq1 - 80;
+ break;
+ case NL80211_CHAN_WIDTH_320:
+ idx = 2;
+ start_freq = chandef->center_freq1 - 160;
+ break;
+ default:
+ return chandef->punctured == 0;
+ }
+
+ if (!chandef->punctured)
+ return true;
+
+ /* check if primary channel is punctured */
+ if (chandef->punctured & (u16)BIT((primary_center - start_freq) / 20))
+ return false;
+
+ for (i = 0; i < per_bw_puncturing[idx].len; i++) {
+ if (per_bw_puncturing[idx].valid_values[i] == chandef->punctured)
+ return true;
+ }
+
+ return false;
+}
+
static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
{
int max_contiguous = 0;
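valid_puncturing_bitmap() is a structural check (the primary 20 MHz subchannel must never be punctured) followed by a table lookup. The same logic, lifted out of the kernel for the 80 MHz case as a self-contained sketch — frequencies are in MHz, and bit N of the bitmap covers the Nth 20 MHz subchannel from the low edge of the segment:

/* Standalone version of the 80 MHz branch of valid_puncturing_bitmap(). */
#include <stdbool.h>
#include <stdio.h>

static const unsigned short puncturing_values_80mhz[] = { 0x8, 0x4, 0x2, 0x1 };

static bool valid_punct_80mhz(unsigned short bitmap,
                              unsigned int chan_center,  /* control chan, MHz */
                              unsigned int center_freq1) /* segment center, MHz */
{
        unsigned int start_freq = center_freq1 - 40;
        unsigned int i, n;

        if (!bitmap)
                return true;

        /* the primary 20 MHz subchannel must not be punctured */
        if (bitmap & (1U << ((chan_center - start_freq) / 20)))
                return false;

        n = sizeof(puncturing_values_80mhz) / sizeof(puncturing_values_80mhz[0]);
        for (i = 0; i < n; i++)
                if (puncturing_values_80mhz[i] == bitmap)
                        return true;

        return false;
}

int main(void)
{
        /* Channel 36 (5180 MHz) as primary in an 80 MHz block centred on
         * 5210 MHz: puncturing the top 20 MHz (bit 3) is allowed,
         * puncturing the bottom 20 MHz (bit 0) would hit the primary.
         */
        printf("0x8 -> %d\n", valid_punct_80mhz(0x8, 5180, 5210)); /* 1 */
        printf("0x1 -> %d\n", valid_punct_80mhz(0x1, 5180, 5210)); /* 0 */
        return 0;
}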
@@ -317,72 +383,81 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
!cfg80211_edmg_chandef_valid(chandef))
return false;
- return true;
+ return valid_puncturing_bitmap(chandef);
}
EXPORT_SYMBOL(cfg80211_chandef_valid);
-static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
- u32 *pri40, u32 *pri80, u32 *pri160)
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *c,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured)
{
- int tmp;
+ int pri_width = nl80211_chan_width_to_mhz(primary_chan_width);
+ int width = cfg80211_chandef_get_width(c);
+ u32 control = c->chan->center_freq;
+ u32 center = c->center_freq1;
+ u16 _punct = 0;
- switch (c->width) {
- case NL80211_CHAN_WIDTH_40:
- *pri40 = c->center_freq1;
- *pri80 = 0;
- *pri160 = 0;
- break;
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- *pri160 = 0;
- *pri80 = c->center_freq1;
- /* n_P20 */
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 20 + 40 * tmp;
- break;
- case NL80211_CHAN_WIDTH_160:
- *pri160 = c->center_freq1;
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 60 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 40 + 80 * tmp;
- break;
- case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 140 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 120 + 80 * tmp;
- /* n_P160 */
- tmp /= 2;
- *pri160 = c->center_freq1 - 80 + 160 * tmp;
- break;
- default:
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(pri_width < 0 || width < 0))
+ return -1;
+
+ /* not intended to be called this way, can't determine */
+ if (WARN_ON_ONCE(pri_width > width))
+ return -1;
+
+ if (!punctured)
+ punctured = &_punct;
+
+ *punctured = c->punctured;
+
+ while (width > pri_width) {
+ unsigned int bits_to_drop = width / 20 / 2;
+
+ if (control > center) {
+ center += width / 4;
+ *punctured >>= bits_to_drop;
+ } else {
+ center -= width / 4;
+ *punctured &= (1 << bits_to_drop) - 1;
+ }
+ width /= 2;
}
+
+ return center;
}
+EXPORT_SYMBOL(cfg80211_chandef_primary);
-const struct cfg80211_chan_def *
-cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
- const struct cfg80211_chan_def *c2)
+static const struct cfg80211_chan_def *
+check_chandef_primary_compat(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2,
+ enum nl80211_chan_width primary_chan_width)
{
- u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80, c1_pri160, c2_pri160;
+ u16 punct_c1 = 0, punct_c2 = 0;
+
+ /* check primary is compatible -> error if not */
+ if (cfg80211_chandef_primary(c1, primary_chan_width, &punct_c1) !=
+ cfg80211_chandef_primary(c2, primary_chan_width, &punct_c2))
+ return ERR_PTR(-EINVAL);
+
+ if (punct_c1 != punct_c2)
+ return ERR_PTR(-EINVAL);
+
+ /* assumes c1 is smaller width, if that was just checked -> done */
+ if (c1->width == primary_chan_width)
+ return c2;
+
+ /* otherwise continue checking the next width */
+ return NULL;
+}
+
+static const struct cfg80211_chan_def *
+_cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
/* If they are identical, return */
if (cfg80211_chandef_identical(c1, c2))
- return c1;
+ return c2;
/* otherwise, must have same control channel */
if (c1->chan != c2->chan)
@@ -396,53 +471,76 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
return NULL;
/*
- * can't be compatible if one of them is 5 or 10 MHz,
+ * can't be compatible if one of them is 5/10 MHz or S1G
* but they don't have the same width.
*/
- if (c1->width == NL80211_CHAN_WIDTH_5 ||
- c1->width == NL80211_CHAN_WIDTH_10 ||
- c2->width == NL80211_CHAN_WIDTH_5 ||
- c2->width == NL80211_CHAN_WIDTH_10)
+#define NARROW_OR_S1G(width) ((width) == NL80211_CHAN_WIDTH_5 || \
+ (width) == NL80211_CHAN_WIDTH_10 || \
+ (width) == NL80211_CHAN_WIDTH_1 || \
+ (width) == NL80211_CHAN_WIDTH_2 || \
+ (width) == NL80211_CHAN_WIDTH_4 || \
+ (width) == NL80211_CHAN_WIDTH_8 || \
+ (width) == NL80211_CHAN_WIDTH_16)
+
+ if (NARROW_OR_S1G(c1->width) || NARROW_OR_S1G(c2->width))
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c1->width == NL80211_CHAN_WIDTH_20)
+ /*
+ * Make sure that c1 is always the narrower one, so that later
+ * we either return NULL or c2 and don't have to check both
+ * directions.
+ */
+ if (c1->width > c2->width)
+ swap(c1, c2);
+
+ /*
+ * No further checks needed if the "narrower" one is only 20 MHz.
+ * Here "narrower" includes being a 20 MHz non-HT channel vs. a
+ * 20 MHz HT (or later) one.
+ */
+ if (c1->width <= NL80211_CHAN_WIDTH_20)
return c2;
- if (c2->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c2->width == NL80211_CHAN_WIDTH_20)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_40);
+ if (ret)
+ return ret;
- chandef_primary_freqs(c1, &c1_pri40, &c1_pri80, &c1_pri160);
- chandef_primary_freqs(c2, &c2_pri40, &c2_pri80, &c2_pri160);
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_80);
+ if (ret)
+ return ret;
- if (c1_pri40 != c2_pri40)
+ /*
+ * If c1 is 80+80, then c2 is 160 or higher, but that cannot
+ * match. If c2 was also 80+80 it was already either accepted
+ * or rejected above (identical or not, respectively.)
+ */
+ if (c1->width == NL80211_CHAN_WIDTH_80P80)
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_40)
- return c2;
-
- if (c2->width == NL80211_CHAN_WIDTH_40)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_160);
+ if (ret)
+ return ret;
- if (c1_pri80 != c2_pri80)
- return NULL;
+ /*
+ * Getting here would mean they're both wider than 160, have the
+ * same primary 160, but are not identical - this cannot happen
+ * since they must be 320 (no wider chandefs exist, at least yet.)
+ */
+ WARN_ON_ONCE(1);
- if (c1->width == NL80211_CHAN_WIDTH_80 &&
- c2->width > NL80211_CHAN_WIDTH_80)
- return c2;
+ return NULL;
+}
- if (c2->width == NL80211_CHAN_WIDTH_80 &&
- c1->width > NL80211_CHAN_WIDTH_80)
- return c1;
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
- WARN_ON(!c1_pri160 && !c2_pri160);
- if (c1_pri160 && c2_pri160 && c1_pri160 != c2_pri160)
+ ret = _cfg80211_chandef_compatible(c1, c2);
+ if (IS_ERR(ret))
return NULL;
-
- if (c1->width > c2->width)
- return c1;
- return c2;
+ return ret;
}
EXPORT_SYMBOL(cfg80211_chandef_compatible);
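The compatibility rewrite above hinges on cfg80211_chandef_primary(): instead of per-width special cases, one loop repeatedly halves the bandwidth, recentres on whichever half holds the control channel, and narrows the puncturing bitmap to match. The frequency part of that loop as a standalone worked example (puncturing omitted for brevity, frequencies in MHz):

/* The halving loop from cfg80211_chandef_primary(), frequencies only:
 * narrow `width` down to `pri_width`, at each step recentring on the
 * half that contains the control channel.
 */
#include <stdio.h>

static int primary_center(int control, int center, int width, int pri_width)
{
        while (width > pri_width) {
                if (control > center)
                        center += width / 4;
                else
                        center -= width / 4;
                width /= 2;
        }
        return center;
}

int main(void)
{
        /* 160 MHz block centred on 5250, control channel 36 (5180):
         * primary 80 centres on 5210, primary 40 on 5190, primary 20
         * on the control channel itself.
         */
        printf("pri80: %d\n", primary_center(5180, 5250, 160, 80)); /* 5210 */
        printf("pri40: %d\n", primary_center(5180, 5250, 160, 40)); /* 5190 */
        printf("pri20: %d\n", primary_center(5180, 5250, 160, 20)); /* 5180 */
        return 0;
}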
@@ -1047,7 +1145,7 @@ EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
u32 center_freq, u32 bandwidth,
- u32 prohibited_flags)
+ u32 prohibited_flags, bool monitor)
{
struct ieee80211_channel *c;
u32 freq, start_freq, end_freq;
@@ -1057,7 +1155,11 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
c = ieee80211_get_channel_khz(wiphy, freq);
- if (!c || c->flags & prohibited_flags)
+ if (!c)
+ return false;
+ if (monitor && c->flags & IEEE80211_CHAN_CAN_MONITOR)
+ continue;
+ if (c->flags & prohibited_flags)
return false;
}
@@ -1117,9 +1219,9 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
return true;
}
-bool cfg80211_chandef_usable(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef,
- u32 prohibited_flags)
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags, bool monitor)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -1281,14 +1383,22 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
if (!cfg80211_secondary_chans_ok(wiphy,
ieee80211_chandef_to_khz(chandef),
- width, prohibited_flags))
+ width, prohibited_flags, monitor))
return false;
if (!chandef->center_freq2)
return true;
return cfg80211_secondary_chans_ok(wiphy,
MHZ_TO_KHZ(chandef->center_freq2),
- width, prohibited_flags);
+ width, prohibited_flags, monitor);
+}
+
+bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags,
+ false);
}
EXPORT_SYMBOL(cfg80211_chandef_usable);
@@ -1532,72 +1642,3 @@ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
}
}
EXPORT_SYMBOL(wdev_chandef);
-
-struct cfg80211_per_bw_puncturing_values {
- u8 len;
- const u16 *valid_values;
-};
-
-static const u16 puncturing_values_80mhz[] = {
- 0x8, 0x4, 0x2, 0x1
-};
-
-static const u16 puncturing_values_160mhz[] = {
- 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
-};
-
-static const u16 puncturing_values_320mhz[] = {
- 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
- 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
- 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
-};
-
-#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
- { \
- .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
- .valid_values = puncturing_values_ ## _bw ## mhz \
- }
-
-static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
-};
-
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef)
-{
- u32 idx, i, start_freq;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80:
- idx = 0;
- start_freq = chandef->center_freq1 - 40;
- break;
- case NL80211_CHAN_WIDTH_160:
- idx = 1;
- start_freq = chandef->center_freq1 - 80;
- break;
- case NL80211_CHAN_WIDTH_320:
- idx = 2;
- start_freq = chandef->center_freq1 - 160;
- break;
- default:
- *bitmap = 0;
- break;
- }
-
- if (!*bitmap)
- return true;
-
- /* check if primary channel is punctured */
- if (*bitmap & (u16)BIT((chandef->chan->center_freq - start_freq) / 20))
- return false;
-
- for (i = 0; i < per_bw_puncturing[idx].len; i++)
- if (per_bw_puncturing[idx].valid_values[i] == *bitmap)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(cfg80211_valid_disable_subchannel_bitmap);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 13657a85cf61..118f2f619828 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -362,7 +362,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct cfg80211_auth_request *req);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct cfg80211_assoc_request *req);
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
const u8 *ie, int ie_len, u16 reason,
@@ -491,6 +492,9 @@ bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
struct ieee80211_channel *chan,
bool primary_only);
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags, bool monitor);
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
@@ -549,9 +553,53 @@ int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask);
+/**
+ * struct cfg80211_colocated_ap - colocated AP information
+ *
+ * @list: linked list to all colocated APs
+ * @bssid: BSSID of the reported AP
+ * @ssid: SSID of the reported AP
+ * @ssid_len: length of the ssid
+ * @center_freq: frequency the reported AP is on
+ * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
+ * that operate in the same channel as the reported AP and that might be
+ * detected by a STA receiving this frame, are transmitting unsolicited
+ * Probe Response frames every 20 TUs
+ * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
+ * @same_ssid: the reported AP has the same SSID as the reporting AP
+ * @multi_bss: the reported AP is part of a multiple BSSID set
+ * @transmitted_bssid: the reported AP is the transmitting BSSID
+ * @colocated_ess: all the APs that share the same ESS as the reported AP are
+ * colocated and can be discovered via legacy bands.
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @short_ssid: the short SSID for this SSID
+ * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
+ */
+struct cfg80211_colocated_ap {
+ struct list_head list;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u32 short_ssid;
+ u32 center_freq;
+ u8 unsolicited_probe:1,
+ oct_recommended:1,
+ same_ssid:1,
+ multi_bss:1,
+ transmitted_bssid:1,
+ colocated_ess:1,
+ short_ssid_valid:1;
+ s8 psd_20;
+};
+
#if IS_ENABLED(CONFIG_CFG80211_KUNIT_TEST)
#define EXPORT_SYMBOL_IF_CFG80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
#define VISIBLE_IF_CFG80211_KUNIT
+void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list);
+
+int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list);
+
size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
const u8 *subie, size_t subie_len,
u8 *new_ie, size_t new_ie_len);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f635a8b6ca2e..4052041a19ea 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
* Copyright (c) 2015 Intel Deutschland GmbH
- * Copyright (C) 2019-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2019-2020, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -241,12 +241,12 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
char *buf = kmalloc(128, gfp);
if (buf) {
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
- "keyid=%d %scast addr=%pM)", key_id,
- key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni",
- addr);
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
+ wrqu.data.length =
+ sprintf(buf, "MLME-MICHAELMICFAILURE."
+ "indication(keyid=%d %scast addr=%pM)",
+ key_id, key_type == NL80211_KEYTYPE_GROUP
+ ? "broad" : "uni", addr);
wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
kfree(buf);
}
@@ -325,28 +325,136 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
p1[i] &= p2[i];
}
-/* Note: caller must cfg80211_put_bss() regardless of result */
-int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_assoc_request *req)
+static int
+cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a,
+ const struct ieee80211_multi_link_elem *mle_b,
+ struct netlink_ext_ack *extack)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err, i, j;
- lockdep_assert_wiphy(wdev->wiphy);
+ const struct ieee80211_mle_basic_common_info *common_a, *common_b;
+ common_a = (const void *)mle_a->variable;
+ common_b = (const void *)mle_b->variable;
+
+ if (memcmp(common_a->mld_mac_addr, common_b->mld_mac_addr, ETH_ALEN)) {
+ NL_SET_ERR_MSG(extack, "AP MLD address mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML medium sync delay mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_cap((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_cap((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML capabilities mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_mld_capa_op((const u8 *)mle_a) !=
+ ieee80211_mle_get_mld_capa_op((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link MLD capabilities/ops mismatch");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cfg80211_mlme_check_mlo(struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ const struct ieee80211_multi_link_elem *mles[ARRAY_SIZE(req->links)] = {};
+ int i;
+
+ if (req->link_id < 0)
+ return 0;
+
+ if (!req->links[req->link_id].bss) {
+ NL_SET_ERR_MSG(extack, "no BSS for assoc link");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ const struct cfg80211_bss_ies *ies;
+ const struct element *ml;
- for (i = 1; i < ARRAY_SIZE(req->links); i++) {
if (!req->links[i].bss)
continue;
- for (j = 0; j < i; j++) {
- if (req->links[i].bss == req->links[j].bss)
- return -EINVAL;
+
+ if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr)) {
+ NL_SET_ERR_MSG(extack, "BSSID must not be our address");
+ req->links[i].error = -EINVAL;
+ goto error;
}
- if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
- return -EINVAL;
+ ies = rcu_dereference(req->links[i].bss->ies);
+ ml = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+ ies->data, ies->len);
+ if (!ml) {
+ NL_SET_ERR_MSG(extack, "MLO BSS w/o ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ if (!ieee80211_mle_type_ok(ml->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ ml->datalen - 1)) {
+ NL_SET_ERR_MSG(extack, "BSS with invalid ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ mles[i] = (const void *)(ml->data + 1);
+
+ if (ieee80211_mle_get_link_id((const u8 *)mles[i]) != i) {
+ NL_SET_ERR_MSG(extack, "link ID mismatch");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (WARN_ON(!mles[req->link_id]))
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ if (i == req->link_id || !req->links[i].bss)
+ continue;
+
+ if (WARN_ON(!mles[i]))
+ goto error;
+
+ if (cfg80211_mlme_check_mlo_compat(mles[req->link_id], mles[i],
+ extack)) {
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
}
+ rcu_read_unlock();
+ return 0;
+error:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Note: caller must cfg80211_put_bss() regardless of result */
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ lockdep_assert_wiphy(wdev->wiphy);
+
+ err = cfg80211_mlme_check_mlo(dev, req, extack);
+ if (err)
+ return err;
+
if (wdev->connected &&
(!req->prev_bssid ||
!ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
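The net effect of the new checks: every link in an MLO association request must carry a valid basic Multi-Link element whose link ID matches its slot, and all links must agree on the MLD-level identity and capabilities. Reduced to a sketch over a flat struct — the field set mirrors cfg80211_mlme_check_mlo_compat() above, the 15-link array size matches the kernel's per-request maximum, and the types are illustrative:

/* Sketch of the cross-link consistency rule: all links in one MLO
 * association must agree on the MLD-level fields.
 */
#include <stdbool.h>
#include <string.h>

#define MAX_LINKS 15

struct mld_info {              /* illustrative flattening of the ML element */
        unsigned char mld_mac[6];
        unsigned short eml_med_sync_delay;
        unsigned short eml_cap;
        unsigned short mld_capa_op;
        bool valid;
};

static bool mlo_links_compatible(const struct mld_info *links, int assoc_link)
{
        const struct mld_info *ref = &links[assoc_link];
        int i;

        if (!ref->valid)
                return false; /* the assoc link must have an ML element */

        for (i = 0; i < MAX_LINKS; i++) {
                if (i == assoc_link || !links[i].valid)
                        continue;
                if (memcmp(links[i].mld_mac, ref->mld_mac, 6) ||
                    links[i].eml_med_sync_delay != ref->eml_med_sync_delay ||
                    links[i].eml_cap != ref->eml_cap ||
                    links[i].mld_capa_op != ref->mld_capa_op)
                        return false;
        }
        return true;
}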
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index bd54a928bab4..b4edba6b0b7b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/if.h>
@@ -581,7 +581,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
[NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
- [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_WPA_VERSIONS] =
+ NLA_POLICY_RANGE(NLA_U32, 0,
+ NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3),
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = NLA_POLICY_EXACT_LEN_WARN(WLAN_PMKID_LEN),
@@ -821,6 +825,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA] = { .type = NLA_FLAG },
[NL80211_ATTR_MLO_TTLM_DLINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
[NL80211_ATTR_MLO_TTLM_ULINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
+ [NL80211_ATTR_ASSOC_SPP_AMSDU] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
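The NLA_POLICY_RANGE() conversion above — paired with the removal of nl80211_valid_wpa_versions() further down — only works because the three version flags occupy the lowest three bits (NL80211_WPA_VERSION_1/2/3 are 1, 2 and 4): a value is numerically <= 7 exactly when it has no bits outside the mask. A quick exhaustive check of that equivalence:

/* The range check `v <= (V1|V2|V3)` and the mask check
 * `!(v & ~(V1|V2|V3))` agree for every value only because the flags
 * are contiguous low bits; verify it over the interesting range.
 */
#include <assert.h>
#include <stdio.h>

#define V1 1u
#define V2 2u
#define V3 4u

int main(void)
{
        unsigned int v;

        for (v = 0; v < 64; v++) {
                int by_range = v <= (V1 | V2 | V3);
                int by_mask  = !(v & ~(V1 | V2 | V3));

                assert(by_range == by_mask);
        }
        printf("range check == mask check for contiguous low-bit flags\n");
        return 0;
}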
@@ -906,22 +911,11 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
};
static const struct nla_policy
-nl80211_match_band_rssi_policy[NUM_NL80211_BANDS] = {
- [NL80211_BAND_2GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_5GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_6GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_60GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_LC] = { .type = NLA_S32 },
-};
-
-static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
- [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] =
- NLA_POLICY_NESTED(nl80211_match_band_rssi_policy),
};
static const struct nla_policy
@@ -1204,11 +1198,11 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_DFS_CONCURRENT) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DFS_CONCURRENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT))
goto nla_put_failure;
}
@@ -3224,24 +3218,9 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
-static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- const struct cfg80211_chan_def *chandef,
- u16 *punct_bitmap)
-{
- if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT))
- return -EINVAL;
-
- *punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
- if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef))
- return -EINVAL;
-
- return 0;
-}
-
-int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- struct cfg80211_chan_def *chandef)
+static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info, bool monitor,
+ struct cfg80211_chan_def *chandef)
{
struct netlink_ext_ack *extack = info->extack;
struct nlattr **attrs = info->attrs;
@@ -3266,10 +3245,9 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->freq1_offset = control_freq % 1000;
chandef->center_freq2 = 0;
- /* Primary channel not allowed */
- if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) {
+ if (!chandef->chan) {
NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
- "Channel is disabled");
+ "Unknown channel");
return -EINVAL;
}
@@ -3346,13 +3324,27 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->edmg.channels = 0;
}
+ if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
+ chandef->punctured =
+ nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
+
+ if (chandef->punctured &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_PUNCT)) {
+ NL_SET_ERR_MSG(extack,
+ "driver doesn't support puncturing");
+ return -EINVAL;
+ }
+ }
+
if (!cfg80211_chandef_valid(chandef)) {
NL_SET_ERR_MSG(extack, "invalid channel definition");
return -EINVAL;
}
- if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ if (!_cfg80211_chandef_usable(&rdev->wiphy, chandef,
+ IEEE80211_CHAN_DISABLED,
+ monitor)) {
NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
}
@@ -3367,6 +3359,13 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
return 0;
}
+int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_chan_def *chandef)
+{
+ return _nl80211_parse_chandef(rdev, info, false, chandef);
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct genl_info *info,
@@ -3391,7 +3390,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
link_id = 0;
}
- result = nl80211_parse_chandef(rdev, info, &chandef);
+ result = _nl80211_parse_chandef(rdev, info,
+ iftype == NL80211_IFTYPE_MONITOR,
+ &chandef);
if (result)
return result;
@@ -3822,6 +3823,10 @@ int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *ch
if (chandef->center_freq2 &&
nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2))
return -ENOBUFS;
+ if (chandef->punctured &&
+ nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, chandef->punctured))
+ return -ENOBUFS;
+
return 0;
}
EXPORT_SYMBOL(nl80211_send_chandef);
@@ -4202,8 +4207,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (netif_running(dev))
return -EBUSY;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -4309,8 +4312,6 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_MESH_POINT:
if (!info->attrs[NL80211_ATTR_MESH_ID])
break;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -6069,14 +6070,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params->chandef,
- &params->punct_bitmap);
- if (err)
- goto out;
- }
-
if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
wdev->iftype)) {
err = -EINVAL;
@@ -6876,7 +6869,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (statype) {
case CFG80211_STA_MESH_PEER_KERNEL:
@@ -6936,6 +6929,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
params->link_sta_params.he_capa ||
params->link_sta_params.eht_capa)
return -EINVAL;
+ if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
}
if (statype != CFG80211_STA_AP_CLIENT &&
@@ -6959,7 +6954,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
BIT(NL80211_STA_FLAG_ASSOCIATED) |
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
BIT(NL80211_STA_FLAG_WME) |
- BIT(NL80211_STA_FLAG_MFP)))
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_SPP_AMSDU)))
return -EINVAL;
/* but authenticated/associated only if driver handles it */
@@ -7518,7 +7514,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_AP:
@@ -7542,6 +7538,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.sta_flags_mask & auth_assoc)
return -EINVAL;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT) &&
+ params.sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
+
/* Older userspace, or userspace wanting to be compatible with
* !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth
* and assoc flags in the mask, but assumes the station will be
@@ -7630,14 +7631,16 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct station_del_parameters params;
+ int link_id = nl80211_link_id_or_invalid(info->attrs);
memset(&params, 0, sizeof(params));
if (info->attrs[NL80211_ATTR_MAC])
params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
- switch (dev->ieee80211_ptr->iftype) {
+ switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MESH_POINT:
@@ -7678,6 +7681,17 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID;
}
+ /* Link ID not expected in case of non-ML operation */
+ if (!wdev->valid_links && link_id != -1)
+ return -EINVAL;
+
+ /* If given, a valid link ID should be passed during MLO */
+ if (wdev->valid_links && link_id >= 0 &&
+ !(wdev->valid_links & BIT(link_id)))
+ return -EINVAL;
+
+ params.link_id = link_id;
+
return rdev_del_station(rdev, dev, &params);
}
@@ -9483,41 +9497,6 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
return 0;
}
-static int
-nl80211_parse_sched_scan_per_band_rssi(struct wiphy *wiphy,
- struct cfg80211_match_set *match_sets,
- struct nlattr *tb_band_rssi,
- s32 rssi_thold)
-{
- struct nlattr *attr;
- int i, tmp, ret = 0;
-
- if (!wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD)) {
- if (tb_band_rssi)
- ret = -EOPNOTSUPP;
- else
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] =
- NL80211_SCAN_RSSI_THOLD_OFF;
- return ret;
- }
-
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] = rssi_thold;
-
- nla_for_each_nested(attr, tb_band_rssi, tmp) {
- enum nl80211_band band = nla_type(attr);
-
- if (band < 0 || band >= NUM_NL80211_BANDS)
- return -EINVAL;
-
- match_sets->per_band_rssi_thold[band] = nla_get_s32(attr);
- }
-
- return 0;
-}
-
static struct cfg80211_sched_scan_request *
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr **attrs, int max_match_sets)
@@ -9792,15 +9771,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (rssi)
request->match_sets[i].rssi_thold =
nla_get_s32(rssi);
-
- /* Parse per band RSSI attribute */
- err = nl80211_parse_sched_scan_per_band_rssi(wiphy,
- &request->match_sets[i],
- tb[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI],
- request->match_sets[i].rssi_thold);
- if (err)
- goto out_free;
-
i++;
}
@@ -10080,6 +10050,42 @@ static int nl80211_notify_radar_detection(struct sk_buff *skb,
return 0;
}
+static int nl80211_parse_counter_offsets(struct cfg80211_registered_device *rdev,
+ const u8 *data, size_t datalen,
+ int first_count, struct nlattr *attr,
+ const u16 **offsets, unsigned int *n_offsets)
+{
+ int i;
+
+ *n_offsets = 0;
+
+ if (!attr)
+ return 0;
+
+ if (!nla_len(attr) || (nla_len(attr) % sizeof(u16)))
+ return -EINVAL;
+
+ *n_offsets = nla_len(attr) / sizeof(u16);
+ if (rdev->wiphy.max_num_csa_counters &&
+ (*n_offsets > rdev->wiphy.max_num_csa_counters))
+ return -EINVAL;
+
+ *offsets = nla_data(attr);
+
+ /* sanity checks - counters should fit and be the same */
+ for (i = 0; i < *n_offsets; i++) {
+ u16 offset = (*offsets)[i];
+
+ if (offset >= datalen)
+ return -EINVAL;
+
+ if (first_count != -1 && data[offset] != first_count)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
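nl80211_parse_counter_offsets() folds three previously duplicated validation loops into one, with a simple contract: every offset must land inside the template, and when first_count is not -1 each byte it points at must hold that countdown value, since all counters in one template must start in sync. Exercising that contract against a fabricated beacon tail (all values illustrative):

/* Exercise the offset-validation contract: offsets must be in range
 * and, when first_count != -1, must all point at that counter value.
 */
#include <stdio.h>

static int check_offsets(const unsigned char *data, unsigned int datalen,
                         int first_count, const unsigned short *offsets,
                         unsigned int n_offsets)
{
        unsigned int i;

        for (i = 0; i < n_offsets; i++) {
                if (offsets[i] >= datalen)
                        return -1;
                if (first_count != -1 && data[offsets[i]] != first_count)
                        return -1;
        }
        return 0;
}

int main(void)
{
        unsigned char tail[8] = { 0, 0, 5, 0, 0, 5, 0, 0 }; /* counters at 2, 5 */
        unsigned short good[2] = { 2, 5 };
        unsigned short bad[1] = { 3 }; /* in range, wrong counter value */

        printf("good: %d\n", check_offsets(tail, sizeof(tail), 5, good, 2)); /*  0 */
        printf("bad:  %d\n", check_offsets(tail, sizeof(tail), 5, bad, 1));  /* -1 */
        return 0;
}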
@@ -10091,7 +10097,6 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
int err;
bool need_new_beacon = false;
bool need_handle_dfs_flag = true;
- int len, i;
u32 cs_count;
if (!rdev->ops->channel_switch ||
@@ -10176,72 +10181,23 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
goto free;
}
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.tail,
+ params.beacon_csa.tail_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON],
+ &params.counter_offsets_beacon,
+ &params.n_counter_offsets_beacon);
+ if (err)
goto free;
- }
- params.n_counter_offsets_beacon = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_beacon >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.probe_resp,
+ params.beacon_csa.probe_resp_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP],
+ &params.counter_offsets_presp,
+ &params.n_counter_offsets_presp);
+ if (err)
goto free;
- }
-
- params.counter_offsets_beacon =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_beacon; i++) {
- u16 offset = params.counter_offsets_beacon[i];
-
- if (offset >= params.beacon_csa.tail_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.tail[offset] != params.count) {
- err = -EINVAL;
- goto free;
- }
- }
-
- if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) {
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
- goto free;
- }
-
- params.n_counter_offsets_presp = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_presp >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
- goto free;
- }
-
- params.counter_offsets_presp =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_presp; i++) {
- u16 offset = params.counter_offsets_presp[i];
-
- if (offset >= params.beacon_csa.probe_resp_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.probe_resp[offset] !=
- params.count) {
- err = -EINVAL;
- goto free;
- }
- }
- }
skip_beacons:
err = nl80211_parse_chandef(rdev, info, &params.chandef);
@@ -10272,14 +10228,7 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params.chandef,
- &params.punct_bitmap);
- if (err)
- goto free;
- }
-
+ params.link_id = link_id;
err = rdev_channel_switch(rdev, dev, &params);
free:
@@ -10652,13 +10601,6 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
return res;
}
-static bool nl80211_valid_wpa_versions(u32 wpa_versions)
-{
- return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
- NL80211_WPA_VERSION_2 |
- NL80211_WPA_VERSION_3));
-}
-
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -10884,12 +10826,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
+ if (info->attrs[NL80211_ATTR_WPA_VERSIONS])
settings->wpa_versions =
nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
- if (!nl80211_valid_wpa_versions(settings->wpa_versions))
- return -EINVAL;
- }
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
@@ -11104,6 +11043,15 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
sizeof(req.s1g_capa));
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_ASSOC_SPP_AMSDU])) {
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT)) {
+ GENL_SET_ERR_MSG(info, "SPP A-MSDUs not supported");
+ return -EINVAL;
+ }
+ req.flags |= ASSOC_REQ_SPP_AMSDU;
+ }
+
req.link_id = nl80211_link_id_or_invalid(info->attrs);
if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
@@ -11229,7 +11177,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
struct nlattr *link;
int rem = 0;
- err = cfg80211_mlme_assoc(rdev, dev, &req);
+ err = cfg80211_mlme_assoc(rdev, dev, &req,
+ info->extack);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid =
@@ -12677,23 +12626,12 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
- if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
- int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
- int i;
-
- if (len % sizeof(u16))
- return -EINVAL;
-
- params.n_csa_offsets = len / sizeof(u16);
- params.csa_offsets =
- nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
-
- /* check that all the offsets fit the frame */
- for (i = 0; i < params.n_csa_offsets; i++) {
- if (params.csa_offsets[i] >= params.len)
- return -EINVAL;
- }
- }
+ err = nl80211_parse_counter_offsets(rdev, NULL, params.len, -1,
+ info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX],
+ &params.csa_offsets,
+ &params.n_csa_offsets);
+ if (err)
+ return err;
if (!params.dont_wait_for_ack) {
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -16830,6 +16768,10 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_station,
.flags = GENL_UNS_ADMIN_PERM,
+ /* cannot use NL80211_FLAG_MLO_VALID_LINK_ID, depends on
+ * whether MAC address is passed or not. If MAC address is
+ * passed, then even during MLO, link ID is not required.
+ */
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
@@ -19400,7 +19342,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
- u8 count, bool quiet, u16 punct_bitmap)
+ u8 count, bool quiet)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct sk_buff *msg;
@@ -19434,9 +19376,6 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
- if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap))
- goto nla_put_failure;
-
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -19449,7 +19388,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap)
+ unsigned int link_id)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19458,7 +19397,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
+ trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -19487,15 +19426,14 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_NOTIFY, 0, false,
- punct_bitmap);
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap)
+ bool quiet)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19504,13 +19442,12 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
- punct_bitmap);
+ trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
- count, quiet, punct_bitmap);
+ count, quiet);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
@@ -19887,6 +19824,11 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
goto free_msg;
+ if (wakeup->unprot_deauth_disassoc &&
+ nla_put_flag(msg,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC))
+ goto free_msg;
+
if (wakeup->packet) {
u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
@@ -20167,9 +20109,26 @@ int cfg80211_external_auth_request(struct net_device *dev,
if (!hdr)
goto nla_put_failure;
+ /* Some historical mistakes in drivers <-> userspace interface (notably
+ * between drivers and wpa_supplicant) led to a big-endian conversion
+ * being needed on NL80211_ATTR_AKM_SUITES _only_ when its value is
+ * WLAN_AKM_SUITE_SAE. This is now fixed on userspace side, but for the
+ * benefit of older wpa_supplicant versions, send this particular value
+ * in big-endian. Note that newer wpa_supplicant will also detect this
+ * particular value in big endian still, so it all continues to work.
+ */
+ if (params->key_mgmt_suite == WLAN_AKM_SUITE_SAE) {
+ if (nla_put_be32(msg, NL80211_ATTR_AKM_SUITES,
+ cpu_to_be32(WLAN_AKM_SUITE_SAE)))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u32(msg, NL80211_ATTR_AKM_SUITES,
+ params->key_mgmt_suite))
+ goto nla_put_failure;
+ }
+
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) ||
nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION,
params->action) ||
nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
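The comment above is worth a concrete illustration. WLAN_AKM_SUITE_SAE is 0x000FAC08 (OUI 00-0F-AC, suite type 8), and this event historically carried it byte-swapped; userspace that wants to interoperate with both old and new kernels can simply normalise either byte order for this one value. A sketch of such a reader, with htonl() standing in for cpu_to_be32():

/* Accept NL80211_ATTR_AKM_SUITES in either byte order when it encodes
 * WLAN_AKM_SUITE_SAE, mirroring what newer wpa_supplicant does.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define WLAN_AKM_SUITE_SAE 0x000FAC08u

static uint32_t normalize_akm(uint32_t raw)
{
        if (raw == htonl(WLAN_AKM_SUITE_SAE)) /* big-endian legacy form */
                return WLAN_AKM_SUITE_SAE;
        return raw;                           /* host-order (native) form */
}

int main(void)
{
        printf("%#x\n", (unsigned int)normalize_akm(WLAN_AKM_SUITE_SAE));
        printf("%#x\n", (unsigned int)normalize_akm(htonl(WLAN_AKM_SUITE_SAE)));
        return 0;
}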
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2741b626919a..753f8e9aa4b1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,6 +57,8 @@
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
+#include <linux/units.h>
+
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
@@ -1289,20 +1291,17 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
u32 freq_khz)
{
-#define ONE_GHZ_IN_KHZ 1000000
/*
* From 802.11ad: directional multi-gigabit (DMG):
* Pertaining to operation in a frequency band containing a channel
* with the Channel starting frequency above 45 GHz.
*/
- u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ : 2 * KHZ_PER_GHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
return true;
return false;
-#undef ONE_GHZ_IN_KHZ
}
/*
@@ -1595,10 +1594,10 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_EHT;
if (rd_flags & NL80211_RRF_DFS_CONCURRENT)
channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT;
- if (rd_flags & NL80211_RRF_NO_UHB_VLP_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_VLP_CLIENT;
- if (rd_flags & NL80211_RRF_NO_UHB_AFC_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_AFC_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
if (rd_flags & NL80211_RRF_PSD)
channel_flags |= IEEE80211_CHAN_PSD;
return channel_flags;
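The reg.c changes are mostly mechanical: <linux/units.h> now supplies KHZ_PER_GHZ in place of the local ONE_GHZ_IN_KHZ define, and the "UHB" naming is retired in favour of "6GHZ". The rule freq_in_rule_band() encodes can be restated as a self-contained sketch: a frequency counts as belonging to a regulatory rule's band if it lies within 2 GHz of either band edge, widened to 20 GHz for DMG (802.11ad) operation above 45 GHz.

    #include <linux/units.h>	/* KHZ_PER_GHZ */

    /* Sketch of the band-edge tolerance rule, mirroring the hunk above */
    static bool freq_near_band_edge(u32 freq_khz, u32 start_khz, u32 end_khz)
    {
    	u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ
    						: 2 * KHZ_PER_GHZ;

    	return abs(freq_khz - start_khz) <= limit ||
    	       abs(freq_khz - end_khz) <= limit;
    }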
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 389a52c29bfc..5a5dd3ce497f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -77,45 +77,6 @@ MODULE_PARM_DESC(bss_entries_limit,
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
-/**
- * struct cfg80211_colocated_ap - colocated AP information
- *
- * @list: linked list to all colocated aPS
- * @bssid: BSSID of the reported AP
- * @ssid: SSID of the reported AP
- * @ssid_len: length of the ssid
- * @center_freq: frequency the reported AP is on
- * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
- * that operate in the same channel as the reported AP and that might be
- * detected by a STA receiving this frame, are transmitting unsolicited
- * Probe Response frames every 20 TUs
- * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
- * @same_ssid: the reported AP has the same SSID as the reporting AP
- * @multi_bss: the reported AP is part of a multiple BSSID set
- * @transmitted_bssid: the reported AP is the transmitting BSSID
- * @colocated_ess: all the APs that share the same ESS as the reported AP are
- * colocated and can be discovered via legacy bands.
- * @short_ssid_valid: short_ssid is valid and can be used
- * @short_ssid: the short SSID for this SSID
- * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
- */
-struct cfg80211_colocated_ap {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
- u32 short_ssid;
- u32 center_freq;
- u8 unsolicited_probe:1,
- oct_recommended:1,
- same_ssid:1,
- multi_bss:1,
- transmitted_bssid:1,
- colocated_ess:1,
- short_ssid_valid:1;
- s8 psd_20;
-};
-
static void bss_free(struct cfg80211_internal_bss *bss)
{
struct cfg80211_bss_ies *ies;
@@ -566,7 +527,8 @@ static int cfg80211_calc_short_ssid(const struct cfg80211_bss_ies *ies,
return 0;
}
-static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
+VISIBLE_IF_CFG80211_KUNIT void
+cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
{
struct cfg80211_colocated_ap *ap, *tmp_ap;
@@ -575,6 +537,7 @@ static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
kfree(ap);
}
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_free_coloc_ap_list);
static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
const u8 *pos, u8 length,
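cfg80211_free_coloc_ap_list() loses its static marker via VISIBLE_IF_CFG80211_KUNIT and gains a conditional export so the new KUnit scan tests further down can call it directly. The macro pair presumably lives in net/wireless/core.h and follows the usual shape of such helpers (hedged sketch, not the verbatim definition):

    #if IS_ENABLED(CONFIG_CFG80211_KUNIT_TEST)
    #define EXPORT_SYMBOL_IF_CFG80211_KUNIT(sym) \
    	EXPORT_SYMBOL_NS(sym, EXPORTED_FOR_KUNIT_TESTING)
    #define VISIBLE_IF_CFG80211_KUNIT
    #else
    #define EXPORT_SYMBOL_IF_CFG80211_KUNIT(sym)
    #define VISIBLE_IF_CFG80211_KUNIT static
    #endif

The test modules then import the EXPORTED_FOR_KUNIT_TESTING namespace to reach these symbols, as the new chan.c file below shows.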
@@ -648,104 +611,140 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
return 0;
}
-static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
- struct list_head *list)
+bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data)
{
- struct ieee80211_neighbor_ap_info *ap_info;
- const struct element *elem, *ssid_elem;
+ const struct element *rnr;
const u8 *pos, *end;
- u32 s_ssid_tmp;
- int n_coloc = 0, ret;
- LIST_HEAD(ap_list);
- ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
- if (ret)
- return 0;
+ for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
+ elems, elems_len) {
+ const struct ieee80211_neighbor_ap_info *info;
- for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
- ies->data, ies->len) {
- pos = elem->data;
- end = elem->data + elem->datalen;
+ pos = rnr->data;
+ end = rnr->data + rnr->datalen;
/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (pos + sizeof(*ap_info) <= end) {
- enum nl80211_band band;
- int freq;
+ while (sizeof(*info) <= end - pos) {
u8 length, i, count;
+ u8 type;
- ap_info = (void *)pos;
- count = u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = ap_info->tbtt_info_len;
+ info = (void *)pos;
+ count = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_COUNT) +
+ 1;
+ length = info->tbtt_info_len;
- pos += sizeof(*ap_info);
+ pos += sizeof(*info);
- if (!ieee80211_operating_class_to_band(ap_info->op_class,
- &band))
- break;
+ if (count * length > end - pos)
+ return false;
- freq = ieee80211_channel_to_frequency(ap_info->channel,
- band);
+ type = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE);
- if (end - pos < count * length)
- break;
+ for (i = 0; i < count; i++) {
+ switch (iter(iter_data, type, info,
+ pos, length)) {
+ case RNR_ITER_CONTINUE:
+ break;
+ case RNR_ITER_BREAK:
+ return true;
+ case RNR_ITER_ERROR:
+ return false;
+ }
- if (u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE) !=
- IEEE80211_TBTT_INFO_TYPE_TBTT) {
- pos += count * length;
- continue;
+ pos += length;
}
+ }
- /* TBTT info must include bss param + BSSID +
- * (short SSID or same_ssid bit to be set).
- * ignore other options, and move to the
- * next AP info
- */
- if (band != NL80211_BAND_6GHZ ||
- !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9,
- bss_params) ||
- length == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
- length >= offsetofend(struct ieee80211_tbtt_info_ge_11,
- bss_params))) {
- pos += count * length;
- continue;
- }
+ if (pos != end)
+ return false;
+ }
- for (i = 0; i < count; i++) {
- struct cfg80211_colocated_ap *entry;
+ return true;
+}
+EXPORT_SYMBOL_GPL(cfg80211_iter_rnr);
+
+struct colocated_ap_data {
+ const struct element *ssid_elem;
+ struct list_head ap_list;
+ u32 s_ssid_tmp;
+ int n_coloc;
+};
- entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
- GFP_ATOMIC);
+static enum cfg80211_rnr_iter_ret
+cfg80211_parse_colocated_ap_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct colocated_ap_data *data = _data;
+ struct cfg80211_colocated_ap *entry;
+ enum nl80211_band band;
- if (!entry)
- goto error;
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
- entry->center_freq = freq;
+ if (!ieee80211_operating_class_to_band(info->op_class, &band))
+ return RNR_ITER_CONTINUE;
- if (!cfg80211_parse_ap_info(entry, pos, length,
- ssid_elem,
- s_ssid_tmp)) {
- n_coloc++;
- list_add_tail(&entry->list, &ap_list);
- } else {
- kfree(entry);
- }
+ /* TBTT info must include bss param + BSSID + (short SSID or
+ * same_ssid bit to be set). Ignore other options, and move to
+ * the next AP info
+ */
+ if (band != NL80211_BAND_6GHZ ||
+ !(tbtt_info_len == offsetofend(struct ieee80211_tbtt_info_7_8_9,
+ bss_params) ||
+ tbtt_info_len == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ bss_params)))
+ return RNR_ITER_CONTINUE;
+
+ entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN, GFP_ATOMIC);
+ if (!entry)
+ return RNR_ITER_ERROR;
+
+ entry->center_freq =
+ ieee80211_channel_to_frequency(info->channel, band);
+
+ if (!cfg80211_parse_ap_info(entry, tbtt_info, tbtt_info_len,
+ data->ssid_elem, data->s_ssid_tmp)) {
+ data->n_coloc++;
+ list_add_tail(&entry->list, &data->ap_list);
+ } else {
+ kfree(entry);
+ }
- pos += length;
- }
- }
+ return RNR_ITER_CONTINUE;
+}
-error:
- if (pos != end) {
- cfg80211_free_coloc_ap_list(&ap_list);
- return 0;
- }
+VISIBLE_IF_CFG80211_KUNIT int
+cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list)
+{
+ struct colocated_ap_data data = {};
+ int ret;
+
+ INIT_LIST_HEAD(&data.ap_list);
+
+ ret = cfg80211_calc_short_ssid(ies, &data.ssid_elem, &data.s_ssid_tmp);
+ if (ret)
+ return 0;
+
+ if (!cfg80211_iter_rnr(ies->data, ies->len,
+ cfg80211_parse_colocated_ap_iter, &data)) {
+ cfg80211_free_coloc_ap_list(&data.ap_list);
+ return 0;
}
- list_splice_tail(&ap_list, list);
- return n_coloc;
+ list_splice_tail(&data.ap_list, list);
+ return data.n_coloc;
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_parse_colocated_ap);
static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
struct ieee80211_channel *chan,
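The rewrite above factors the RNR walk out of the colocated-AP policy: cfg80211_iter_rnr() owns all of the bounds checking (count * length against the remaining element data, pos == end at the close) and hands every TBTT record to a callback, which steers the walk through RNR_ITER_CONTINUE / RNR_ITER_BREAK / RNR_ITER_ERROR. A minimal sketch of an independent caller (names invented) that merely counts plain TBTT entries:

    struct rnr_count_data {
    	unsigned int n_tbtt;
    };

    static enum cfg80211_rnr_iter_ret
    count_tbtt_iter(void *data, u8 type,
    		const struct ieee80211_neighbor_ap_info *info,
    		const u8 *tbtt_info, u8 tbtt_info_len)
    {
    	struct rnr_count_data *count = data;

    	if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
    		count->n_tbtt++;
    	return RNR_ITER_CONTINUE;
    }

    static bool count_rnr_tbtt(const u8 *ies, size_t len, unsigned int *res)
    {
    	struct rnr_count_data data = {};

    	if (!cfg80211_iter_rnr(ies, len, count_tbtt_iter, &data))
    		return false;	/* malformed RNR element(s) */
    	*res = data.n_tbtt;
    	return true;
    }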
@@ -2122,6 +2121,35 @@ struct cfg80211_inform_single_bss_data {
u64 cannot_use_reasons;
};
+static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
+ const u32 flags)
+{
+ const struct element *tmp;
+ struct ieee80211_he_operation *he_oper;
+
+ tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
+ if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
+ he_oper = (void *)&tmp->data[1];
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+
+ if (!he_6ghz_oper)
+ return false;
+
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ return true;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT);
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT);
+ }
+ }
+ return false;
+}
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *
cfg80211_inform_single_bss_data(struct wiphy *wiphy,
@@ -2154,6 +2182,14 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!channel)
return NULL;
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_6ghz_power_type_valid(data->ie, data->ielen,
+ channel->flags)) {
+ data->use_for = 0;
+ data->cannot_use_reasons =
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH;
+ }
+
memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN);
tmp.pub.channel = channel;
if (data->bss_source != BSS_SOURCE_STA_PROFILE)
@@ -2165,6 +2201,9 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
tmp.ts_boottime = drv_data->boottime_ns;
tmp.parent_tsf = drv_data->parent_tsf;
ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid);
+ tmp.pub.chains = drv_data->chains;
+ memcpy(tmp.pub.chain_signal, drv_data->chain_signal,
+ IEEE80211_MAX_CHAINS);
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
@@ -2208,6 +2247,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
switch (data->ftype) {
case CFG80211_BSS_FTYPE_BEACON:
+ case CFG80211_BSS_FTYPE_S1G_BEACON:
ies->from_beacon = true;
fallthrough;
case CFG80211_BSS_FTYPE_UNKNOWN:
@@ -2464,16 +2504,22 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
if (elem->id == WLAN_EID_EXTENSION) {
copied = elem->datalen - 1;
- if (copied > data_len)
- return -ENOSPC;
- memmove(data, elem->data + 1, copied);
+ if (data) {
+ if (copied > data_len)
+ return -ENOSPC;
+
+ memmove(data, elem->data + 1, copied);
+ }
} else {
copied = elem->datalen;
- if (copied > data_len)
- return -ENOSPC;
- memmove(data, elem->data, copied);
+ if (data) {
+ if (copied > data_len)
+ return -ENOSPC;
+
+ memmove(data, elem->data, copied);
+ }
}
/* Fragmented elements must have 255 bytes */
@@ -2492,10 +2538,13 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
elem_datalen = elem->datalen;
- if (copied + elem_datalen > data_len)
- return -ENOSPC;
+ if (data) {
+ if (copied + elem_datalen > data_len)
+ return -ENOSPC;
+
+ memmove(data + copied, elem->data, elem_datalen);
+ }
- memmove(data + copied, elem->data, elem_datalen);
copied += elem_datalen;
/* Only the last fragment may be short */
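The NULL-data branches above turn cfg80211_defragment_element() into a dual-purpose helper: with data == NULL it only measures the defragmented length, so a caller can size the buffer before filling it (the updated fragmentation tests further down exercise exactly this two-call pattern). A hedged sketch, helper name invented:

    static u8 *defrag_elem_alloc(const struct element *elem,
    			     const u8 *ies, size_t ieslen,
    			     size_t *out_len, gfp_t gfp)
    {
    	ssize_t len;
    	u8 *buf;

    	/* first pass: data == NULL just computes the size */
    	len = cfg80211_defragment_element(elem, ies, ieslen, NULL, 0,
    					  WLAN_EID_FRAGMENT);
    	if (len < 0)
    		return NULL;

    	buf = kmalloc(len, gfp);
    	if (!buf)
    		return NULL;

    	/* second pass: actually copy the defragmented payload */
    	if (cfg80211_defragment_element(elem, ies, ieslen, buf, len,
    					WLAN_EID_FRAGMENT) != len) {
    		kfree(buf);
    		return NULL;
    	}

    	*out_len = len;
    	return buf;
    }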
@@ -2601,77 +2650,168 @@ error:
return NULL;
}
+struct tbtt_info_iter_data {
+ const struct ieee80211_neighbor_ap_info *ap_info;
+ u8 param_ch_count;
+ u32 use_for;
+ u8 mld_id, link_id;
+};
+
+static enum cfg80211_rnr_iter_ret
+cfg80211_mld_ap_rnr_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ const struct ieee80211_rnr_mld_params *mld_params;
+ struct tbtt_info_iter_data *data = _data;
+ u8 link_id;
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ mld_params))
+ mld_params = (void *)(tbtt_info +
+ offsetof(struct ieee80211_tbtt_info_ge_11,
+ mld_params));
+ else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
+ tbtt_info_len >= sizeof(struct ieee80211_rnr_mld_params))
+ mld_params = (void *)tbtt_info;
+ else
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+
+ if (data->mld_id != mld_params->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ if (data->link_id != link_id)
+ return RNR_ITER_CONTINUE;
+
+ data->ap_info = info;
+ data->param_ch_count =
+ le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
+ data->use_for = NL80211_BSS_USE_FOR_ALL;
+ else
+ data->use_for = NL80211_BSS_USE_FOR_MLD_LINK;
+ return RNR_ITER_BREAK;
+}
+
static u8
-cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
- const struct ieee80211_neighbor_ap_info **ap_info,
- const u8 **tbtt_info)
+cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ const struct ieee80211_neighbor_ap_info **ap_info,
+ u8 *param_ch_count)
{
- const struct ieee80211_neighbor_ap_info *info;
- const struct element *rnr;
- const u8 *pos, *end;
+ struct tbtt_info_iter_data data = {
+ .mld_id = mld_id,
+ .link_id = link_id,
+ };
- for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ie, ielen) {
- pos = rnr->data;
- end = rnr->data + rnr->datalen;
+ cfg80211_iter_rnr(ie, ielen, cfg80211_mld_ap_rnr_iter, &data);
- /* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (sizeof(*info) <= end - pos) {
- const struct ieee80211_rnr_mld_params *mld_params;
- u16 params;
- u8 length, i, count, mld_params_offset;
- u8 type, lid;
- u32 use_for;
+ *ap_info = data.ap_info;
+ *param_ch_count = data.param_ch_count;
- info = (void *)pos;
- count = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = info->tbtt_info_len;
+ return data.use_for;
+}
- pos += sizeof(*info);
+static struct element *
+cfg80211_gen_reporter_rnr(struct cfg80211_bss *source_bss, bool is_mbssid,
+ bool same_mld, u8 link_id, u8 bss_change_count,
+ gfp_t gfp)
+{
+ const struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info ap_info;
+ struct ieee80211_tbtt_info_ge_11 tbtt_info;
+ u32 short_ssid;
+ const struct element *elem;
+ struct element *res;
- if (count * length > end - pos)
- return 0;
+ /*
+ * We only generate the RNR to permit ML lookups. For that we do not
+ * need an entry for the corresponding transmitting BSS, let's just skip
+ * it even though it would be easy to add.
+ */
+ if (!same_mld)
+ return NULL;
- type = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE);
+ /* We could use tx_data->ies if we change cfg80211_calc_short_ssid */
+ rcu_read_lock();
+ ies = rcu_dereference(source_bss->ies);
- if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
- length >=
- offsetofend(struct ieee80211_tbtt_info_ge_11,
- mld_params)) {
- mld_params_offset =
- offsetof(struct ieee80211_tbtt_info_ge_11, mld_params);
- use_for = NL80211_BSS_USE_FOR_ALL;
- } else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
- length >= sizeof(struct ieee80211_rnr_mld_params)) {
- mld_params_offset = 0;
- use_for = NL80211_BSS_USE_FOR_MLD_LINK;
- } else {
- pos += count * length;
- continue;
- }
+ ap_info.tbtt_info_len = offsetofend(typeof(tbtt_info), mld_params);
+ ap_info.tbtt_info_hdr =
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_TBTT,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE) |
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT);
- for (i = 0; i < count; i++) {
- mld_params = (void *)pos + mld_params_offset;
- params = le16_to_cpu(mld_params->params);
+ ap_info.channel = ieee80211_frequency_to_channel(source_bss->channel->center_freq);
- lid = u16_get_bits(params,
- IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ /* operating class */
+ elem = cfg80211_find_elem(WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ ies->data, ies->len);
+ if (elem && elem->datalen >= 1) {
+ ap_info.op_class = elem->data[0];
+ } else {
+ struct cfg80211_chan_def chandef;
- if (mld_id == mld_params->mld_id &&
- link_id == lid) {
- *ap_info = info;
- *tbtt_info = pos;
+ /* The AP is not providing us with anything to work with. So
+ * make up a somewhat reasonable operating class, but don't
+ * bother with it too much as no one will ever use the
+ * information.
+ */
+ cfg80211_chandef_create(&chandef, source_bss->channel,
+ NL80211_CHAN_NO_HT);
- return use_for;
- }
+ if (!ieee80211_chandef_to_operating_class(&chandef,
+ &ap_info.op_class))
+ goto out_unlock;
+ }
- pos += length;
- }
- }
+ /* Just set TBTT offset and PSD 20 to invalid/unknown */
+ tbtt_info.tbtt_offset = 255;
+ tbtt_info.psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ memcpy(tbtt_info.bssid, source_bss->bssid, ETH_ALEN);
+ if (cfg80211_calc_short_ssid(ies, &elem, &short_ssid))
+ goto out_unlock;
+
+ rcu_read_unlock();
+
+ tbtt_info.short_ssid = cpu_to_le32(short_ssid);
+
+ tbtt_info.bss_params = IEEE80211_RNR_TBTT_PARAMS_SAME_SSID;
+
+ if (is_mbssid) {
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID;
}
- return 0;
+ tbtt_info.mld_params.mld_id = 0;
+ tbtt_info.mld_params.params =
+ le16_encode_bits(link_id, IEEE80211_RNR_MLD_PARAMS_LINK_ID) |
+ le16_encode_bits(bss_change_count,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ res = kzalloc(struct_size(res, data,
+ sizeof(ap_info) + ap_info.tbtt_info_len),
+ gfp);
+ if (!res)
+ return NULL;
+
+ /* Copy the data */
+ res->id = WLAN_EID_REDUCED_NEIGHBOR_REPORT;
+ res->datalen = sizeof(ap_info) + ap_info.tbtt_info_len;
+ memcpy(res->data, &ap_info, sizeof(ap_info));
+ memcpy(res->data + sizeof(ap_info), &tbtt_info, ap_info.tbtt_info_len);
+
+ return res;
+
+out_unlock:
+ rcu_read_unlock();
+ return NULL;
}
static void
@@ -2687,25 +2827,25 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
.source_bss = source_bss,
.bss_source = BSS_SOURCE_STA_PROFILE,
};
+ struct element *reporter_rnr = NULL;
struct ieee80211_multi_link_elem *ml_elem;
struct cfg80211_mle *mle;
u16 control;
u8 ml_common_len;
- u8 *new_ie;
+ u8 *new_ie = NULL;
struct cfg80211_bss *bss;
- int mld_id;
+ u8 mld_id, reporter_link_id, bss_change_count;
u16 seen_links = 0;
- const u8 *pos;
u8 i;
- if (!ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
+ if (!ieee80211_mle_type_ok(elem->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ elem->datalen - 1))
return;
- ml_elem = (void *)elem->data + 1;
+ ml_elem = (void *)(elem->data + 1);
control = le16_to_cpu(ml_elem->control);
- if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
+ ml_common_len = ml_elem->variable[0];
/* Must be present when transmitted by an AP (in a probe response) */
if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) ||
@@ -2713,18 +2853,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
return;
- ml_common_len = ml_elem->variable[0];
-
- /* length + MLD MAC address + link ID info + BSS Params Change Count */
- pos = ml_elem->variable + 1 + 6 + 1 + 1;
-
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
- pos += 2;
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_EML_CAPA))
- pos += 2;
-
- /* MLD capabilities and operations */
- pos += 2;
+ reporter_link_id = ieee80211_mle_get_link_id(elem->data + 1);
+ bss_change_count = ieee80211_mle_get_bss_param_ch_cnt(elem->data + 1);
/*
* The MLD ID of the reporting AP is always zero. It is set if the AP
@@ -2732,32 +2862,35 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
* relating to a nontransmitted BSS (matching the Multi-BSSID Index,
* Draft P802.11be_D3.2, 35.3.4.2)
*/
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
- mld_id = *pos;
- pos += 1;
- } else {
- mld_id = 0;
- }
-
- /* Extended MLD capabilities and operations */
- pos += 2;
+ mld_id = ieee80211_mle_get_mld_id(elem->data + 1);
/* Fully defrag the ML element for sta information/profile iteration */
mle = cfg80211_defrag_mle(elem, tx_data->ie, tx_data->ielen, gfp);
if (!mle)
return;
+ /* No point in doing anything if there is no per-STA profile */
+ if (!mle->sta_prof[0])
+ goto out;
+
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
if (!new_ie)
goto out;
+ reporter_rnr = cfg80211_gen_reporter_rnr(source_bss,
+ u16_get_bits(control,
+ IEEE80211_MLC_BASIC_PRES_MLD_ID),
+ mld_id == 0, reporter_link_id,
+ bss_change_count,
+ gfp);
+
for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
const struct ieee80211_neighbor_ap_info *ap_info;
enum nl80211_band band;
u32 freq;
const u8 *profile;
- const u8 *tbtt_info;
ssize_t profile_len;
+ u8 param_ch_count;
u8 link_id, use_for;
if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
@@ -2800,10 +2933,11 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
profile_len -= 2;
/* Find in RNR to look up channel information */
- use_for = cfg80211_tbtt_info_for_mld_ap(tx_data->ie,
- tx_data->ielen,
- mld_id, link_id,
- &ap_info, &tbtt_info);
+ use_for = cfg80211_rnr_info_for_mld_ap(tx_data->ie,
+ tx_data->ielen,
+ mld_id, link_id,
+ &ap_info,
+ &param_ch_count);
if (!use_for)
continue;
@@ -2846,7 +2980,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
continue;
/* Copy the Basic Multi-Link element including the common
- * information, and then fix up the link ID.
+ * information, and then fix up the link ID and BSS param
+ * change count.
* Note that the ML element length has been verified and we
* also checked that it contains the link ID.
*/
@@ -2857,10 +2992,21 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
sizeof(*ml_elem) + ml_common_len);
new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN] = link_id;
+ new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN + 1] =
+ param_ch_count;
data.ielen += sizeof(*ml_elem) + ml_common_len;
- /* TODO: Add an RNR containing only the reporting AP */
+ if (reporter_rnr && (use_for & NL80211_BSS_USE_FOR_NORMAL)) {
+ if (data.ielen + sizeof(struct element) +
+ reporter_rnr->datalen > IEEE80211_MAX_DATA_LEN)
+ continue;
+
+ memcpy(new_ie + data.ielen, reporter_rnr,
+ sizeof(struct element) + reporter_rnr->datalen);
+ data.ielen += sizeof(struct element) +
+ reporter_rnr->datalen;
+ }
bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp);
if (!bss)
@@ -2869,6 +3015,7 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
}
out:
+ kfree(reporter_rnr);
kfree(new_ie);
kfree(mle);
}
@@ -2921,6 +3068,10 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
if (!res)
return NULL;
+ /* don't do any further MBSSID/ML handling for S1G */
+ if (ftype == CFG80211_BSS_FTYPE_S1G_BEACON)
+ return res;
+
cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
@@ -2929,59 +3080,22 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_inform_bss_data);
-static bool cfg80211_uhb_power_type_valid(const u8 *ie,
- size_t ielen,
- const u32 flags)
-{
- const struct element *tmp;
- struct ieee80211_he_operation *he_oper;
-
- tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
- const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
-
- he_oper = (void *)&tmp->data[1];
- he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper)
- return false;
-
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- return true;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT);
- case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT);
- }
- }
- return false;
-}
-
-/* cfg80211_inform_bss_width_frame helper */
-static struct cfg80211_bss *
-cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
{
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss tmp = {}, *res;
- struct cfg80211_bss_ies *ies;
- struct ieee80211_channel *channel;
- bool signal_valid;
+ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
struct ieee80211_ext *ext = NULL;
- u8 *bssid, *variable;
- u16 capability, beacon_int;
- size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
- int bss_type;
-
- BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
- offsetof(struct ieee80211_mgmt, u.beacon.variable));
-
- trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
+ enum cfg80211_bss_frame_type ftype;
+ u16 beacon_interval;
+ const u8 *bssid;
+ u16 capability;
+ const u8 *ie;
+ size_t ielen;
+ u64 tsf;
if (WARN_ON(!mgmt))
return NULL;
@@ -2989,9 +3103,10 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
if (WARN_ON(!wiphy))
return NULL;
- if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (data->signal < 0 || data->signal > 100)))
- return NULL;
+ BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
+ offsetof(struct ieee80211_mgmt, u.beacon.variable));
+
+ trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
@@ -3005,32 +3120,17 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
return NULL;
ielen = len - min_hdr_len;
- variable = mgmt->u.probe_resp.variable;
- if (ext) {
- if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
- variable = ext->u.s1g_short_beacon.variable;
- else
- variable = ext->u.s1g_beacon.variable;
- }
-
- channel = cfg80211_get_bss_channel(wiphy, variable, ielen, data->chan);
- if (!channel)
- return NULL;
-
- if (channel->band == NL80211_BAND_6GHZ &&
- !cfg80211_uhb_power_type_valid(variable, ielen, channel->flags)) {
- data->restrict_use = 1;
- data->use_for = 0;
- data->cannot_use_reasons =
- NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH;
- }
-
+ ie = mgmt->u.probe_resp.variable;
if (ext) {
const struct ieee80211_s1g_bcn_compat_ie *compat;
const struct element *elem;
- elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
- variable, ielen);
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ ie = ext->u.s1g_short_beacon.variable;
+ else
+ ie = ext->u.s1g_beacon.variable;
+
+ elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen);
if (!elem)
return NULL;
if (elem->datalen < sizeof(*compat))
@@ -3038,112 +3138,26 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
compat = (void *)elem->data;
bssid = ext->u.s1g_beacon.sa;
capability = le16_to_cpu(compat->compat_info);
- beacon_int = le16_to_cpu(compat->beacon_int);
+ beacon_interval = le16_to_cpu(compat->beacon_int);
} else {
bssid = mgmt->bssid;
- beacon_int = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
+ beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
}
- if (channel->band == NL80211_BAND_60GHZ) {
- bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
- if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
- bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- } else {
- if (capability & WLAN_CAPABILITY_ESS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- }
-
- ies = kzalloc(sizeof(*ies) + ielen, gfp);
- if (!ies)
- return NULL;
- ies->len = ielen;
- ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control) ||
- ieee80211_is_s1g_beacon(mgmt->frame_control);
- memcpy(ies->data, variable, ielen);
+ tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
if (ieee80211_is_probe_resp(mgmt->frame_control))
- rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+ else if (ext)
+ ftype = CFG80211_BSS_FTYPE_S1G_BEACON;
else
- rcu_assign_pointer(tmp.pub.beacon_ies, ies);
- rcu_assign_pointer(tmp.pub.ies, ies);
+ ftype = CFG80211_BSS_FTYPE_BEACON;
- memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
- tmp.pub.beacon_interval = beacon_int;
- tmp.pub.capability = capability;
- tmp.pub.channel = channel;
- tmp.pub.signal = data->signal;
- tmp.ts_boottime = data->boottime_ns;
- tmp.parent_tsf = data->parent_tsf;
- tmp.pub.chains = data->chains;
- memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
- ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
- tmp.pub.use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL;
- tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
-
- signal_valid = data->chan == channel;
- spin_lock_bh(&rdev->bss_lock);
- res = __cfg80211_bss_update(rdev, &tmp, signal_valid, jiffies);
- if (!res)
- goto drop;
-
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
-
- spin_unlock_bh(&rdev->bss_lock);
-
- trace_cfg80211_return_bss(&res->pub);
- /* __cfg80211_bss_update gives us a referenced result */
- return &res->pub;
-
-drop:
- spin_unlock_bh(&rdev->bss_lock);
- return NULL;
-}
-
-struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
-{
- struct cfg80211_inform_single_bss_data inform_data = {
- .drv_data = data,
- .ie = mgmt->u.probe_resp.variable,
- .ielen = len - offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable),
- .use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL,
- .cannot_use_reasons = data->cannot_use_reasons,
- };
- struct cfg80211_bss *res;
-
- res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
- len, gfp);
- if (!res)
- return NULL;
-
- /* don't do any further MBSSID/ML handling for S1G */
- if (ieee80211_is_s1g_beacon(mgmt->frame_control))
- return res;
-
- inform_data.ftype = ieee80211_is_beacon(mgmt->frame_control) ?
- CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
- memcpy(inform_data.bssid, mgmt->bssid, ETH_ALEN);
- inform_data.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- inform_data.beacon_interval =
- le16_to_cpu(mgmt->u.probe_resp.beacon_int);
-
- /* process each non-transmitting bss */
- cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
-
- cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
-
- return res;
+ return cfg80211_inform_bss_data(wiphy, data, ftype,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen,
+ gfp);
}
EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
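With the single-BSS frame path folded away, cfg80211_inform_bss_frame_data() reduces to deriving the frame type, BSSID, TSF and capability from the management frame and delegating to cfg80211_inform_bss_data(), leaving one code path for the MBSSID/ML handling (and skipping it for S1G beacons via the new CFG80211_BSS_FTYPE_S1G_BEACON). The driver-facing call is unchanged; a hedged sketch (helper name and signal value invented, units depend on the wiphy's signal_type):

    static void drv_rx_beacon(struct wiphy *wiphy,
    			  struct ieee80211_channel *chan,
    			  struct ieee80211_mgmt *mgmt, size_t len)
    {
    	struct cfg80211_inform_bss data = {
    		.chan = chan,
    		.signal = -6500,	/* -65 dBm in mBm, assumed */
    		.boottime_ns = ktime_get_boottime_ns(),
    	};
    	struct cfg80211_bss *bss;

    	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
    					     GFP_KERNEL);
    	if (bss)
    		cfg80211_put_bss(wiphy, bss);	/* drop our reference */
    }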
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 195c8532734b..82e3ce42206c 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -209,7 +209,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
if (!req.bss) {
err = -ENOENT;
} else {
- err = cfg80211_mlme_assoc(rdev, wdev->netdev, &req);
+ err = cfg80211_mlme_assoc(rdev, wdev->netdev,
+ &req, NULL);
cfg80211_put_bss(&rdev->wiphy, req.bss);
}
diff --git a/net/wireless/tests/Makefile b/net/wireless/tests/Makefile
index 1f6622fcb758..c364e63b508e 100644
--- a/net/wireless/tests/Makefile
+++ b/net/wireless/tests/Makefile
@@ -1,3 +1,3 @@
-cfg80211-tests-y += module.o fragmentation.o scan.o util.o
+cfg80211-tests-y += module.o fragmentation.o scan.o util.o chan.o
obj-$(CONFIG_CFG80211_KUNIT_TEST) += cfg80211-tests.o
diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c
new file mode 100644
index 000000000000..d02258ac2dab
--- /dev/null
+++ b/net/wireless/tests/chan.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static struct ieee80211_channel chan_6ghz_1 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5955,
+};
+
+static struct ieee80211_channel chan_6ghz_5 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5975,
+};
+
+static struct ieee80211_channel chan_6ghz_105 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6475,
+};
+
+static const struct chandef_compat_case {
+ const char *desc;
+ /* leave c1 empty for tests for identical */
+ struct cfg80211_chan_def c1, c2;
+ /* we test both ways around, so c2 should always be the compat one */
+ bool compat;
+} chandef_compat_cases[] = {
+ {
+ .desc = "identical non-HT",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 20 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 40 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_40,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 80 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_80,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 160 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 320 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "20 MHz in 320 MHz\n",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "different 20 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_5,
+ .center_freq1 = 5975,
+ },
+ },
+ {
+ .desc = "different primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 150,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ },
+ },
+ {
+ /* similar to previous test but one has lower BW */
+ .desc = "matching primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured secondary 160 Mhz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xf,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ .punctured = 0xc0,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xc000,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured not matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ .punctured = 0x80,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xc000,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(chandef_compat, chandef_compat_cases, desc)
+
+static void test_chandef_compat(struct kunit *test)
+{
+ const struct chandef_compat_case *params = test->param_value;
+ const struct cfg80211_chan_def *ret, *expect;
+ struct cfg80211_chan_def c1 = params->c1;
+
+ /* tests with identical ones */
+ if (!params->c1.chan)
+ c1 = params->c2;
+
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&c1), true);
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&params->c2), true);
+
+ expect = params->compat ? &params->c2 : NULL;
+
+ ret = cfg80211_chandef_compatible(&c1, &params->c2);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+
+ if (!params->c1.chan)
+ expect = &c1;
+
+ ret = cfg80211_chandef_compatible(&params->c2, &c1);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+}
+
+static struct kunit_case chandef_compat_test_cases[] = {
+ KUNIT_CASE_PARAM(test_chandef_compat, chandef_compat_gen_params),
+ {}
+};
+
+static struct kunit_suite chandef_compat = {
+ .name = "cfg80211-chandef-compat",
+ .test_cases = chandef_compat_test_cases,
+};
+
+kunit_test_suite(chandef_compat);
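KUNIT_ARRAY_PARAM_DESC(chandef_compat, chandef_compat_cases, desc) is what generates the chandef_compat_gen_params generator that KUNIT_CASE_PARAM() references: it walks the case array one entry per invocation and copies the desc member into the reported test name. Roughly (a hedged paraphrase of the macro in include/kunit/test.h, not the literal expansion):

    static const void *chandef_compat_gen_params(const void *prev, char *desc)
    {
    	const struct chandef_compat_case *next = prev;

    	next = prev ? next + 1 : chandef_compat_cases;
    	if (next - chandef_compat_cases < ARRAY_SIZE(chandef_compat_cases)) {
    		strscpy(desc, next->desc, KUNIT_PARAM_DESC_SIZE);
    		return next;
    	}
    	return NULL;
    }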
diff --git a/net/wireless/tests/fragmentation.c b/net/wireless/tests/fragmentation.c
index 49a339ca8880..411fae18cd88 100644
--- a/net/wireless/tests/fragmentation.c
+++ b/net/wireless/tests/fragmentation.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for element fragmentation
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -27,7 +27,12 @@ static void defragment_0(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 253);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
KUNIT_EXPECT_EQ(test, ret, 253);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 253);
@@ -63,7 +68,12 @@ static void defragment_1(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 7);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
/* this means the last fragment was not used */
KUNIT_EXPECT_EQ(test, ret, 254 + 7);
@@ -106,10 +116,15 @@ static void defragment_2(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
WLAN_EID_FRAGMENT);
/* this means the last fragment was not used */
KUNIT_EXPECT_EQ(test, ret, 254 + 255 + 1);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 255 + 1);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 254);
KUNIT_EXPECT_MEMEQ(test, data + 254, input + 257 + 2, 255);
KUNIT_EXPECT_MEMEQ(test, data + 254 + 255, input + 2 * 257 + 2, 1);
@@ -134,7 +149,12 @@ static void defragment_at_end(struct kunit *test)
ret = cfg80211_defragment_element((void *)input,
input, sizeof(input),
- data, sizeof(input),
+ NULL, 0,
+ WLAN_EID_FRAGMENT);
+ KUNIT_EXPECT_EQ(test, ret, 254 + 7);
+ ret = cfg80211_defragment_element((void *)input,
+ input, sizeof(input),
+ data, ret,
WLAN_EID_FRAGMENT);
KUNIT_EXPECT_EQ(test, ret, 254 + 7);
KUNIT_EXPECT_MEMEQ(test, data, input + 3, 254);
diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
index 77854161cd22..9f458be71659 100644
--- a/net/wireless/tests/scan.c
+++ b/net/wireless/tests/scan.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for inform_bss functions
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -406,9 +406,32 @@ static struct inform_bss_ml_sta_case {
const char *desc;
int mld_id;
bool sta_prof_vendor_elems;
+ bool include_oper_class;
+ bool nstr;
} inform_bss_ml_sta_cases[] = {
- { .desc = "no_mld_id", .mld_id = 0, .sta_prof_vendor_elems = false },
- { .desc = "mld_id_eq_1", .mld_id = 1, .sta_prof_vendor_elems = true },
+ {
+ .desc = "zero_mld_id",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ }, {
+ .desc = "zero_mld_id_with_oper_class",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ .include_oper_class = true,
+ }, {
+ .desc = "mld_id_eq_1",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ }, {
+ .desc = "mld_id_eq_1_with_oper_class",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ .include_oper_class = true,
+ }, {
+ .desc = "nstr",
+ .mld_id = 0,
+ .nstr = true,
+ },
};
KUNIT_ARRAY_PARAM_DESC(inform_bss_ml_sta, inform_bss_ml_sta_cases, desc)
@@ -440,7 +463,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
struct {
struct ieee80211_neighbor_ap_info info;
struct ieee80211_tbtt_info_ge_11 ap;
- } __packed rnr = {
+ } __packed rnr_normal = {
.info = {
.tbtt_info_hdr = u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT),
.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_ge_11),
@@ -460,6 +483,28 @@ static void test_inform_bss_ml_sta(struct kunit *test)
}
};
struct {
+ struct ieee80211_neighbor_ap_info info;
+ struct ieee80211_rnr_mld_params mld_params;
+ } __packed rnr_nstr = {
+ .info = {
+ .tbtt_info_hdr =
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT) |
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_MLD,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE),
+ .tbtt_info_len = sizeof(struct ieee80211_rnr_mld_params),
+ .op_class = 81,
+ .channel = 11,
+ },
+ .mld_params = {
+ .mld_id = params->mld_id,
+ .params =
+ le16_encode_bits(link_id,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID),
+ }
+ };
+ size_t rnr_len = params->nstr ? sizeof(rnr_nstr) : sizeof(rnr_normal);
+ void *rnr = params->nstr ? (void *)&rnr_nstr : (void *)&rnr_normal;
+ struct {
__le16 control;
u8 var_len;
u8 mld_mac_addr[ETH_ALEN];
@@ -498,7 +543,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
u16_encode_bits(link_id,
IEEE80211_MLE_STA_CONTROL_LINK_ID)),
.var_len = sizeof(sta_prof) - 2 - 2,
- .bssid = { *rnr.ap.bssid },
+ .bssid = { *rnr_normal.ap.bssid },
.beacon_int = cpu_to_le16(101),
.tsf_offset = cpu_to_le64(-123ll),
.capabilities = cpu_to_le16(0xdead),
@@ -515,9 +560,15 @@ static void test_inform_bss_ml_sta(struct kunit *test)
skb_put_u8(input, 4);
skb_put_data(input, "TEST", 4);
+ if (params->include_oper_class) {
+ skb_put_u8(input, WLAN_EID_SUPPORTED_REGULATORY_CLASSES);
+ skb_put_u8(input, 1);
+ skb_put_u8(input, 81);
+ }
+
skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
- skb_put_u8(input, sizeof(rnr));
- skb_put_data(input, &rnr, sizeof(rnr));
+ skb_put_u8(input, rnr_len);
+ skb_put_data(input, rnr, rnr_len);
/* build a multi-link element */
skb_put_u8(input, WLAN_EID_EXTENSION);
@@ -563,9 +614,10 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx.inform_bss_count, 2);
/* Check link_bss *****************************************************/
- link_bss = cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
- IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ link_bss = __cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY,
+ 0);
KUNIT_ASSERT_NOT_NULL(test, link_bss);
KUNIT_EXPECT_EQ(test, link_bss->signal, 0);
KUNIT_EXPECT_EQ(test, link_bss->beacon_interval,
@@ -576,21 +628,43 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, link_bss->channel,
ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(2462)));
+ /* Test wiphy does not set WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY */
+ if (params->nstr) {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for, 0);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons,
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY);
+ KUNIT_EXPECT_NULL(test,
+ cfg80211_get_bss(wiphy, NULL, sta_prof.bssid,
+ NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY));
+ } else {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for,
+ NL80211_BSS_USE_FOR_ALL);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons, 0);
+ }
+
rcu_read_lock();
ies = rcu_dereference(link_bss->ies);
KUNIT_EXPECT_NOT_NULL(test, ies);
KUNIT_EXPECT_EQ(test, ies->tsf, tsf + le64_to_cpu(sta_prof.tsf_offset));
/* Resulting length should be:
* SSID (inherited) + RNR (inherited) + vendor element(s) +
+ * operating class (if requested) +
+ * generated RNR (if MLD ID == 0 and not NSTR) +
* MLE common info + MLE header and control
*/
if (params->sta_prof_vendor_elems)
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 160 + 2 + 165 +
+ 6 + 2 + rnr_len + 2 + 160 + 2 + 165 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
else
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 155 +
+ 6 + 2 + rnr_len + 2 + 155 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
rcu_read_unlock();
@@ -598,6 +672,172 @@ static void test_inform_bss_ml_sta(struct kunit *test)
cfg80211_put_bss(wiphy, link_bss);
}
+static struct cfg80211_parse_colocated_ap_case {
+ const char *desc;
+ u8 op_class;
+ u8 channel;
+ struct ieee80211_neighbor_ap_info info;
+ union {
+ struct ieee80211_tbtt_info_ge_11 tbtt_long;
+ struct ieee80211_tbtt_info_7_8_9 tbtt_short;
+ };
+ bool add_junk;
+ bool same_ssid;
+ bool valid;
+} cfg80211_parse_colocated_ap_cases[] = {
+ {
+ .desc = "wrong_band",
+ .info.op_class = 81,
+ .info.channel = 11,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "wrong_type",
+ /* IEEE80211_AP_INFO_TBTT_HDR_TYPE is in the least significant bits */
+ .info.tbtt_info_hdr = IEEE80211_TBTT_INFO_TYPE_MLD,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short",
+ .info.tbtt_info_len = 6,
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short_mld",
+ .info.tbtt_info_len = 10,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_non_mld",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = true,
+ },
+ {
+ .desc = "colocated_non_mld_invalid_bssid",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0xff, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = true,
+ },
+ {
+ .desc = "colocated_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .add_junk = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_disabled_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ .mld_params.params = cpu_to_le16(IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK),
+ },
+ .valid = false,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(cfg80211_parse_colocated_ap, cfg80211_parse_colocated_ap_cases, desc)
+
+static void test_cfg80211_parse_colocated_ap(struct kunit *test)
+{
+ const struct cfg80211_parse_colocated_ap_case *params = test->param_value;
+ struct sk_buff *input = kunit_zalloc_skb(test, 1024, GFP_KERNEL);
+ struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info info;
+ LIST_HEAD(coloc_ap_list);
+ int count;
+
+ KUNIT_ASSERT_NOT_NULL(test, input);
+
+ info = params->info;
+
+ /* Reasonable values for a colocated AP */
+ if (!info.tbtt_info_len)
+ info.tbtt_info_len = sizeof(params->tbtt_long);
+ if (!info.op_class)
+ info.op_class = 131;
+ if (!info.channel)
+ info.channel = 33;
+ /* Zero is the correct default for .tbtt_info_hdr (one entry, TBTT type) */
+
+ skb_put_u8(input, WLAN_EID_SSID);
+ skb_put_u8(input, 4);
+ skb_put_data(input, "TEST", 4);
+
+ skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
+ skb_put_u8(input, sizeof(info) + info.tbtt_info_len + (params->add_junk ? 3 : 0));
+ skb_put_data(input, &info, sizeof(info));
+ skb_put_data(input, &params->tbtt_long, info.tbtt_info_len);
+
+ if (params->add_junk)
+ skb_put_data(input, "123", 3);
+
+ ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL);
+ ies->len = input->len;
+ memcpy(ies->data, input->data, input->len);
+
+ count = cfg80211_parse_colocated_ap(ies, &coloc_ap_list);
+
+ KUNIT_EXPECT_EQ(test, count, params->valid);
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&coloc_ap_list), params->valid);
+
+ if (params->valid && !list_empty(&coloc_ap_list)) {
+ struct cfg80211_colocated_ap *ap;
+
+ ap = list_first_entry(&coloc_ap_list, typeof(*ap), list);
+ if (info.tbtt_info_len <= sizeof(params->tbtt_short))
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_short.bssid, ETH_ALEN);
+ else
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_long.bssid, ETH_ALEN);
+
+ if (params->same_ssid) {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 4);
+ KUNIT_EXPECT_MEMEQ(test, ap->ssid, "TEST", 4);
+ } else {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 0);
+ }
+ }
+
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+}
+
static struct kunit_case gen_new_ie_test_cases[] = {
KUNIT_CASE_PARAM(test_gen_new_ie, gen_new_ie_gen_params),
KUNIT_CASE(test_gen_new_ie_malformed),
@@ -623,3 +863,16 @@ static struct kunit_suite inform_bss = {
};
kunit_test_suite(inform_bss);
+
+static struct kunit_case scan_6ghz_cases[] = {
+ KUNIT_CASE_PARAM(test_cfg80211_parse_colocated_ap,
+ cfg80211_parse_colocated_ap_gen_params),
+ {}
+};
+
+static struct kunit_suite scan_6ghz = {
+ .name = "cfg80211-scan-6ghz",
+ .test_cases = scan_6ghz_cases,
+};
+
+kunit_test_suite(scan_6ghz);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 1f374c8a17a5..e039e66ab377 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
+ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg80211
@@ -135,7 +140,8 @@
__field(u32, width) \
__field(u32, center_freq1) \
__field(u32, freq1_offset) \
- __field(u32, center_freq2)
+ __field(u32, center_freq2) \
+ __field(u16, punctured)
#define CHAN_DEF_ASSIGN(chandef) \
do { \
if ((chandef) && (chandef)->chan) { \
@@ -148,6 +154,7 @@
__entry->center_freq1 = (chandef)->center_freq1;\
__entry->freq1_offset = (chandef)->freq1_offset;\
__entry->center_freq2 = (chandef)->center_freq2;\
+ __entry->punctured = (chandef)->punctured; \
} else { \
__entry->band = 0; \
__entry->control_freq = 0; \
@@ -156,14 +163,15 @@
__entry->center_freq1 = 0; \
__entry->freq1_offset = 0; \
__entry->center_freq2 = 0; \
+ __entry->punctured = 0; \
} \
} while (0)
#define CHAN_DEF_PR_FMT \
- "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u"
+ "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u, punct: 0x%x"
#define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \
__entry->freq_offset, __entry->width, \
__entry->center_freq1, __entry->freq1_offset, \
- __entry->center_freq2
+ __entry->center_freq2, __entry->punctured
#define FILS_AAD_ASSIGN(fa) \
do { \
@@ -810,8 +818,8 @@ DECLARE_EVENT_CLASS(station_add_change,
params->link_sta_params.opmode_notif_used;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", station flags mask: %u, station flags set: %u, "
- "station modify mask: %u, listen interval: %d, aid: %u, "
+ ", station flags mask: 0x%x, station flags set: 0x%x, "
+ "station modify mask: 0x%x, listen interval: %d, aid: %u, "
"plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
__entry->sta_flags_mask, __entry->sta_flags_set,
@@ -859,6 +867,7 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ENTRY(sta_mac)
__field(u8, subtype)
__field(u16, reason_code)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -866,11 +875,13 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ASSIGN(sta_mac, params->mac);
__entry->subtype = params->subtype;
__entry->reason_code = params->reason_code;
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", subtype: %u, reason_code: %u",
+ ", subtype: %u, reason_code: %u, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
- __entry->subtype, __entry->reason_code)
+ __entry->subtype, __entry->reason_code,
+ __entry->link_id)
);
DEFINE_EVENT(station_del, rdev_del_station,
@@ -1064,7 +1075,7 @@ TRACE_EVENT(rdev_return_int_mpath_info,
),
TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, "
"filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u,"
- " discovery timeout: %u, discovery retries: %u, flags: %u",
+ " discovery timeout: %u, discovery retries: %u, flags: 0x%x",
WIPHY_PR_ARG, __entry->ret, __entry->generation,
__entry->filled, __entry->frame_qlen, __entry->sn,
__entry->metric, __entry->exptime, __entry->discovery_timeout,
@@ -1306,7 +1317,7 @@ TRACE_EVENT(rdev_assoc,
req->fils_nonces, 2 * FILS_NONCE_LEN);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
- ", previous bssid: %pM, use mfp: %s, flags: %u",
+ ", previous bssid: %pM, use mfp: %s, flags: 0x%x",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid,
__entry->prev_bssid, BOOL_TO_STR(__entry->use_mfp),
__entry->flags)
@@ -1428,7 +1439,7 @@ TRACE_EVENT(rdev_connect,
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM"
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u, previous bssid: %pM",
+ "flags: 0x%x, previous bssid: %pM",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
__entry->wpa_versions, __entry->flags, __entry->prev_bssid)
@@ -2324,6 +2335,7 @@ TRACE_EVENT(rdev_channel_switch,
__field(u8, count)
__dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
__dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
+ __field(u8, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -2341,11 +2353,13 @@ TRACE_EVENT(rdev_channel_switch,
memcpy(__get_dynamic_array(pres_ofs),
params->counter_offsets_presp,
params->n_counter_offsets_presp * sizeof(u16));
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", block_tx: %d, count: %u, radar_required: %d",
+ ", block_tx: %d, count: %u, radar_required: %d, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->block_tx, __entry->count, __entry->radar_required)
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->link_id)
);
TRACE_EVENT(rdev_set_qos_map,
@@ -3267,47 +3281,39 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_radar_event,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d1ce3bee2797..2bde8a354631 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -791,15 +791,19 @@ ieee80211_amsdu_subframe_length(void *field, u8 mesh_flags, u8 hdr_type)
bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
{
- int offset = 0, remaining, subframe_len, padding;
+ int offset = 0, subframe_len, padding;
for (offset = 0; offset < skb->len; offset += subframe_len + padding) {
+ int remaining = skb->len - offset;
struct {
__be16 len;
u8 mesh_flags;
} hdr;
u16 len;
+ if (sizeof(hdr) > remaining)
+ return false;
+
if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0)
return false;
@@ -807,7 +811,6 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
mesh_hdr);
subframe_len = sizeof(struct ethhdr) + len;
padding = (4 - subframe_len) & 0x3;
- remaining = skb->len - offset;
if (subframe_len > remaining)
return false;
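
Hoisting the `remaining` computation to the top of the loop means the three-byte subframe header is bounds-checked before it is read, not after. A minimal user-space sketch of the same walk-and-check pattern (layout simplified; this is an illustration, not the kernel code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct subframe_hdr {
            uint16_t len;          /* subframe payload length */
            uint8_t mesh_flags;
    };

    static bool walk_amsdu(const uint8_t *buf, size_t buf_len)
    {
            size_t offset = 0;

            while (offset < buf_len) {
                    size_t remaining = buf_len - offset;
                    struct subframe_hdr hdr;
                    size_t subframe_len, padding;

                    if (sizeof(hdr) > remaining)    /* truncated header */
                            return false;
                    memcpy(&hdr, buf + offset, sizeof(hdr));

                    subframe_len = 14 + hdr.len;    /* ethhdr + payload */
                    padding = (4 - subframe_len) & 0x3;
                    if (subframe_len > remaining)   /* truncated payload */
                            return false;

                    offset += subframe_len + padding;
            }
            return true;
    }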
@@ -825,7 +828,7 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
- int offset = 0, remaining;
+ int offset = 0;
struct {
struct ethhdr eth;
uint8_t flags;
@@ -839,10 +842,14 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
copy_len = sizeof(hdr);
while (!last) {
+ int remaining = skb->len - offset;
unsigned int subframe_len;
int len, mesh_len = 0;
u8 padding;
+ if (copy_len > remaining)
+ goto purge;
+
skb_copy_bits(skb, offset, &hdr, copy_len);
if (iftype == NL80211_IFTYPE_MESH_POINT)
mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags);
@@ -852,7 +859,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
padding = (4 - subframe_len) & 0x3;
/* the last MSDU has no padding */
- remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
/* mitigate A-MSDU aggregation injection attacks */
@@ -2073,6 +2079,82 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
}
EXPORT_SYMBOL(ieee80211_operating_class_to_band);
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, offset = 0;
+ enum nl80211_band band;
+
+ if (!ieee80211_operating_class_to_band(operating_class, &band) ||
+ !chan || band != chan->band)
+ return false;
+
+ control_freq = chan->center_freq;
+ chandef->chan = chan;
+
+ if (control_freq >= 5955)
+ offset = control_freq - 5955;
+ else if (control_freq >= 5745)
+ offset = control_freq - 5745;
+ else if (control_freq >= 5180)
+ offset = control_freq - 5180;
+ offset /= 20;
+
+ switch (operating_class) {
+ case 81: /* 2 GHz band; 20 MHz; channels 1..13 */
+ case 82: /* 2 GHz band; 20 MHz; channel 14 */
+ case 115: /* 5 GHz band; 20 MHz; channels 36,40,44,48 */
+ case 118: /* 5 GHz band; 20 MHz; channels 52,56,60,64 */
+ case 121: /* 5 GHz band; 20 MHz; channels 100..144 */
+ case 124: /* 5 GHz band; 20 MHz; channels 149,153,157,161 */
+ case 125: /* 5 GHz band; 20 MHz; channels 149..177 */
+	case 131: /* 6 GHz band; 20 MHz; channels 1..233 */
+ case 136: /* 6 GHz band; 20 MHz; channel 2 */
+ chandef->center_freq1 = control_freq;
+ chandef->width = NL80211_CHAN_WIDTH_20;
+ return true;
+ case 83: /* 2 GHz band; 40 MHz; channels 1..9 */
+ case 116: /* 5 GHz band; 40 MHz; channels 36,44 */
+ case 119: /* 5 GHz band; 40 MHz; channels 52,60 */
+ case 122: /* 5 GHz band; 40 MHz; channels 100,108,116,124,132,140 */
+ case 126: /* 5 GHz band; 40 MHz; channels 149,157,165,173 */
+ chandef->center_freq1 = control_freq + 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 84: /* 2 GHz band; 40 MHz; channels 5..13 */
+ case 117: /* 5 GHz band; 40 MHz; channels 40,48 */
+ case 120: /* 5 GHz band; 40 MHz; channels 56,64 */
+ case 123: /* 5 GHz band; 40 MHz; channels 104,112,120,128,136,144 */
+ case 127: /* 5 GHz band; 40 MHz; channels 153,161,169,177 */
+ chandef->center_freq1 = control_freq - 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+	case 132: /* 6 GHz band; 40 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 10 - (offset & 1) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 128: /* 5 GHz band; 80 MHz; channels 36..64,100..144,149..177 */
+ case 133: /* 6 GHz band; 80 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 30 - (offset & 3) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ return true;
+ case 129: /* 5 GHz band; 160 MHz; channels 36..64,100..144,149..177 */
+ case 134: /* 6 GHz band; 160 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 70 - (offset & 7) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ return true;
+ case 130: /* 5 GHz band; 80+80 MHz; channels 36..64,100..144,149..177 */
+ case 135: /* 6 GHz band; 80+80 MHz; channels 1,5,..,229 */
+ /* The center_freq2 of 80+80 MHz is unknown */
+ case 137: /* 6 GHz band; 320 MHz; channels 1,5,..,229 */
+ /* 320-1 or 320-2 channelization is unknown */
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ieee80211_operating_class_to_chandef);
+
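
For a concrete instance of the arithmetic above: 6 GHz channel 5 has control_freq 5975 MHz, so offset = (5975 - 5955) / 20 = 1, and the 80 MHz arm yields center_freq1 = 5975 + 30 - (1 & 3) * 20 = 5985 MHz, the centre of the block covering channels 1..13. A stand-alone sketch of just that arm:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t center_freq1_80mhz(uint32_t control_freq)
    {
            uint32_t offset = 0;

            if (control_freq >= 5955)          /* 6 GHz channel plan */
                    offset = control_freq - 5955;
            else if (control_freq >= 5745)     /* upper 5 GHz */
                    offset = control_freq - 5745;
            else if (control_freq >= 5180)     /* lower 5 GHz */
                    offset = control_freq - 5180;
            offset /= 20;                      /* 20 MHz channel index */

            return control_freq + 30 - (offset & 3) * 20;
    }

    int main(void)
    {
            printf("%u MHz\n", center_freq1_80mhz(5975)); /* 5985 MHz */
            return 0;
    }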
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index 68729aa3a5d5..dc72302cbd07 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -17,8 +17,6 @@ config X25
if you want that) and the lower level data link layer protocol LAPB
(say Y to "LAPB Data Link Driver" below if you want that).
- You can read more about X.25 at <https://www.sangoma.com/tutorials/x25/> and
- <http://docwiki.cisco.com/wiki/X.25>.
Information about X.25 for Linux is contained in the files
<file:Documentation/networking/x25.rst> and
<file:Documentation/networking/x25-iface.rst>.
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f7a7c7798c3b..d18d51412cc0 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
goto out;
- len = min_t(unsigned int, len, sizeof(int));
-
rc = -EINVAL;
if (len < 0)
goto out;
+ len = min_t(unsigned int, len, sizeof(int));
+
rc = -EFAULT;
if (put_user(len, optlen))
goto out;
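
The reordering matters because min_t() casts through unsigned int: a negative `len` becomes a huge unsigned value, clamps down to sizeof(int), and the later `len < 0` test can never fire. A small demonstration of the two orderings:

    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            int len = -1;

            /* buggy order: (unsigned)-1 clamps to 4, the sign check is dead */
            int clamped = min_t(unsigned int, len, sizeof(int));
            printf("clamp first: len becomes %d, so len < 0 is %s\n",
                   clamped, clamped < 0 ? "true" : "false");

            /* fixed order: validate the sign before clamping */
            len = -1;
            if (len < 0)
                    printf("check first: rejected with -EINVAL\n");
            return 0;
    }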
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index b78c0e095e22..3404d076a8a3 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -313,10 +313,13 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
+ struct net_device *dev = xdp->rxq->dev;
+ u32 qid = xdp->rxq->queue_index;
+
if (!xsk_is_bound(xs))
return -ENXIO;
- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
return -EINVAL;
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 9f8955367275..09dcea0cbbed 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -194,6 +194,7 @@ static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
}
static const struct sock_diag_handler xsk_diag_handler = {
+ .owner = THIS_MODULE,
.family = AF_XDP,
.dump = xsk_diag_handler_dump,
};
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index d3b3f9e720b3..fe82e2d07300 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -10,6 +10,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
+#include <net/hotdata.h>
static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
struct sock *sk)
@@ -169,7 +170,8 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&ctx->out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index bd4ce21d76d7..161f535c8b94 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -21,6 +21,7 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
+#include <net/hotdata.h>
#include "xfrm_inout.h"
@@ -764,7 +765,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
trans = this_cpu_ptr(&xfrm_trans_tasklet);
- if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index 7d5e920141e9..5ea15037ebd1 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -93,10 +93,10 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_START(xfrm_ifc_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info)
-BTF_SET8_END(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_END(xfrm_ifc_kfunc_set)
static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 21d50d75c260..4df5c06e3ece 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -240,7 +240,6 @@ static void xfrmi_dev_free(struct net_device *dev)
struct xfrm_if *xi = netdev_priv(dev);
gro_cells_destroy(&xi->gro_cells);
- free_percpu(dev->tstats);
}
static int xfrmi_create(struct net_device *dev)
@@ -727,7 +726,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- return xi->p.link;
+ return READ_ONCE(xi->p.link);
}
static const struct net_device_ops xfrmi_netdev_ops = {
@@ -749,6 +748,7 @@ static void xfrmi_dev_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
eth_broadcast_addr(dev->broadcast);
@@ -765,15 +765,9 @@ static int xfrmi_dev_init(struct net_device *dev)
struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&xi->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
dev->features |= NETIF_F_LLTX;
dev->features |= XFRMI_FEATURES;
@@ -957,12 +951,12 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_exit_list, exit_list) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
@@ -973,18 +967,16 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
if (xi)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations xfrmi_net_ops = {
- .exit_batch = xfrmi_exit_batch_net,
+ .exit_batch_rtnl = xfrmi_exit_batch_rtnl,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index da6ecc6b3e15..6affe5cd85d8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -29,6 +29,7 @@
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
+#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
@@ -3505,6 +3506,128 @@ static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int
return 0;
}
+static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
+{
+ const struct flowi4 *fl4 = &fl->u.ip4;
+
+ if (family == AF_INET &&
+ fl4->flowi4_proto == IPPROTO_ICMP &&
+ (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
+ fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
+ return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6) {
+ const struct flowi6 *fl6 = &fl->u.ip6;
+
+ if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
+ (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
+ fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
+ fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
+ return true;
+ }
+#endif
+ return false;
+}
+
+static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
+ const struct flowi *fl, struct flowi *fl1)
+{
+ bool ret = true;
+ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
+ int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) :
+ (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+
+ if (!newskb)
+ return true;
+
+ if (!pskb_pull(newskb, hl))
+ goto out;
+
+ skb_reset_network_header(newskb);
+
+ if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
+ goto out;
+
+ fl1->flowi_oif = fl->flowi_oif;
+ fl1->flowi_mark = fl->flowi_mark;
+ fl1->flowi_tos = fl->flowi_tos;
+ nf_nat_decode_session(newskb, fl1, family);
+ ret = false;
+
+out:
+ consume_skb(newskb);
+ return ret;
+}
+
+static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
+ const struct xfrm_selector *sel,
+ const struct flowi *fl)
+{
+ bool ret = false;
+
+ if (icmp_err_packet(fl, family)) {
+ struct flowi fl1;
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return ret;
+
+ ret = xfrm_selector_match(sel, &fl1, family);
+ }
+
+ return ret;
+}
+
+static inline struct
+xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
+ const struct flowi *fl, unsigned short family,
+ u32 if_id)
+{
+ struct xfrm_policy *pol = NULL;
+
+ if (icmp_err_packet(fl, family)) {
+ struct flowi fl1;
+ struct net *net = dev_net(skb->dev);
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return pol;
+
+ pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
+ }
+
+ return pol;
+}
+
+static inline struct
+dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
+ unsigned short family, struct dst_entry *dst)
+{
+ if (icmp_err_packet(fl, family)) {
+ struct net *net = dev_net(skb->dev);
+ struct dst_entry *dst2;
+ struct flowi fl1;
+
+ if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
+ return dst;
+
+ dst_hold(dst);
+
+ dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
+
+ if (IS_ERR(dst2))
+ return dst;
+
+ if (dst2->xfrm) {
+ dst_release(dst);
+ dst = dst2;
+ } else {
+ dst_release(dst2);
+ }
+ }
+
+ return dst;
+}
+
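
The net effect on the forwarding path: when an inbound ICMP error matches no policy, the lookup is retried against the flow decoded from the quoted inner packet, per RFC 4301. A toy user-space model of that retry logic, with invented stand-ins for the policy lookup (not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct flow { int proto; int inner_proto; };

    static bool lookup(int proto)
    {
            return proto == 6;      /* only TCP has a FWD policy in this toy */
    }

    static bool fwd_check(const struct flow *fl)
    {
            if (lookup(fl->proto))
                    return true;
            if (fl->proto == 1)     /* ICMP error: retry on the inner flow */
                    return lookup(fl->inner_proto);
            return false;
    }

    int main(void)
    {
            struct flow icmp_err = { .proto = 1, .inner_proto = 6 };

            printf("forwarded: %s\n", fwd_check(&icmp_err) ? "yes" : "no");
            return 0;
    }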
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
unsigned short family)
{
@@ -3551,9 +3674,17 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
+ int ret = 0;
+
if (!xfrm_selector_match(&x->sel, &fl, family)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
- return 0;
+ ret = 1;
+ if (x->props.flags & XFRM_STATE_ICMP &&
+ xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
+ ret = 0;
+ if (ret) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
+ return 0;
+ }
}
}
}
@@ -3576,6 +3707,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
return 0;
}
+ if (!pol && dir == XFRM_POLICY_FWD)
+ pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
+
if (!pol) {
if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
@@ -3709,6 +3843,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
res = 0;
dst = NULL;
}
+
+ if (dst && !dst->xfrm)
+ dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
+
skb_dst_set(skb, dst);
return res;
}
@@ -4027,10 +4165,7 @@ static int __net_init xfrm_policy_init(struct net *net)
int dir, err;
if (net_eq(net, &init_net)) {
- xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
- sizeof(struct xfrm_dst),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL);
+ xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = rhashtable_init(&xfrm_policy_inexact_table,
&xfrm_pol_inexact_params);
BUG_ON(err);
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fee9b5cf37a7..5f9bf8e5c933 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -52,6 +52,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+ xfrm_state_update_stats(net);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bda5327bf34d..0c306473a79d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -1957,6 +1957,19 @@ int xfrm_state_check_expire(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_check_expire);
+void xfrm_state_update_stats(struct net *net)
+{
+ struct xfrm_state *x;
+ int i;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
+ xfrm_dev_state_update_stats(x);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+}
+
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
diff --git a/net/xfrm/xfrm_state_bpf.c b/net/xfrm/xfrm_state_bpf.c
index 9e20d4a377f7..2248eda741f8 100644
--- a/net/xfrm/xfrm_state_bpf.c
+++ b/net/xfrm/xfrm_state_bpf.c
@@ -117,10 +117,10 @@ __bpf_kfunc void bpf_xdp_xfrm_state_release(struct xfrm_state *x)
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_state_kfunc_set)
+BTF_KFUNCS_START(xfrm_state_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
-BTF_SET8_END(xfrm_state_kfunc_set)
+BTF_KFUNCS_END(xfrm_state_kfunc_set)
static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 912c1189ba41..810b520493f3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index e457b3c7cb2f..96e09c6e8530 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -4,7 +4,7 @@
//! Network PHY device.
//!
-//! C headers: [`include/linux/phy.h`](../../../../../../../include/linux/phy.h).
+//! C headers: [`include/linux/phy.h`](srctree/include/linux/phy.h).
use crate::{bindings, error::*, prelude::*, str::CStr, types::Opaque};
@@ -16,7 +16,7 @@ use core::marker::PhantomData;
///
/// Some PHY drivers access the state of the PHY's software state machine.
///
-/// [`enum phy_state`]: ../../../../../../../include/linux/phy.h
+/// [`enum phy_state`]: srctree/include/linux/phy.h
#[derive(PartialEq, Eq)]
pub enum DeviceState {
/// PHY device and driver are not ready for anything.
@@ -61,7 +61,7 @@ pub enum DuplexMode {
/// Referencing a `phy_device` using this struct asserts that you are in
/// a context where all methods defined on this struct are safe to call.
///
-/// [`struct phy_device`]: ../../../../../../../include/linux/phy.h
+/// [`struct phy_device`]: srctree/include/linux/phy.h
// During the calls to most functions in [`Driver`], the C side (`PHYLIB`) holds a lock that is
// unique for every instance of [`Device`]. `PHYLIB` uses a different serialization technique for
// [`Driver::resume`] and [`Driver::suspend`]: `PHYLIB` updates `phy_device`'s state with
@@ -486,7 +486,7 @@ impl<T: Driver> Adapter<T> {
///
/// `self.0` is always in a valid state.
///
-/// [`struct phy_driver`]: ../../../../../../../include/linux/phy.h
+/// [`struct phy_driver`]: srctree/include/linux/phy.h
#[repr(transparent)]
pub struct DriverVTable(Opaque<bindings::phy_driver>);
@@ -580,12 +580,12 @@ pub trait Driver {
/// Issues a PHY software reset.
fn soft_reset(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Probes the hardware to determine what abilities it has.
fn get_features(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Returns true if this is a suitable driver for the given phydev.
@@ -597,32 +597,32 @@ pub trait Driver {
/// Configures the advertisement and resets auto-negotiation
/// if auto-negotiation is enabled.
fn config_aneg(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Determines the negotiated speed and duplex.
fn read_status(_dev: &mut Device) -> Result<u16> {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Suspends the hardware, saving state if needed.
fn suspend(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Resumes the hardware, restoring state if needed.
fn resume(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD read function for reading a MMD register.
fn read_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16) -> Result<u16> {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD write function for writing a MMD register.
fn write_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16, _val: u16) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Callback for notification of link change.
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index d2fbcf963cdf..07ff471ed6ae 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -370,7 +370,7 @@ static void run_perf_test(int tasks)
static void fill_lpm_trie(void)
{
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
unsigned long value = 0;
unsigned int i;
int r;
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index 9d41db09c480..266fdd0b025d 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -91,7 +91,7 @@ static int recv_msg(struct sockaddr_nl sock_addr, int sock)
static void read_route(struct nlmsghdr *nh, int nll)
{
char dsts[24], gws[24], ifs[16], dsts_len[24], metrics[24];
- struct bpf_lpm_trie_key *prefix_key;
+ struct bpf_lpm_trie_key_u8 *prefix_key;
struct rtattr *rt_attr;
struct rtmsg *rt_msg;
int rtm_family;
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
index 0669bac5e900..4606944984ee 100755
--- a/scripts/bpf_doc.py
+++ b/scripts/bpf_doc.py
@@ -827,7 +827,7 @@ class PrinterHelpers(Printer):
print(' *{}{}'.format(' \t' if line else '', line))
print(' */')
- print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
+ print('static %s %s(* const %s)(' % (self.map_type(proto['ret_type']),
proto['ret_star'], proto['name']), end='')
comma = ''
for i, a in enumerate(proto['args']):
diff --git a/security/security.c b/security/security.c
index 7035ee35a393..aef69632d0a9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -5448,29 +5448,87 @@ int security_bpf_prog(struct bpf_prog *prog)
}
/**
- * security_bpf_map_alloc() - Allocate a bpf map LSM blob
- * @map: bpf map
+ * security_bpf_map_create() - Check if BPF map creation is allowed
+ * @map: BPF map object
+ * @attr: BPF syscall attributes used to create BPF map
+ * @token: BPF token used to grant user access
+ *
+ * Do a check when the kernel creates a new BPF map. This is also the
+ * point where the LSM blob is allocated for LSMs that need one.
+ *
+ * Return: Returns 0 on success, error on failure.
+ */
+int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
+{
+ return call_int_hook(bpf_map_create, 0, map, attr, token);
+}
+
+/**
+ * security_bpf_prog_load() - Check if loading of BPF program is allowed
+ * @prog: BPF program object
+ * @attr: BPF syscall attributes used to create BPF program
+ * @token: BPF token used to grant user access to BPF subsystem
*
- * Initialize the security field inside bpf map.
+ * Perform an access control check when the kernel loads a BPF program and
+ * allocates the associated BPF program object. This hook is also responsible for
+ * allocating any required LSM state for the BPF program.
*
* Return: Returns 0 on success, error on failure.
*/
-int security_bpf_map_alloc(struct bpf_map *map)
+int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
{
- return call_int_hook(bpf_map_alloc_security, 0, map);
+ return call_int_hook(bpf_prog_load, 0, prog, attr, token);
}
/**
- * security_bpf_prog_alloc() - Allocate a bpf program LSM blob
- * @aux: bpf program aux info struct
+ * security_bpf_token_create() - Check if creation of a BPF token is allowed
+ * @token: BPF token object
+ * @attr: BPF syscall attributes used to create BPF token
+ * @path: path pointing to BPF FS mount point from which BPF token is created
*
- * Initialize the security field inside bpf program.
+ * Do a check when the kernel instantiates a new BPF token object from a BPF FS
+ * instance. This is also the point where an LSM blob can be allocated for LSMs.
*
* Return: Returns 0 on success, error on failure.
*/
-int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
{
- return call_int_hook(bpf_prog_alloc_security, 0, aux);
+ return call_int_hook(bpf_token_create, 0, token, attr, path);
+}
+
+/**
+ * security_bpf_token_cmd() - Check if BPF token is allowed to delegate
+ * requested BPF syscall command
+ * @token: BPF token object
+ * @cmd: BPF syscall command requested to be delegated by BPF token
+ *
+ * Do a check when the kernel decides whether provided BPF token should allow
+ * delegation of requested BPF syscall command.
+ *
+ * Return: Returns 0 on success, error on failure.
+ */
+int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ return call_int_hook(bpf_token_cmd, 0, token, cmd);
+}
+
+/**
+ * security_bpf_token_capable() - Check if BPF token is allowed to delegate
+ * requested BPF-related capability
+ * @token: BPF token object
+ * @cap: capabilities requested to be delegated by BPF token
+ *
+ * Do a check when the kernel decides whether provided BPF token should allow
+ * delegation of requested BPF-related capabilities.
+ *
+ * Return: Returns 0 on success, error on failure.
+ */
+int security_bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return call_int_hook(bpf_token_capable, 0, token, cap);
}
/**
@@ -5481,18 +5539,29 @@ int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
*/
void security_bpf_map_free(struct bpf_map *map)
{
- call_void_hook(bpf_map_free_security, map);
+ call_void_hook(bpf_map_free, map);
+}
+
+/**
+ * security_bpf_prog_free() - Free a BPF program's LSM blob
+ * @prog: BPF program struct
+ *
+ * Clean up the security information stored inside BPF program.
+ */
+void security_bpf_prog_free(struct bpf_prog *prog)
+{
+ call_void_hook(bpf_prog_free, prog);
}
/**
- * security_bpf_prog_free() - Free a bpf program's LSM blob
- * @aux: bpf program aux info struct
+ * security_bpf_token_free() - Free a BPF token's LSM blob
+ * @token: BPF token struct
*
- * Clean up the security information stored inside bpf prog.
+ * Clean up the security information stored inside BPF token.
*/
-void security_bpf_prog_free(struct bpf_prog_aux *aux)
+void security_bpf_token_free(struct bpf_token *token)
{
- call_void_hook(bpf_prog_free_security, aux);
+ call_void_hook(bpf_token_free, token);
}
#endif /* CONFIG_BPF_SYSCALL */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 338b023a8c3e..7da35f51a0ef 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6920,7 +6920,8 @@ static int selinux_bpf_prog(struct bpf_prog *prog)
BPF__PROG_RUN, NULL);
}
-static int selinux_bpf_map_alloc(struct bpf_map *map)
+static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
{
struct bpf_security_struct *bpfsec;
@@ -6942,7 +6943,8 @@ static void selinux_bpf_map_free(struct bpf_map *map)
kfree(bpfsec);
}
-static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
+static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
{
struct bpf_security_struct *bpfsec;
@@ -6951,16 +6953,39 @@ static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
return -ENOMEM;
bpfsec->sid = current_sid();
- aux->security = bpfsec;
+ prog->aux->security = bpfsec;
return 0;
}
-static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
+static void selinux_bpf_prog_free(struct bpf_prog *prog)
{
- struct bpf_security_struct *bpfsec = aux->security;
+ struct bpf_security_struct *bpfsec = prog->aux->security;
- aux->security = NULL;
+ prog->aux->security = NULL;
+ kfree(bpfsec);
+}
+
+static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+{
+ struct bpf_security_struct *bpfsec;
+
+ bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
+ if (!bpfsec)
+ return -ENOMEM;
+
+ bpfsec->sid = current_sid();
+ token->security = bpfsec;
+
+ return 0;
+}
+
+static void selinux_bpf_token_free(struct bpf_token *token)
+{
+ struct bpf_security_struct *bpfsec = token->security;
+
+ token->security = NULL;
kfree(bpfsec);
}
#endif
@@ -7324,8 +7349,9 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(bpf, selinux_bpf),
LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
- LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
- LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
+ LSM_HOOK_INIT(bpf_map_free, selinux_bpf_map_free),
+ LSM_HOOK_INIT(bpf_prog_free, selinux_bpf_prog_free),
+ LSM_HOOK_INIT(bpf_token_free, selinux_bpf_token_free),
#endif
#ifdef CONFIG_PERF_EVENTS
@@ -7382,8 +7408,9 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
#endif
#ifdef CONFIG_BPF_SYSCALL
- LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
- LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
+ LSM_HOOK_INIT(bpf_map_create, selinux_bpf_map_create),
+ LSM_HOOK_INIT(bpf_prog_load, selinux_bpf_prog_load),
+ LSM_HOOK_INIT(bpf_token_create, selinux_bpf_token_create),
#endif
#ifdef CONFIG_PERF_EVENTS
LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index 5006e724d1bc..5e60825818dd 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -257,18 +257,48 @@ EXAMPLES
return 0;
}
-This is example BPF application with two BPF programs and a mix of BPF maps
-and global variables. Source code is split across two source code files.
+**$ cat example3.bpf.c**
+
+::
+
+ #include <linux/ptrace.h>
+ #include <linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
+ /* This header file is provided by the bpf_testmod module. */
+ #include "bpf_testmod.h"
+
+ int test_2_result = 0;
+
+ /* bpf_Testmod.ko calls this function, passing a "4"
+ * and testmod_map->data.
+ */
+ SEC("struct_ops/test_2")
+ void BPF_PROG(test_2, int a, int b)
+ {
+ test_2_result = a + b;
+ }
+
+ SEC(".struct_ops")
+ struct bpf_testmod_ops testmod_map = {
+ .test_2 = (void *)test_2,
+ .data = 0x1,
+ };
+
+This is an example BPF application with three BPF programs and a mix of
+BPF maps and global variables. The source code is split across three
+files.
**$ clang --target=bpf -g example1.bpf.c -o example1.bpf.o**
**$ clang --target=bpf -g example2.bpf.c -o example2.bpf.o**
-**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o**
+**$ clang --target=bpf -g example3.bpf.c -o example3.bpf.o**
+
+**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o example3.bpf.o**
-This set of commands compiles *example1.bpf.c* and *example2.bpf.c*
-individually and then statically links respective object files into the final
-BPF ELF object file *example.bpf.o*.
+This set of commands compiles *example1.bpf.c*, *example2.bpf.c* and
+*example3.bpf.c* individually and then statically links respective object
+files into the final BPF ELF object file *example.bpf.o*.
**$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h**
@@ -291,7 +321,15 @@ BPF ELF object file *example.bpf.o*.
struct bpf_map *data;
struct bpf_map *bss;
struct bpf_map *my_map;
+ struct bpf_map *testmod_map;
} maps;
+ struct {
+ struct example__testmod_map__bpf_testmod_ops {
+ const struct bpf_program *test_1;
+ const struct bpf_program *test_2;
+ int data;
+ } *testmod_map;
+ } struct_ops;
struct {
struct bpf_program *handle_sys_enter;
struct bpf_program *handle_sys_exit;
@@ -304,6 +342,7 @@ BPF ELF object file *example.bpf.o*.
struct {
int x;
} data;
+ int test_2_result;
} *bss;
struct example__data {
_Bool global_flag;
@@ -342,10 +381,16 @@ BPF ELF object file *example.bpf.o*.
skel->rodata->param1 = 128;
+ /* Change the value through the pointer of shadow type */
+ skel->struct_ops.testmod_map->data = 13;
+
err = example__load(skel);
if (err)
goto cleanup;
+ /* The result of the function test_2() */
+ printf("test_2_result: %d\n", skel->bss->test_2_result);
+
err = example__attach(skel);
if (err)
goto cleanup;
@@ -372,6 +417,7 @@ BPF ELF object file *example.bpf.o*.
::
+ test_2_result: 17
my_map name: my_map
sys_enter prog FD: 8
my_static_var: 7
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 3b7ba037af95..9d6a314dfd7a 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -55,7 +55,7 @@ MAP COMMANDS
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
-| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** }
+| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** | **arena** }
DESCRIPTION
===========
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index ee3ce2b8000d..4fa4ade1ce74 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -7,6 +7,7 @@
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
@@ -54,11 +55,27 @@ static bool str_has_suffix(const char *str, const char *suffix)
return true;
}
+static const struct btf_type *
+resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
+{
+ const struct btf_type *t;
+
+ t = skip_mods_and_typedefs(btf, id, NULL);
+ if (!btf_is_ptr(t))
+ return NULL;
+
+ t = skip_mods_and_typedefs(btf, t->type, res_id);
+
+ return btf_is_func_proto(t) ? t : NULL;
+}
+
static void get_obj_name(char *name, const char *file)
{
- /* Using basename() GNU version which doesn't modify arg. */
- strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
- name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ char file_copy[PATH_MAX];
+
+	/* Use the POSIX version of basename() to be more portable. */
+ strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
+ strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
if (str_has_suffix(name, ".o"))
name[strlen(name) - 2] = '\0';
sanitize_identifier(name);
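
Unlike the GNU version, POSIX basename(3) is allowed to modify its argument, hence the intermediate copy before taking the base name. A minimal sketch of the safe pattern:

    #include <libgen.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *file = "/some/dir/example.bpf.o";
            char copy[PATH_MAX];

            /* never hand a caller-owned (or literal) path to basename()
             * directly -- it may write into the buffer */
            strncpy(copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
            printf("%s\n", basename(copy)); /* example.bpf.o */
            return 0;
    }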
@@ -103,6 +120,12 @@ static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
int i, n;
+ /* recognize hard coded LLVM section name */
+ if (strcmp(sec_name, ".arena.1") == 0) {
+ /* this is the name to use in skeleton */
+ snprintf(buf, buf_sz, "arena");
+ return true;
+ }
for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
const char *pfx = pfxs[i];
@@ -231,8 +254,15 @@ static const struct btf_type *find_type_for_map(struct btf *btf, const char *map
return NULL;
}
-static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
+static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
+ size_t tmp_sz;
+
+ if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
+ snprintf(buf, sz, "arena");
+ return true;
+ }
+
if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
return false;
@@ -257,7 +287,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
- if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
@@ -310,7 +340,7 @@ static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
- if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
@@ -487,7 +517,7 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
", obj_name);
bpf_object__for_each_map(map, obj) {
- if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
@@ -703,7 +733,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
const void *mmap_data = NULL;
size_t mmap_size = 0;
- if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ if (!is_mmapable_map(map, ident, sizeof(ident)))
continue;
codegen("\
@@ -765,7 +795,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
bpf_object__for_each_map(map, obj) {
const char *mmap_flags;
- if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ if (!is_mmapable_map(map, ident, sizeof(ident)))
continue;
if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
@@ -854,7 +884,7 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
",
i, bpf_map__name(map), i, ident);
/* memory-mapped internal maps */
- if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
+ if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
i, ident);
}
@@ -906,6 +936,207 @@ codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_li
}
}
+static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident,
+ const struct btf_type *map_type, __u32 map_type_id)
+{
+ LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3);
+ const struct btf_type *member_type;
+ __u32 offset, next_offset = 0;
+ const struct btf_member *m;
+ struct btf_dump *d = NULL;
+ const char *member_name;
+ __u32 member_type_id;
+ int i, err = 0, n;
+ int size;
+
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ n = btf_vlen(map_type);
+ for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
+ member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
+ member_name = btf__name_by_offset(btf, m->name_off);
+
+ offset = m->offset / 8;
+ if (next_offset < offset)
+ printf("\t\t\tchar __padding_%d[%d];\n", i, offset - next_offset);
+
+ switch (btf_kind(member_type)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* scalar type */
+ printf("\t\t\t");
+ opts.field_name = member_name;
+ err = btf_dump__emit_type_decl(d, member_type_id, &opts);
+ if (err) {
+ p_err("Failed to emit type declaration for %s: %d", member_name, err);
+ goto out;
+ }
+ printf(";\n");
+
+ size = btf__resolve_size(btf, member_type_id);
+ if (size < 0) {
+ p_err("Failed to resolve size of %s: %d\n", member_name, size);
+ err = size;
+ goto out;
+ }
+
+ next_offset = offset + size;
+ break;
+
+ case BTF_KIND_PTR:
+ if (resolve_func_ptr(btf, m->type, NULL)) {
+ /* Function pointer */
+ printf("\t\t\tstruct bpf_program *%s;\n", member_name);
+
+ next_offset = offset + sizeof(void *);
+ break;
+ }
+ /* All pointer types are unsupported except for
+ * function pointers.
+ */
+ fallthrough;
+
+ default:
+ /* Unsupported types
+ *
+ * Types other than scalar types and function
+ * pointers are currently not supported in order to
+ * prevent conflicts in the generated code caused
+ * by multiple definitions. For instance, if the
+ * struct type FOO is used in a struct_ops map,
+ * bpftool has to generate definitions for FOO,
+ * which may result in conflicts if FOO is defined
+ * in different skeleton files.
+ */
+ size = btf__resolve_size(btf, member_type_id);
+ if (size < 0) {
+ p_err("Failed to resolve size of %s: %d\n", member_name, size);
+ err = size;
+ goto out;
+ }
+ printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);
+
+ next_offset = offset + size;
+ break;
+ }
+ }
+
+ /* Cannot fail since it must be a struct type */
+ size = btf__resolve_size(btf, map_type_id);
+ if (next_offset < (__u32)size)
+ printf("\t\t\tchar __padding_end[%d];\n", size - next_offset);
+
+out:
+ btf_dump__free(d);
+
+ return err;
+}
+
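
A stand-alone model of the gap-filling logic above, using invented (offset, size) pairs; in the real code the offsets and sizes come from BTF member info:

    #include <stdio.h>

    int main(void)
    {
            /* supported members of the struct_ops type; the hole at
             * bytes 16..23 gets a __padding_ filler so the shadow
             * struct keeps the original layout */
            struct { unsigned int off, sz; } m[] = {
                    { 0, 8 }, { 8, 8 }, { 24, 4 },
            };
            unsigned int i, next = 0, total = 32;

            for (i = 0; i < 3; i++) {
                    if (next < m[i].off)
                            printf("char __padding_%u[%u];\n",
                                   i, m[i].off - next);
                    printf("/* member %u */\n", i);
                    next = m[i].off + m[i].sz;
            }
            if (next < total)
                    printf("char __padding_end[%u];\n", total - next);
            return 0;
    }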
+/* Generate the pointer of the shadow type for a struct_ops map.
+ *
+ * This function adds a pointer of the shadow type for a struct_ops map.
+ * The members of a struct_ops map can be exported through a pointer to a
+ * shadow type. The user can access these members through the pointer.
+ *
+ * A shadow type does not include all members; only members of certain
+ * types are exported. These are scalar types and function pointers. The
+ * function pointers are translated to pointers to struct bpf_program,
+ * while scalar types keep the original type without any modifiers.
+ *
+ * Unsupported types will be translated to a char array to occupy the same
+ * space as the original field, being renamed as __unsupported_*. The user
+ * should treat these fields as opaque data.
+ */
+static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident,
+ const struct bpf_map *map)
+{
+ const struct btf_type *map_type;
+ const char *type_name;
+ __u32 map_type_id;
+ int err;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id == 0)
+ return -EINVAL;
+ map_type = btf__type_by_id(btf, map_type_id);
+ if (!map_type)
+ return -EINVAL;
+
+ type_name = btf__name_by_offset(btf, map_type->name_off);
+
+ printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);
+
+ err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id);
+ if (err)
+ return err;
+
+ printf("\t\t} *%s;\n", ident);
+
+ return 0;
+}
+
+static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj)
+{
+ int err, st_ops_cnt = 0;
+ struct bpf_map *map;
+ char ident[256];
+
+ if (!btf)
+ return 0;
+
+ /* Generate the pointers to shadow types of
+ * struct_ops maps.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ if (st_ops_cnt == 0) /* first struct_ops map */
+ printf("\tstruct {\n");
+ st_ops_cnt++;
+
+ err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
+ if (err)
+ return err;
+ }
+
+ if (st_ops_cnt)
+ printf("\t} struct_ops;\n");
+
+ return 0;
+}
+
+/* Generate the code to initialize the pointers of shadow types. */
+static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ char ident[256];
+
+ if (!btf)
+ return;
+
+	/* Initialize the pointers to the shadow types of
+ * struct_ops maps.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ codegen("\
+ \n\
+ obj->struct_ops.%1$s = bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
+ \n\
+ ", ident);
+ }
+}
+
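
A hypothetical usage sketch against the skeleton from the bpftool-gen.rst example earlier in this patch; example.skel.h, testmod_map and test_2_result are taken from that documentation example, not from real headers:

    #include <stdio.h>
    #include "example.skel.h"  /* hypothetical bpftool gen skeleton output */

    int main(void)
    {
            struct example *skel = example__open();

            if (!skel)
                    return 1;
            /* tune a scalar member through the shadow pointer before load */
            skel->struct_ops.testmod_map->data = 13;
            if (example__load(skel))
                    goto out;
            printf("test_2_result: %d\n", skel->bss->test_2_result);
    out:
            example__destroy(skel);
            return 0;
    }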
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
@@ -1049,6 +1280,11 @@ static int do_skeleton(int argc, char **argv)
printf("\t} maps;\n");
}
+ btf = bpf_object__btf(obj);
+ err = gen_st_ops_shadow(obj_name, btf, obj);
+ if (err)
+ goto out;
+
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
@@ -1072,7 +1308,6 @@ static int do_skeleton(int argc, char **argv)
printf("\t} links;\n");
}
- btf = bpf_object__btf(obj);
if (btf) {
err = codegen_datasecs(obj, obj_name);
if (err)
@@ -1130,6 +1365,12 @@ static int do_skeleton(int argc, char **argv)
if (err) \n\
goto err_out; \n\
\n\
+ ", obj_name);
+
+ gen_st_ops_shadow_init(btf, obj);
+
+ codegen("\
+ \n\
return obj; \n\
err_out: \n\
%1$s__destroy(obj); \n\
@@ -1389,7 +1630,7 @@ static int do_subskeleton(int argc, char **argv)
/* Also count all maps that have a name */
map_cnt++;
- if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ if (!is_mmapable_map(map, ident, sizeof(ident)))
continue;
map_type_id = bpf_map__btf_value_type_id(map);
@@ -1439,6 +1680,10 @@ static int do_subskeleton(int argc, char **argv)
printf("\t} maps;\n");
}
+ err = gen_st_ops_shadow(obj_name, btf, obj);
+ if (err)
+ goto out;
+
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
@@ -1507,7 +1752,7 @@ static int do_subskeleton(int argc, char **argv)
/* walk through each symbol and emit the runtime representation */
bpf_object__for_each_map(map, obj) {
- if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ if (!is_mmapable_map(map, ident, sizeof(ident)))
continue;
map_type_id = bpf_map__btf_value_type_id(map);
@@ -1550,6 +1795,12 @@ static int do_subskeleton(int argc, char **argv)
if (err) \n\
goto err; \n\
\n\
+ ");
+
+ gen_st_ops_shadow_init(btf, obj);
+
+ codegen("\
+ \n\
return obj; \n\
err: \n\
%1$s__destroy(obj); \n\
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index cb46667a6b2e..afde9d0c2ea1 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -249,18 +249,44 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info)
return err;
}
-static int cmp_u64(const void *A, const void *B)
+struct addr_cookie {
+ __u64 addr;
+ __u64 cookie;
+};
+
+static int cmp_addr_cookie(const void *A, const void *B)
{
- const __u64 *a = A, *b = B;
+ const struct addr_cookie *a = A, *b = B;
- return *a - *b;
+ if (a->addr == b->addr)
+ return 0;
+ return a->addr < b->addr ? -1 : 1;
+}
+
+static struct addr_cookie *
+get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
+{
+ struct addr_cookie *data;
+ __u32 i;
+
+ data = calloc(count, sizeof(data[0]));
+ if (!data) {
+ p_err("mem alloc failed");
+ return NULL;
+ }
+ for (i = 0; i < count; i++) {
+ data[i].addr = addrs[i];
+ data[i].cookie = cookies[i];
+ }
+ qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
+ return data;
}
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
+ struct addr_cookie *data;
__u32 i, j = 0;
- __u64 *addrs;
jsonw_bool_field(json_wtr, "retprobe",
info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
@@ -268,14 +294,20 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
jsonw_name(json_wtr, "funcs");
jsonw_start_array(json_wtr);
- addrs = u64_to_ptr(info->kprobe_multi.addrs);
- qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);
+ data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
+ u64_to_ptr(info->kprobe_multi.cookies),
+ info->kprobe_multi.count);
+ if (!data)
+ return;
/* Load it once for all. */
if (!dd.sym_count)
kernel_syms_load(&dd);
+ if (!dd.sym_count)
+ goto error;
+
for (i = 0; i < dd.sym_count; i++) {
- if (dd.sym_mapping[i].address != addrs[j])
+ if (dd.sym_mapping[i].address != data[j].addr)
continue;
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
@@ -287,11 +319,14 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
} else {
jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
}
+ jsonw_uint_field(json_wtr, "cookie", data[j].cookie);
jsonw_end_object(json_wtr);
if (j++ == info->kprobe_multi.count)
break;
}
jsonw_end_array(json_wtr);
+error:
+ free(data);
}
static __u64 *u64_to_arr(__u64 val)
@@ -334,6 +369,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
u64_to_ptr(info->perf_event.kprobe.func_name));
jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
+ jsonw_uint_field(wtr, "cookie", info->perf_event.kprobe.cookie);
}
static void
@@ -343,6 +379,7 @@ show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_string_field(wtr, "file",
u64_to_ptr(info->perf_event.uprobe.file_name));
jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
+ jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie);
}
static void
@@ -350,6 +387,7 @@ show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
{
jsonw_string_field(wtr, "tracepoint",
u64_to_ptr(info->perf_event.tracepoint.tp_name));
+ jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie);
}
static char *perf_config_hw_cache_str(__u64 config)
@@ -426,6 +464,8 @@ show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
else
jsonw_uint_field(wtr, "event_config", config);
+ jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie);
+
if (type == PERF_TYPE_HW_CACHE && perf_config)
free((void *)perf_config);
}
@@ -670,8 +710,8 @@ void netfilter_dump_plain(const struct bpf_link_info *info)
static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
+ struct addr_cookie *data;
__u32 i, j = 0;
- __u64 *addrs;
if (!info->kprobe_multi.count)
return;
@@ -683,21 +723,24 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
printf("func_cnt %u ", info->kprobe_multi.count);
if (info->kprobe_multi.missed)
printf("missed %llu ", info->kprobe_multi.missed);
- addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
- qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
+ data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
+ u64_to_ptr(info->kprobe_multi.cookies),
+ info->kprobe_multi.count);
+ if (!data)
+ return;
/* Load it once for all. */
if (!dd.sym_count)
kernel_syms_load(&dd);
if (!dd.sym_count)
- return;
+ goto error;
- printf("\n\t%-16s %s", "addr", "func [module]");
+ printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
for (i = 0; i < dd.sym_count; i++) {
- if (dd.sym_mapping[i].address != addrs[j])
+ if (dd.sym_mapping[i].address != data[j].addr)
continue;
- printf("\n\t%016lx %s",
- dd.sym_mapping[i].address, dd.sym_mapping[i].name);
+ printf("\n\t%016lx %-16llx %s",
+ dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
if (dd.sym_mapping[i].module[0] != '\0')
printf(" [%s] ", dd.sym_mapping[i].module);
else
@@ -706,6 +749,8 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
if (j++ == info->kprobe_multi.count)
break;
}
+error:
+ free(data);
}
static void show_uprobe_multi_plain(struct bpf_link_info *info)
@@ -754,6 +799,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
printf("+%#x", info->perf_event.kprobe.offset);
if (info->perf_event.kprobe.missed)
printf(" missed %llu", info->perf_event.kprobe.missed);
+ if (info->perf_event.kprobe.cookie)
+ printf(" cookie %llu", info->perf_event.kprobe.cookie);
printf(" ");
}
@@ -770,6 +817,8 @@ static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
else
printf("\n\tuprobe ");
printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
+ if (info->perf_event.uprobe.cookie)
+ printf("cookie %llu ", info->perf_event.uprobe.cookie);
}
static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
@@ -781,6 +830,8 @@ static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
return;
printf("\n\ttracepoint %s ", buf);
+ if (info->perf_event.tracepoint.cookie)
+ printf("cookie %llu ", info->perf_event.tracepoint.cookie);
}
static void show_perf_event_event_plain(struct bpf_link_info *info)
@@ -802,6 +853,9 @@ static void show_perf_event_event_plain(struct bpf_link_info *info)
else
printf("%llu ", config);
+ if (info->perf_event.event.cookie)
+ printf("cookie %llu ", info->perf_event.event.cookie);
+
if (type == PERF_TYPE_HW_CACHE && perf_config)
free((void *)perf_config);
}
@@ -952,6 +1006,14 @@ again:
return -ENOMEM;
}
info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ cookies = calloc(count, sizeof(__u64));
+ if (!cookies) {
+ p_err("mem alloc failed");
+ free(addrs);
+ close(fd);
+ return -ENOMEM;
+ }
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
goto again;
}
}
@@ -977,7 +1039,7 @@ again:
cookies = calloc(count, sizeof(__u64));
if (!cookies) {
p_err("mem alloc failed");
- free(cookies);
+ free(ref_ctr_offsets);
free(offsets);
close(fd);
return -ENOMEM;
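
A toy version of the corrected cleanup rule: on allocation failure, release the buffers obtained earlier, since freeing the pointer that just failed is a free(NULL) no-op and the old code leaked ref_ctr_offsets:

    #include <stdint.h>
    #include <stdlib.h>

    static int alloc_three(size_t n, uint64_t **offs, uint64_t **refs,
                           uint64_t **cookies)
    {
            *offs = calloc(n, sizeof(**offs));
            *refs = calloc(n, sizeof(**refs));
            *cookies = calloc(n, sizeof(**cookies));
            if (!*offs || !*refs || !*cookies) {
                    /* free everything that may have succeeded */
                    free(*cookies);
                    free(*refs);
                    free(*offs);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            uint64_t *offs, *refs, *cookies;

            if (alloc_three(4, &offs, &refs, &cookies))
                    return 1;
            free(cookies);
            free(refs);
            free(offs);
            return 0;
    }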
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index f98f7bbea2b1..b89bd792c1d5 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1463,7 +1463,7 @@ static int do_help(int argc, char **argv)
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
- " task_storage | bloom_filter | user_ringbuf | cgrp_storage }\n"
+ " task_storage | bloom_filter | user_ringbuf | cgrp_storage | arena }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index feb8e305804f..9cb42a3366c0 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2298,7 +2298,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
int map_fd;
profile_perf_events = calloc(
- sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
if (!profile_perf_events) {
p_err("failed to allocate memory for perf_event array: %s",
strerror(errno));
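
calloc(3) takes (nmemb, size); the swapped form allocates the same number of bytes but reads backwards and, if memory serves, trips newer compiler diagnostics such as GCC 14's -Wcalloc-transposed-args. The fixed call order, stand-alone:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int num_cpu = 4, num_metric = 3;
            /* element count first, element size second */
            int *events = calloc(num_cpu * num_metric, sizeof(int));

            if (!events)
                    return 1;
            printf("%u zeroed counters\n", num_cpu * num_metric);
            free(events);
            return 0;
    }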
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 27a23196d58e..d9520cb826b3 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -70,6 +70,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
+#include <linux/btf_ids.h>
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
@@ -78,7 +79,7 @@
#include <subcmd/parse-options.h>
#define BTF_IDS_SECTION ".BTF_ids"
-#define BTF_ID "__BTF_ID__"
+#define BTF_ID_PREFIX "__BTF_ID__"
#define BTF_STRUCT "struct"
#define BTF_UNION "union"
@@ -89,6 +90,14 @@
#define ADDR_CNT 100
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define ELFDATANATIVE ELFDATA2LSB
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define ELFDATANATIVE ELFDATA2MSB
+#else
+# error "Unknown machine endianness!"
+#endif
+
struct btf_id {
struct rb_node rb_node;
char *name;
@@ -116,6 +125,7 @@ struct object {
int idlist_shndx;
size_t strtabidx;
unsigned long idlist_addr;
+ int encoding;
} efile;
struct rb_root sets;
@@ -161,7 +171,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
static bool is_btf_id(const char *name)
{
- return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+ return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
}
static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
@@ -319,6 +329,7 @@ static int elf_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
size_t shdrstrndx;
+ GElf_Ehdr ehdr;
int idx = 0;
Elf *elf;
int fd;
@@ -350,6 +361,13 @@ static int elf_collect(struct object *obj)
return -1;
}
+ if (gelf_getehdr(obj->efile.elf, &ehdr) == NULL) {
+ pr_err("FAILED cannot get ELF header: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+ obj->efile.encoding = ehdr.e_ident[EI_DATA];
+
/*
* Scan all the elf sections and look for save data
* from .BTF_ids section and symbols.
@@ -441,7 +459,7 @@ static int symbols_collect(struct object *obj)
* __BTF_ID__TYPE__vfs_truncate__0
* prefix = ^
*/
- prefix = name + sizeof(BTF_ID) - 1;
+ prefix = name + sizeof(BTF_ID_PREFIX) - 1;
/* struct */
if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
@@ -649,19 +667,18 @@ static int cmp_id(const void *pa, const void *pb)
static int sets_patch(struct object *obj)
{
Elf_Data *data = obj->efile.idlist;
- int *ptr = data->d_buf;
struct rb_node *next;
next = rb_first(&obj->sets);
while (next) {
- unsigned long addr, idx;
+ struct btf_id_set8 *set8;
+ struct btf_id_set *set;
+ unsigned long addr, off;
struct btf_id *id;
- int *base;
- int cnt;
id = rb_entry(next, struct btf_id, rb_node);
addr = id->addr[0];
- idx = addr - obj->efile.idlist_addr;
+ off = addr - obj->efile.idlist_addr;
/* sets are unique */
if (id->addr_cnt != 1) {
@@ -670,14 +687,39 @@ static int sets_patch(struct object *obj)
return -1;
}
- idx = idx / sizeof(int);
- base = &ptr[idx] + (id->is_set8 ? 2 : 1);
- cnt = ptr[idx];
+ if (id->is_set) {
+ set = data->d_buf + off;
+ qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
+ } else {
+ set8 = data->d_buf + off;
+ /*
+ * Make sure id is at the beginning of the pairs
+ * struct, otherwise the below qsort would not work.
+ */
+ BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
+ qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
- pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
- (idx + 1) * sizeof(int), cnt, id->name);
+ /*
+ * When ELF endianness does not match endianness of the
+ * host, libelf will do the translation when updating
+ * the ELF. This, however, corrupts SET8 flags which are
+ * already in the target endianness. So, let's bswap
+ * them to the host endianness and libelf will then
+ * correctly translate everything.
+ */
+ if (obj->efile.encoding != ELFDATANATIVE) {
+ int i;
+
+ set8->flags = bswap_32(set8->flags);
+ for (i = 0; i < set8->cnt; i++) {
+ set8->pairs[i].flags =
+ bswap_32(set8->pairs[i].flags);
+ }
+ }
+ }
- qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
+ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
+ off, id->is_set ? set->cnt : set8->cnt, id->name);
next = rb_next(next);
}
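One detail worth spelling out in the sets_patch() rewrite above: a single comparator serves both set layouts only because every element, plain id or (id, flags) pair, begins with its 32-bit id, which is exactly what the BUILD_BUG_ON asserts. cmp_id() itself is not shown in this hunk; presumably it compares those leading ids, along the lines of:

static int cmp_id(const void *pa, const void *pb)
{
	const __u32 *a = pa, *b = pb;

	return *a < *b ? -1 : *a > *b;
}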
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 2f882d5cb30f..72535f00572f 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -8,6 +8,15 @@ struct btf_id_set {
u32 ids[];
};
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
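The pairs[] layout above is the assumption resolve_btfids' BUILD_BUG_ON encodes. An equivalent compile-time check in plain userspace C might read (a sketch, not part of the patch; the array-designator form of offsetof is a common compiler extension):

#include <stddef.h>

_Static_assert(offsetof(struct btf_id_set8, pairs) ==
	       offsetof(struct btf_id_set8, pairs[0].id),
	       "each pair must start with its id");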
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7f24d898efbb..3c42b9f1bada 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -42,6 +42,7 @@
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
+#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@@ -50,6 +51,10 @@
#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
+enum bpf_cond_pseudo_jmp {
+ BPF_MAY_GOTO = 0,
+};
+
/* Register numbers */
enum {
BPF_REG_0 = 0,
@@ -77,12 +82,29 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
+ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
+ * the trailing flexible array member) instead.
+ */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
+/* Header for bpf_lpm_trie_key structs */
+struct bpf_lpm_trie_key_hdr {
+ __u32 prefixlen;
+};
+
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
+struct bpf_lpm_trie_key_u8 {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ __u8 data[]; /* Arbitrary size */
+};
+
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
@@ -617,7 +639,11 @@ union bpf_iter_link_info {
* to NULL to begin the batched operation. After each subsequent
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
* *out_batch* as the *in_batch* for the next operation to
- * continue iteration from the current point.
+ * continue iteration from the current point. Both *in_batch* and
+ * *out_batch* must point to memory large enough to hold a key,
+ * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
+ * must be at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point
* to memory large enough to hold *count* items based on the key
@@ -847,6 +873,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
+ * BPF_TOKEN_CREATE
+ * Description
+ * Create BPF token with embedded information about what
+ * BPF-related functionality it allows:
+ * - a set of allowed bpf() syscall commands;
+ * - a set of allowed BPF map types to be created with
+ * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ * - a set of allowed BPF program types and BPF program attach
+ * types to be loaded with BPF_PROG_LOAD command, if
+ * BPF_PROG_LOAD itself is allowed.
+ *
+ * BPF token is created (derived) from an instance of BPF FS,
+ * assuming it has the necessary delegation mount options specified.
+ * This BPF token can be passed as an extra parameter to various
+ * bpf() syscall commands to grant BPF subsystem functionality to
+ * unprivileged processes.
+ *
+ * When created, BPF token is "associated" with the owning
+ * user namespace of BPF FS instance (super block) that it was
+ * derived from, and subsequent BPF operations performed with
+ * BPF token would be performing capabilities checks (i.e.,
+ * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
+ * that user namespace. Without a BPF token, such capabilities
+ * have to be granted in the init user namespace, which makes the
+ * bpf() syscall largely incompatible with user namespaces.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -901,6 +957,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
+ BPF_TOKEN_CREATE,
+ __MAX_BPF_CMD,
};
enum bpf_map_type {
@@ -951,6 +1009,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
+ BPF_MAP_TYPE_ARENA,
+ __MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -995,6 +1055,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
+ __MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@@ -1278,6 +1339,10 @@ enum {
*/
#define BPF_PSEUDO_KFUNC_CALL 2
+enum bpf_addr_space_cast {
+ BPF_ADDR_SPACE_CAST = 1,
+};
+
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
BPF_ANY = 0, /* create new element or update existing */
@@ -1330,6 +1395,18 @@ enum {
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
BPF_F_PATH_FD = (1U << 14),
+
+/* Flag for value_type_btf_obj_fd, the fd is available */
+ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+/* BPF token FD is passed in a corresponding command's token_fd field */
+ BPF_F_TOKEN_FD = (1U << 16),
+
+/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
+ BPF_F_SEGV_ON_FAULT = (1U << 17),
+
+/* Do not translate kernel bpf_arena pointers to user pointers */
+ BPF_F_NO_USER_CONV = (1U << 18),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1401,8 +1478,20 @@ union bpf_attr {
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
+ *
+ * BPF_MAP_TYPE_ARENA - contains the address where user space
+ * is going to mmap() the arena. It has to be page aligned.
*/
__u64 map_extra;
+
+ __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
+ * type data for
+ * btf_vmlinux_value_type_id.
+ */
+ /* BPF token FD to use with BPF_MAP_CREATE operation.
+ * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -1472,6 +1561,10 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
+ /* BPF token FD to use with BPF_PROG_LOAD operation.
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1584,6 +1677,11 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
+ __u32 btf_flags;
+ /* BPF token FD to use with BPF_BTF_LOAD operation.
+ * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 btf_token_fd;
};
struct {
@@ -1714,6 +1812,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
+ struct { /* struct used by BPF_TOKEN_CREATE command */
+ __u32 flags;
+ __u32 bpffs_fd;
+ } token_create;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -4839,9 +4942,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
- * currently only supported for tc BPF program types at the ingress
- * hook and for veth device types. The peer device must reside in a
- * different network namespace.
+ * currently only supported for tc BPF program types at the
+ * ingress hook and for veth and netkit target device types. The
+ * peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@@ -6487,7 +6590,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
- __u32 :32; /* alignment pad */
+ __u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@@ -6563,6 +6666,7 @@ struct bpf_link_info {
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
+ __aligned_u64 cookies;
} kprobe_multi;
struct {
__aligned_u64 path;
@@ -6582,6 +6686,7 @@ struct bpf_link_info {
__aligned_u64 file_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from file_name */
+ __u64 cookie;
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
struct {
__aligned_u64 func_name; /* in/out */
@@ -6589,14 +6694,19 @@ struct bpf_link_info {
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
+ __u64 cookie;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
__u32 name_len;
+ __u32 :32;
+ __u64 cookie;
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
struct {
__u64 config;
__u32 type;
+ __u32 :32;
+ __u64 cookie;
} event; /* BPF_PERF_EVENT_EVENT */
};
} perf_event;
@@ -6904,6 +7014,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
+ BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
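Of the uapi additions above, the bpf_lpm_trie_key replacement is the one most user code touches: embed struct bpf_lpm_trie_key_hdr in a fixed-size key of your own instead of fighting the old zero-length array. A sketch for IPv4 keys (the wrapper struct and helper are my example, not from the patch):

#include <linux/bpf.h>
#include <string.h>

struct ipv4_lpm_key {
	struct bpf_lpm_trie_key_hdr hdr;
	__u8 data[4];
};

static void fill_key(struct ipv4_lpm_key *key, __u32 addr, __u32 prefixlen)
{
	key->hdr.prefixlen = prefixlen;		/* up to 32 for AF_INET */
	memcpy(key->data, &addr, sizeof(key->data));
}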
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index a0aa05a28cf2..f0d71b2a3f1e 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -974,6 +974,7 @@ enum {
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
+ IFLA_BOND_COUPLED_CONTROL,
__IFLA_BOND_MAX,
};
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 93cb411adf72..bb65ee840cda 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -70,6 +70,10 @@ enum netdev_queue_type {
NETDEV_QUEUE_TYPE_TX,
};
+enum netdev_qstats_scope {
+ NETDEV_QSTATS_SCOPE_QUEUE = 1,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -133,6 +137,21 @@ enum {
};
enum {
+ NETDEV_A_QSTATS_IFINDEX = 1,
+ NETDEV_A_QSTATS_QUEUE_TYPE,
+ NETDEV_A_QSTATS_QUEUE_ID,
+ NETDEV_A_QSTATS_SCOPE,
+ NETDEV_A_QSTATS_RX_PACKETS = 8,
+ NETDEV_A_QSTATS_RX_BYTES,
+ NETDEV_A_QSTATS_TX_PACKETS,
+ NETDEV_A_QSTATS_TX_BYTES,
+ NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+
+ __NETDEV_A_QSTATS_MAX,
+ NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -144,6 +163,7 @@ enum {
NETDEV_CMD_PAGE_POOL_STATS_GET,
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
+ NETDEV_CMD_QSTATS_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 2d0c282c8588..b6619199a706 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
- usdt.o zip.o elf.o
+ usdt.o zip.o elf.o features.o
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 9dc9625651dc..97ec005c3c47 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -103,7 +103,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
*/
-int probe_memcg_account(void)
+int probe_memcg_account(int token_fd)
{
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
struct bpf_insn insns[] = {
@@ -120,6 +120,9 @@ int probe_memcg_account(void)
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
if (prog_fd >= 0) {
@@ -146,7 +149,7 @@ int bump_rlimit_memlock(void)
struct rlimit rlim;
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
- if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+ if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
return 0;
memlock_bumped = true;
@@ -169,7 +172,7 @@ int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
union bpf_attr attr;
int fd;
@@ -181,7 +184,7 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err(-EINVAL);
attr.map_type = map_type;
- if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -191,6 +194,7 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
attr.map_flags = OPTS_GET(opts, map_flags, 0);
@@ -198,6 +202,8 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.numa_node = OPTS_GET(opts, numa_node, 0);
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
+ attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
+
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
}
@@ -232,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -261,8 +267,9 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
attr.kern_version = OPTS_GET(opts, kern_version, 0);
+ attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
- if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license);
@@ -1182,7 +1189,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
union bpf_attr attr;
char *log_buf;
size_t log_size;
@@ -1207,6 +1214,10 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts
attr.btf = ptr_to_u64(btf_data);
attr.btf_size = btf_size;
+
+ attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
+ attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
+
/* log_level == 0 and log_buf != NULL means "try loading without
* log_buf, but retry with log_buf and log_level=1 on error", which is
* consistent across low-level and high-level BTF and program loading
@@ -1287,3 +1298,20 @@ int bpf_prog_bind_map(int prog_fd, int map_fd,
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
return libbpf_err_errno(ret);
}
+
+int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, token_create);
+ union bpf_attr attr;
+ int fd;
+
+ if (!OPTS_VALID(opts, bpf_token_create_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.token_create.bpffs_fd = bpffs_fd;
+ attr.token_create.flags = OPTS_GET(opts, flags, 0);
+
+ fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
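Putting bpf_token_create() together with the token_fd plumbing above, an unprivileged caller might look like this sketch; "/sys/fs/bpf" stands in for a bpffs mount set up with the delegate_* options, and error handling is elided:

#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

static int make_token_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int bpffs_fd, token_fd, map_fd;

	bpffs_fd = open("/sys/fs/bpf", O_RDONLY);
	token_fd = bpf_token_create(bpffs_fd, NULL);
	close(bpffs_fd);

	opts.token_fd = token_fd;
	opts.map_flags = BPF_F_TOKEN_FD;
	map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo_map",
				sizeof(__u32), sizeof(__u64), 128, &opts);
	close(token_fd);
	return map_fd;
}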
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index d0f53772bdc0..df0db2f0cdb7 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -35,7 +35,7 @@
extern "C" {
#endif
-int libbpf_set_memlock_rlim(size_t memlock_bytes);
+LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
struct bpf_map_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
@@ -51,8 +51,12 @@ struct bpf_map_create_opts {
__u32 numa_node;
__u32 map_ifindex;
+ __s32 value_type_btf_obj_fd;
+
+ __u32 token_fd;
+ size_t :0;
};
-#define bpf_map_create_opts__last_field map_ifindex
+#define bpf_map_create_opts__last_field token_fd
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
@@ -102,9 +106,10 @@ struct bpf_prog_load_opts {
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
+ __u32 token_fd;
size_t :0;
};
-#define bpf_prog_load_opts__last_field log_true_size
+#define bpf_prog_load_opts__last_field token_fd
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
@@ -130,9 +135,12 @@ struct bpf_btf_load_opts {
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
+
+ __u32 btf_flags;
+ __u32 token_fd;
size_t :0;
};
-#define bpf_btf_load_opts__last_field log_true_size
+#define bpf_btf_load_opts__last_field token_fd
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
struct bpf_btf_load_opts *opts);
@@ -182,10 +190,14 @@ LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
/**
* @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
*
- * The parameter *in_batch* is the address of the first element in the batch to read.
- * *out_batch* is an output parameter that should be passed as *in_batch* to subsequent
- * calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate
- * that the batched lookup starts from the beginning of the map.
+ * The parameter *in_batch* is the address of the first element in the batch to
+ * read. *out_batch* is an output parameter that should be passed as *in_batch*
+ * to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for
+ * *in_batch* to indicate that the batched lookup starts from the beginning of
+ * the map. Both *in_batch* and *out_batch* must point to memory large enough to
+ * hold a single key, except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which the batch parameters must be
+ * at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point to memory large enough to
* hold *count* items based on the key and value size of the map *map_fd*. The *keys*
@@ -218,7 +230,10 @@ LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
*
* @param fd BPF map file descriptor
* @param in_batch address of the first element in batch to read, can pass NULL to
- * get address of the first element in *out_batch*
+ * get address of the first element in *out_batch*. If not NULL, must be large
+ * enough to hold a key. For **BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH,
+ * LRU_PERCPU_HASH}**, the batch parameter must be at least 4 bytes wide regardless
+ * of key size.
* @param out_batch output parameter that should be passed to next call as *in_batch*
* @param keys pointer to an array of *count* keys
* @param values pointer to an array large enough for *count* values
@@ -492,7 +507,10 @@ LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
* program corresponding to *prog_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param prog_fd BPF program file descriptor
* @param info pointer to **struct bpf_prog_info** that will be populated with
@@ -509,7 +527,10 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
* map corresponding to *map_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param map_fd BPF map file descriptor
* @param info pointer to **struct bpf_map_info** that will be populated with
@@ -522,11 +543,14 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
/**
- * @brief **bpf_btf_get_info_by_fd()** obtains information about the
+ * @brief **bpf_btf_get_info_by_fd()** obtains information about the
* BTF object corresponding to *btf_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param btf_fd BTF object file descriptor
* @param info pointer to **struct bpf_btf_info** that will be populated with
@@ -543,7 +567,10 @@ LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u
* link corresponding to *link_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param link_fd BPF link file descriptor
* @param info pointer to **struct bpf_link_info** that will be populated with
@@ -640,6 +667,30 @@ struct bpf_test_run_opts {
LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
struct bpf_test_run_opts *opts);
+struct bpf_token_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+ size_t :0;
+};
+#define bpf_token_create_opts__last_field flags
+
+/**
+ * @brief **bpf_token_create()** creates a new instance of BPF token derived
+ * from specified BPF FS mount point.
+ *
+ * BPF token created with this API can be passed to bpf() syscall for
+ * commands like BPF_PROG_LOAD, BPF_MAP_CREATE, etc.
+ *
+ * @param bpffs_fd FD for BPF FS instance from which to derive a BPF token
+ * instance.
+ * @param opts optional BPF token creation options, can be NULL
+ *
+ * @return BPF token FD > 0 on success; negative error code otherwise (errno
+ * is also set to the error code)
+ */
+LIBBPF_API int bpf_token_create(int bpffs_fd,
+ struct bpf_token_create_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
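The sharpened bpf_map_lookup_batch() docs above are easiest to see in code. A sketch that dumps a BPF_MAP_TYPE_HASH map with __u32 keys and __u64 values; note the batch parameters are plain 4-byte __u32s, per the new wording:

#include <errno.h>
#include <bpf/bpf.h>

static int dump_hash_map(int map_fd)
{
	__u32 batch, count;
	__u32 keys[32];
	__u64 values[32];
	void *in = NULL;	/* NULL: start from the beginning */
	int err;

	do {
		count = 32;	/* in: capacity; out: elements returned */
		err = bpf_map_lookup_batch(map_fd, in, &batch,
					   keys, values, &count, NULL);
		if (err && err != -ENOENT)
			return err;
		/* process 'count' entries of keys[]/values[] here */
		in = &batch;	/* resume where the last call stopped */
	} while (!err);		/* -ENOENT signals end of the map */

	return 0;
}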
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 7325a12692a3..1ce738d91685 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -2,6 +2,8 @@
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__
+#include <bpf/bpf_helpers.h>
+
/*
* enum bpf_field_info_kind is passed as a second argument into
* __builtin_preserve_field_info() built-in to get a specific aspect of
@@ -44,7 +46,7 @@ enum bpf_enum_value_kind {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read_kernel( \
- (void *)dst, \
+ (void *)dst, \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
@@ -143,8 +145,29 @@ enum bpf_enum_value_kind {
} \
})
+/* Differentiator between the compilers' builtin implementations. This is
+ * needed because of parsing differences: GCC resolves these constructs to
+ * pointers of the builtin-specific type early during parsing, which makes
+ * it impossible to collect the required type information when the builtin
+ * is expanded.
+ */
+#ifdef __clang__
+#define ___bpf_typeof(type) ((typeof(type) *) 0)
+#else
+#define ___bpf_typeof1(type, NR) ({ \
+ extern typeof(type) *___concat(bpf_type_tmp_, NR); \
+ ___concat(bpf_type_tmp_, NR); \
+})
+#define ___bpf_typeof(type) ___bpf_typeof1(type, __COUNTER__)
+#endif
+
+#ifdef __clang__
#define ___bpf_field_ref1(field) (field)
-#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
+#define ___bpf_field_ref2(type, field) (___bpf_typeof(type)->field)
+#else
+#define ___bpf_field_ref1(field) (&(field))
+#define ___bpf_field_ref2(type, field) (&(___bpf_typeof(type)->field))
+#endif
#define ___bpf_field_ref(args...) \
___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
@@ -194,7 +217,7 @@ enum bpf_enum_value_kind {
* BTF. Always succeeds.
*/
#define bpf_core_type_id_local(type) \
- __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_LOCAL)
/*
* Convenience macro to get BTF type ID of a target kernel's type that matches
@@ -204,7 +227,7 @@ enum bpf_enum_value_kind {
* - 0, if no matching type was found in a target kernel BTF.
*/
#define bpf_core_type_id_kernel(type) \
- __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_TARGET)
/*
* Convenience macro to check that provided named type
@@ -214,7 +237,7 @@ enum bpf_enum_value_kind {
* 0, if no matching type is found.
*/
#define bpf_core_type_exists(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_EXISTS)
/*
* Convenience macro to check that provided named type
@@ -224,7 +247,7 @@ enum bpf_enum_value_kind {
* 0, if the type does not match any in the target kernel
*/
#define bpf_core_type_matches(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_MATCHES)
/*
* Convenience macro to get the byte size of a provided named type
@@ -234,7 +257,7 @@ enum bpf_enum_value_kind {
* 0, if no matching type is found.
*/
#define bpf_core_type_size(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_SIZE)
/*
* Convenience macro to check that provided enumerator value is defined in
@@ -244,8 +267,13 @@ enum bpf_enum_value_kind {
* kernel's BTF;
* 0, if no matching enum and/or enum value within that enum is found.
*/
+#ifdef __clang__
#define bpf_core_enum_value_exists(enum_type, enum_value) \
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+#else
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_EXISTS)
+#endif
/*
* Convenience macro to get the integer value of an enumerator value in
@@ -255,8 +283,13 @@ enum bpf_enum_value_kind {
* present in target kernel's BTF;
* 0, if no matching enum and/or enum value within that enum is found.
*/
+#ifdef __clang__
#define bpf_core_enum_value(enum_type, enum_value) \
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
+#else
+#define bpf_core_enum_value(enum_type, enum_value) \
+ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_VALUE)
+#endif
/*
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
@@ -268,7 +301,7 @@ enum bpf_enum_value_kind {
* a relocation, which records BTF type ID describing root struct/union and an
* accessor string which describes exact embedded field that was used to take
* an address. See detailed description of this relocation format and
- * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
+ * semantics in comments to struct bpf_core_relo in include/uapi/linux/bpf.h.
*
* This relocation allows libbpf to adjust BPF instruction to use correct
* actual field offset, based on target kernel BTF type that matches original
@@ -292,6 +325,17 @@ enum bpf_enum_value_kind {
#define bpf_core_read_user_str(dst, sz, src) \
bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
+extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
+
+/*
+ * Cast provided pointer *ptr* into a pointer to a specified *type* in such
+ * a way that BPF verifier will become aware of associated kernel-side BTF
+ * type. This allows accessing members of kernel types directly without the
+ * need to use BPF_CORE_READ() macros.
+ */
+#define bpf_core_cast(ptr, type) \
+ ((typeof(type) *)bpf_rdonly_cast((ptr), bpf_core_type_id_kernel(type)))
+
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
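The new bpf_core_cast() macro above is used from BPF program code. A sketch, assuming a kernel that exposes the bpf_rdonly_cast() kfunc and a vmlinux.h generated for it:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_exit")
int BPF_KPROBE(on_do_exit)
{
	struct task_struct *task;

	/* make the verifier see a (read-only) struct task_struct so
	 * members can be read directly, without BPF_CORE_READ() */
	task = bpf_core_cast((void *)bpf_get_current_task(), struct task_struct);
	bpf_printk("exiting pid %d", task->pid);
	return 0;
}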
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 2324cc42b017..cd17f6d0791f 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -13,6 +13,7 @@
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
+#define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
/*
* Helper macro to place programs, maps, license in
@@ -190,6 +191,9 @@ enum libbpf_tristate {
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
+#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
+#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
+#define __arg_arena __attribute((btf_decl_tag("arg:arena")))
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
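The new __arg_* tags above annotate global subprogram parameters for the verifier. A sketch of the ctx tag (the subprog is my example):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* the arg:ctx decl tag tells the verifier to treat this global
 * subprog's pointer argument as the program's context type */
__noinline int handle_event(void *ctx __arg_ctx)
{
	return bpf_get_smp_processor_id();
}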
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index ee95fd379d4d..2d0840ef599a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1079,6 +1079,11 @@ struct btf *btf__new(const void *data, __u32 size)
return libbpf_ptr(btf_new(data, size, NULL));
}
+struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
+{
+ return libbpf_ptr(btf_new(data, size, base_btf));
+}
+
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
struct btf_ext **btf_ext)
{
@@ -1317,7 +1322,9 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
-int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
+int btf_load_into_kernel(struct btf *btf,
+ char *log_buf, size_t log_sz, __u32 log_level,
+ int token_fd)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
__u32 buf_sz = 0, raw_size;
@@ -1367,6 +1374,10 @@ retry_load:
opts.log_level = log_level;
}
+ opts.token_fd = token_fd;
+ if (token_fd)
+ opts.btf_flags |= BPF_F_TOKEN_FD;
+
btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf->fd < 0) {
/* time to turn on verbose mode and try again */
@@ -1394,7 +1405,7 @@ done:
int btf__load_into_kernel(struct btf *btf)
{
- return btf_load_into_kernel(btf, NULL, 0, 0);
+ return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
int btf__fd(const struct btf *btf)
@@ -3039,12 +3050,16 @@ done:
return btf_ext;
}
-const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
*size = btf_ext->data_size;
return btf_ext->data;
}
+__attribute__((alias("btf_ext__raw_data")))
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
+
+
struct btf_dedup;
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
@@ -4926,10 +4941,9 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
*/
struct btf *btf__load_vmlinux_btf(void)
{
+ const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
+	/* fallback locations, trying to find vmlinux on disk */
const char *locations[] = {
- /* try canonical vmlinux BTF through sysfs first */
- "/sys/kernel/btf/vmlinux",
- /* fall back to trying to find vmlinux on disk otherwise */
"/boot/vmlinux-%1$s",
"/lib/modules/%1$s/vmlinux-%1$s",
"/lib/modules/%1$s/build/vmlinux",
@@ -4943,8 +4957,23 @@ struct btf *btf__load_vmlinux_btf(void)
struct btf *btf;
int i, err;
- uname(&buf);
+ /* is canonical sysfs location accessible? */
+ if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
+ pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
+ sysfs_btf_path);
+ } else {
+ btf = btf__parse(sysfs_btf_path, NULL);
+ if (!btf) {
+ err = -errno;
+ pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
+ return libbpf_err_ptr(err);
+ }
+ pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
+ return btf;
+ }
+ /* try fallback locations */
+ uname(&buf);
for (i = 0; i < ARRAY_SIZE(locations); i++) {
snprintf(path, PATH_MAX, locations[i], buf.release);
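From the caller's side the btf.c reordering above changes error behavior: the sysfs BTF now short-circuits the search, and a parse failure there is fatal instead of silently falling through to the on-disk locations. A sketch of typical use, with libbpf 1.x error conventions assumed:

#include <errno.h>
#include <bpf/btf.h>

static int load_kernel_btf(void)
{
	struct btf *btf = btf__load_vmlinux_btf();

	if (!btf)	/* errno is set by libbpf */
		return -errno;
	/* ... look up types via btf__find_by_name_kind() etc. ... */
	btf__free(btf);
	return 0;
}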
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index b02faec748a5..c92e02394159 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -11,8 +11,6 @@
#include "libbpf_internal.h"
#include "str_error.h"
-#define STRERR_BUFSIZE 128
-
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
* the symbol is hidden and can only be seen when referenced using an
* explicit version number. This is a GNU extension.
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
new file mode 100644
index 000000000000..4e783cc7fc4b
--- /dev/null
+++ b/tools/lib/bpf/features.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <linux/kernel.h>
+#include <linux/filter.h>
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_common.h"
+#include "libbpf_internal.h"
+#include "str_error.h"
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+int probe_fd(int fd)
+{
+ if (fd >= 0)
+ close(fd);
+ return fd >= 0;
+}
+
+static int probe_kern_prog_name(int token_fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.license = ptr_to_u64("GPL");
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
+ libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
+
+ /* make sure loading with name works */
+ ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
+ return probe_fd(ret);
+}
+
+static int probe_kern_global_data(int token_fd)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, map, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ insns[0].imm = map;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
+ close(map);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_func(int token_fd)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_func_global(int token_fd)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* static void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_datasec(int token_fd)
+{
+ static const char strs[] = "\0x\0.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC val */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_qmark_datasec(int token_fd)
+{
+ static const char strs[] = "\0x\0?.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC ?.data */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_float(int token_fd)
+{
+ static const char strs[] = "\0float";
+ __u32 types[] = {
+ /* float */
+ BTF_TYPE_FLOAT_ENC(1, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_decl_tag(int token_fd)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* attr */
+ BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_type_tag(int token_fd)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* attr */
+ BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
+ /* ptr */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_array_mmap(int token_fd)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
+ .token_fd = token_fd,
+ );
+ int fd;
+
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_exp_attach_type(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ /* use any valid combination of program type and (optional)
+ * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
+ * to see if kernel supports expected_attach_type field for
+ * BPF_PROG_LOAD command
+ */
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_probe_read_kernel(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
+ BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
+ BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_prog_bind_map(int token_fd)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
+ if (prog < 0) {
+ close(map);
+ return 0;
+ }
+
+ ret = bpf_prog_bind_map(prog, map, NULL);
+
+ close(map);
+ close(prog);
+
+ return ret >= 0;
+}
+
+static int probe_module_btf(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ char name[16];
+ int fd, err;
+
+ fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
+ if (fd < 0)
+ return 0; /* BTF not supported at all */
+
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
+ * kernel's module BTF support coincides with support for
+ * name/name_len fields in struct bpf_btf_info.
+ */
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ close(fd);
+ return !err;
+}
+
+static int probe_perf_link(int token_fd)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int prog_fd, link_fd, err;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* use invalid perf_event FD to get EBADF, if link is supported;
+ * otherwise EINVAL should be returned
+ */
+ link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_uprobe_multi_link(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, link_fd, err;
+ unsigned long offset = 0;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &load_opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* Creating uprobe in '/' binary should fail with -EBADF. */
+ link_opts.uprobe_multi.path = "/";
+ link_opts.uprobe_multi.offsets = &offset;
+ link_opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_kern_bpf_cookie(int token_fd)
+{
+ struct bpf_insn insns[] = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, insn_cnt = ARRAY_SIZE(insns);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf_enum64(int token_fd)
+{
+ static const char strs[] = "\0enum64";
+ __u32 types[] = {
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_arg_ctx_tag(int token_fd)
+{
+ static const char strs[] = "\0a\0b\0arg:ctx\0";
+ const __u32 types[] = {
+ /* [1] INT */
+ BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
+ /* [2] PTR -> VOID */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
+ /* [3] FUNC_PROTO `int(void *a)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(1 /* "a" */, 2),
+ /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
+ BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
+ /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(3 /* "b" */, 2),
+ /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
+ BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
+ /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
+ BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
+ };
+ const struct bpf_insn insns[] = {
+ /* main prog */
+ BPF_CALL_REL(+1),
+ BPF_EXIT_INSN(),
+ /* global subprog */
+ BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
+ BPF_EXIT_INSN(),
+ };
+ const struct bpf_func_info_min func_infos[] = {
+ { 0, 4 }, /* main prog -> FUNC 'a' */
+ { 2, 6 }, /* subprog -> FUNC 'b' */
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
+ if (btf_fd < 0)
+ return 0;
+
+ opts.prog_btf_fd = btf_fd;
+ opts.func_info = &func_infos;
+ opts.func_info_cnt = ARRAY_SIZE(func_infos);
+ opts.func_info_rec_size = sizeof(func_infos[0]);
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
+ "GPL", insns, insn_cnt, &opts);
+ close(btf_fd);
+
+ return probe_fd(prog_fd);
+}
+
+typedef int (*feature_probe_fn)(int /* token_fd */);
+
+static struct kern_feature_cache feature_cache;
+
+static struct kern_feature_desc {
+ const char *desc;
+ feature_probe_fn probe;
+} feature_probes[__FEAT_CNT] = {
+ [FEAT_PROG_NAME] = {
+ "BPF program name", probe_kern_prog_name,
+ },
+ [FEAT_GLOBAL_DATA] = {
+ "global variables", probe_kern_global_data,
+ },
+ [FEAT_BTF] = {
+ "minimal BTF", probe_kern_btf,
+ },
+ [FEAT_BTF_FUNC] = {
+ "BTF functions", probe_kern_btf_func,
+ },
+ [FEAT_BTF_GLOBAL_FUNC] = {
+ "BTF global function", probe_kern_btf_func_global,
+ },
+ [FEAT_BTF_DATASEC] = {
+ "BTF data section and variable", probe_kern_btf_datasec,
+ },
+ [FEAT_ARRAY_MMAP] = {
+ "ARRAY map mmap()", probe_kern_array_mmap,
+ },
+ [FEAT_EXP_ATTACH_TYPE] = {
+ "BPF_PROG_LOAD expected_attach_type attribute",
+ probe_kern_exp_attach_type,
+ },
+ [FEAT_PROBE_READ_KERN] = {
+ "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
+ },
+ [FEAT_PROG_BIND_MAP] = {
+ "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
+ },
+ [FEAT_MODULE_BTF] = {
+ "module BTF support", probe_module_btf,
+ },
+ [FEAT_BTF_FLOAT] = {
+ "BTF_KIND_FLOAT support", probe_kern_btf_float,
+ },
+ [FEAT_PERF_LINK] = {
+ "BPF perf link support", probe_perf_link,
+ },
+ [FEAT_BTF_DECL_TAG] = {
+ "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
+ },
+ [FEAT_BTF_TYPE_TAG] = {
+ "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
+ },
+ [FEAT_MEMCG_ACCOUNT] = {
+ "memcg-based memory accounting", probe_memcg_account,
+ },
+ [FEAT_BPF_COOKIE] = {
+ "BPF cookie support", probe_kern_bpf_cookie,
+ },
+ [FEAT_BTF_ENUM64] = {
+ "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
+ },
+ [FEAT_SYSCALL_WRAPPER] = {
+ "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
+ },
+ [FEAT_UPROBE_MULTI_LINK] = {
+ "BPF multi-uprobe link support", probe_uprobe_multi_link,
+ },
+ [FEAT_ARG_CTX_TAG] = {
+ "kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
+ },
+ [FEAT_BTF_QMARK_DATASEC] = {
+ "BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
+ },
+};
+
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
+{
+ struct kern_feature_desc *feat = &feature_probes[feat_id];
+ int ret;
+
+ /* assume global feature cache, unless custom one is provided */
+ if (!cache)
+ cache = &feature_cache;
+
+ if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
+ ret = feat->probe(cache->token_fd);
+ if (ret > 0) {
+ WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
+ } else if (ret == 0) {
+ WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
+ } else {
+ pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
+ WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
+ }
+ }
+
+ return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
+}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index afd09571c482..efab29b8935b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -33,6 +33,7 @@
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
+#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
@@ -59,6 +60,8 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
+
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
/* vsprintf() in __base_pr() uses nonliteral format string. It may break
@@ -70,6 +73,7 @@
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+static int map_set_def_max_entries(struct bpf_map *map);
static const char * const attach_type_name[] = {
[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
@@ -181,6 +185,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
+ [BPF_MAP_TYPE_ARENA] = "arena",
};
static const char * const prog_type_name[] = {
@@ -493,6 +498,7 @@ struct bpf_struct_ops {
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
+#define ARENA_SEC ".arena.1"
enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -527,6 +533,7 @@ struct bpf_map {
struct bpf_map_def def;
__u32 numa_node;
__u32 btf_var_idx;
+ int mod_btf_fd;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;
@@ -607,6 +614,7 @@ enum sec_type {
SEC_BSS,
SEC_DATA,
SEC_RODATA,
+ SEC_ST_OPS,
};
struct elf_sec_desc {
@@ -622,8 +630,7 @@ struct elf_state {
Elf *elf;
Elf64_Ehdr *ehdr;
Elf_Data *symbols;
- Elf_Data *st_ops_data;
- Elf_Data *st_ops_link_data;
+ Elf_Data *arena_data;
size_t shstrndx; /* section index for section name strings */
size_t strtabidx;
struct elf_sec_desc *secs;
@@ -632,8 +639,8 @@ struct elf_state {
__u32 btf_maps_sec_btf_id;
int text_shndx;
int symbols_shndx;
- int st_ops_shndx;
- int st_ops_link_shndx;
+ bool has_st_ops;
+ int arena_data_shndx;
};
struct usdt_manager;
@@ -693,6 +700,14 @@ struct bpf_object {
struct usdt_manager *usdt_man;
+ struct bpf_map *arena_map;
+ void *arena_data;
+ size_t arena_data_sz;
+
+ struct kern_feature_cache *feat_cache;
+ char *token_path;
+ int token_fd;
+
char path[];
};
@@ -930,22 +945,33 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t,
return NULL;
}
+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
+ __u16 kind, struct btf **res_btf,
+ struct module_btf **res_mod_btf);
+
#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind);
static int
-find_struct_ops_kern_types(const struct btf *btf, const char *tname,
+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
+ struct module_btf **mod_btf,
const struct btf_type **type, __u32 *type_id,
const struct btf_type **vtype, __u32 *vtype_id,
const struct btf_member **data_member)
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
+ struct btf *btf;
__s32 kern_vtype_id, kern_type_id;
+ char tname[256];
__u32 i;
- kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ snprintf(tname, sizeof(tname), "%.*s",
+ (int)bpf_core_essential_name_len(tname_raw), tname_raw);
+
+ kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
+ &btf, mod_btf);
if (kern_type_id < 0) {
pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
tname);
@@ -998,15 +1024,72 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}
+static bool is_valid_st_ops_program(struct bpf_object *obj,
+ const struct bpf_program *prog)
+{
+ int i;
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ if (&obj->programs[i] == prog)
+ return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
+ }
+
+ return false;
+}
+
+/* For each struct_ops program P, referenced from some struct_ops map M,
+ * enable P.autoload if there are Ms for which M.autocreate is true,
+ * disable P.autoload if for all Ms M.autocreate is false.
+ * Don't change P.autoload for programs that are not referenced from any maps.
+ */
+static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
+{
+ struct bpf_program *prog, *slot_prog;
+ struct bpf_map *map;
+ int i, j, k, vlen;
+
+ for (i = 0; i < obj->nr_programs; ++i) {
+ int should_load = false;
+ int use_cnt = 0;
+
+ prog = &obj->programs[i];
+ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
+ continue;
+
+ for (j = 0; j < obj->nr_maps; ++j) {
+ map = &obj->maps[j];
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ vlen = btf_vlen(map->st_ops->type);
+ for (k = 0; k < vlen; ++k) {
+ slot_prog = map->st_ops->progs[k];
+ if (prog != slot_prog)
+ continue;
+
+ use_cnt++;
+ if (map->autocreate)
+ should_load = true;
+ }
+ }
+ if (use_cnt)
+ prog->autoload = should_load;
+ }
+
+ return 0;
+}
+
/* Init the map's fields that depend on kern_btf */
-static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- const struct btf *btf,
- const struct btf *kern_btf)
+static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
const struct btf_member *member, *kern_member, *kern_data_member;
const struct btf_type *type, *kern_type, *kern_vtype;
__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
+ struct bpf_object *obj = map->obj;
+ const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
+ const struct btf *kern_btf;
+ struct module_btf *mod_btf;
void *data, *kern_data;
const char *tname;
int err;
@@ -1014,16 +1097,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
st_ops = map->st_ops;
type = st_ops->type;
tname = st_ops->tname;
- err = find_struct_ops_kern_types(kern_btf, tname,
+ err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
&kern_data_member);
if (err)
return err;
+ kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
+
pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
+ map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
map->def.value_size = kern_vtype->size;
map->btf_vmlinux_value_type_id = kern_vtype_id;
@@ -1081,9 +1167,16 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
if (btf_is_ptr(mtype)) {
struct bpf_program *prog;
- prog = st_ops->progs[i];
+ /* Update the value from the shadow type */
+ prog = *(void **)mdata;
+ st_ops->progs[i] = prog;
if (!prog)
continue;
+ if (!is_valid_st_ops_program(obj, prog)) {
+ pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
+ map->name, mname);
+ return -ENOTSUP;
+ }
kern_mtype = skip_mods_and_typedefs(kern_btf,
kern_mtype->type,
@@ -1099,8 +1192,34 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
return -ENOTSUP;
}
- prog->attach_btf_id = kern_type_id;
- prog->expected_attach_type = kern_member_idx;
+ if (mod_btf)
+ prog->attach_btf_obj_fd = mod_btf->fd;
+
+ /* if we haven't yet processed this BPF program, record proper
+ * attach_btf_id and member_idx
+ */
+ if (!prog->attach_btf_id) {
+ prog->attach_btf_id = kern_type_id;
+ prog->expected_attach_type = kern_member_idx;
+ }
+
+ /* struct_ops BPF prog can be re-used between multiple
+ * .struct_ops & .struct_ops.link as long as it's the
+ * same struct_ops struct definition and the same
+ * function pointer field
+ */
+ if (prog->attach_btf_id != kern_type_id) {
+ pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
+ map->name, mname, prog->name, prog->sec_name, prog->type,
+ prog->attach_btf_id, kern_type_id);
+ return -EINVAL;
+ }
+ if (prog->expected_attach_type != kern_member_idx) {
+ pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
+ map->name, mname, prog->name, prog->sec_name, prog->type,
+ prog->expected_attach_type, kern_member_idx);
+ return -EINVAL;
+ }
st_ops->kern_func_off[i] = kern_data_off + kern_moff;
@@ -1141,8 +1260,10 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
if (!bpf_map__is_struct_ops(map))
continue;
- err = bpf_map__init_kern_struct_ops(map, obj->btf,
- obj->btf_vmlinux);
+ if (!map->autocreate)
+ continue;
+
+ err = bpf_map__init_kern_struct_ops(map);
if (err)
return err;
}
@@ -1151,7 +1272,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
}
static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
- int shndx, Elf_Data *data, __u32 map_flags)
+ int shndx, Elf_Data *data)
{
const struct btf_type *type, *datasec;
const struct btf_var_secinfo *vsi;
@@ -1207,12 +1328,22 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->name = strdup(var_name);
if (!map->name)
return -ENOMEM;
+ map->btf_value_type_id = type_id;
+
+ /* Follow same convention as for programs autoload:
+ * SEC("?.struct_ops") means map is not created by default.
+ */
+ if (sec_name[0] == '?') {
+ map->autocreate = false;
+ /* from now on forget there was ? in section name */
+ sec_name++;
+ }
map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
map->def.key_size = sizeof(int);
map->def.value_size = type->size;
map->def.max_entries = 1;
- map->def.map_flags = map_flags;
+ map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -1247,15 +1378,25 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
- int err;
+ const char *sec_name;
+ int sec_idx, err;
- err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
- obj->efile.st_ops_data, 0);
- err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
- obj->efile.st_ops_link_shndx,
- obj->efile.st_ops_link_data,
- BPF_F_LINK);
- return err;
+ for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
+ struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
+
+ if (desc->sec_type != SEC_ST_OPS)
+ continue;
+
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ if (!sec_name)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
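
On the BPF program side, the '?' convention handled above mirrors program autoload; a sketch with invented type and program names:

	/* BPF-side sketch: this struct_ops map is parsed but not created by
	 * default; 'sched_ops' and 'enqueue_prog' are illustrative names */
	SEC("?.struct_ops.link")
	struct sched_ops optional_ops = {
		.enqueue = (void *)enqueue_prog,
	};

User space can then opt in with bpf_map__set_autocreate(map, true) before bpf_object__load().
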
static struct bpf_object *bpf_object__new(const char *path,
@@ -1293,8 +1434,6 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
- obj->efile.st_ops_shndx = -1;
- obj->efile.st_ops_link_shndx = -1;
obj->kconfig_map_idx = -1;
obj->kern_version = get_kernel_version();
@@ -1311,8 +1450,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
elf_end(obj->efile.elf);
obj->efile.elf = NULL;
obj->efile.symbols = NULL;
- obj->efile.st_ops_data = NULL;
- obj->efile.st_ops_link_data = NULL;
+ obj->efile.arena_data = NULL;
zfree(&obj->efile.secs);
obj->efile.sec_cnt = 0;
@@ -1503,11 +1641,20 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
return ERR_PTR(-ENOENT);
}
+/* Some versions of Android don't provide memfd_create() in their libc
+ * implementation, so avoid complications and just go straight to Linux
+ * syscall.
+ */
+static int sys_memfd_create(const char *name, unsigned flags)
+{
+ return syscall(__NR_memfd_create, name, flags);
+}
+
static int create_placeholder_fd(void)
{
int fd;
- fd = ensure_good_fd(memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+ fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
if (fd < 0)
return -errno;
return fd;
@@ -1546,7 +1693,7 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return map;
}
-static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
+static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
@@ -1556,6 +1703,20 @@ static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
return map_sz;
}
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
+
+ switch (map->def.type) {
+ case BPF_MAP_TYPE_ARRAY:
+ return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ case BPF_MAP_TYPE_ARENA:
+ return page_sz * map->def.max_entries;
+ default:
+ return 0; /* not supported */
+ }
+}
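
A concrete sizing example for the new arena branch:

	/* e.g., an ARENA map with max_entries = 100 on a 4096-byte-page system
	 * gets a 100 * 4096 = 409600-byte mmap() region; ARRAY maps keep the
	 * page-rounded value_size * max_entries sizing from array_map_mmap_sz()
	 */
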
+
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
void *mmaped;
@@ -1698,7 +1859,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->value_size = data_sz;
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
- ? BPF_F_RDONLY_PROG : 0;
+ ? BPF_F_RDONLY_PROG : 0;
/* failures are fine because of maps like .rodata.str1.1 */
(void) map_fill_btf_type_info(obj, map);
@@ -1709,7 +1870,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
- mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ mmap_sz = bpf_map_mmap_sz(map);
map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (map->mmaped == MAP_FAILED) {
@@ -2197,6 +2358,46 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return true;
}
+static bool get_map_field_long(const char *map_name, const struct btf *btf,
+ const struct btf_member *m, __u64 *res)
+{
+ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
+ const char *name = btf__name_by_offset(btf, m->name_off);
+
+ if (btf_is_ptr(t)) {
+ __u32 res32;
+ bool ret;
+
+ ret = get_map_field_int(map_name, btf, m, &res32);
+ if (ret)
+ *res = (__u64)res32;
+ return ret;
+ }
+
+ if (!btf_is_enum(t) && !btf_is_enum64(t)) {
+ pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
+ map_name, name, btf_kind_str(t));
+ return false;
+ }
+
+ if (btf_vlen(t) != 1) {
+ pr_warn("map '%s': attr '%s': invalid __ulong\n",
+ map_name, name);
+ return false;
+ }
+
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e = btf_enum(t);
+
+ *res = e->val;
+ } else {
+ const struct btf_enum64 *e = btf_enum64(t);
+
+ *res = btf_enum64_value(e);
+ }
+ return true;
+}
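
get_map_field_long() is the parser for the BPF-side __ulong() macro (bpf_helpers.h), which encodes a 64-bit constant as a single-entry ENUM64 so it survives BTF. A hedged example of a map definition that takes this path (values illustrative):

	struct {
		__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
		__uint(value_size, sizeof(__u64));
		__uint(max_entries, 1000);
		__ulong(map_extra, 3);	/* nr of hash funcs; lands in map_def->map_extra */
	} bloom SEC(".maps");
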
+
static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
{
int len;
@@ -2216,7 +2417,7 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
int err;
if (!path)
- path = "/sys/fs/bpf";
+ path = BPF_FS_DEFAULT_PATH;
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
if (err)
@@ -2430,9 +2631,9 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
map_def->pinning = val;
map_def->parts |= MAP_DEF_PINNING;
} else if (strcmp(name, "map_extra") == 0) {
- __u32 map_extra;
+ __u64 map_extra;
- if (!get_map_field_int(map_name, btf, m, &map_extra))
+ if (!get_map_field_long(map_name, btf, m, &map_extra))
return -EINVAL;
map_def->map_extra = map_extra;
map_def->parts |= MAP_DEF_MAP_EXTRA;
@@ -2650,6 +2851,32 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return 0;
}
+static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
+ const char *sec_name, int sec_idx,
+ void *data, size_t data_sz)
+{
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t mmap_sz;
+
+ mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ if (roundup(data_sz, page_sz) > mmap_sz) {
+ pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
+ sec_name, mmap_sz, data_sz);
+ return -E2BIG;
+ }
+
+ obj->arena_data = malloc(data_sz);
+ if (!obj->arena_data)
+ return -ENOMEM;
+ memcpy(obj->arena_data, data, data_sz);
+ obj->arena_data_sz = data_sz;
+
+ /* make bpf_map__init_value() work for ARENA maps */
+ map->mmaped = obj->arena_data;
+
+ return 0;
+}
+
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
const char *pin_root_path)
{
@@ -2699,6 +2926,33 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
+ for (i = 0; i < obj->nr_maps; i++) {
+ struct bpf_map *map = &obj->maps[i];
+
+ if (map->def.type != BPF_MAP_TYPE_ARENA)
+ continue;
+
+ if (obj->arena_map) {
+ pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
+ map->name, obj->arena_map->name);
+ return -EINVAL;
+ }
+ obj->arena_map = map;
+
+ if (obj->efile.arena_data) {
+ err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
+ obj->efile.arena_data->d_buf,
+ obj->efile.arena_data->d_size);
+ if (err)
+ return err;
+ }
+ }
+ if (obj->efile.arena_data && !obj->arena_map) {
+ pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
+ ARENA_SEC);
+ return -ENOENT;
+ }
+
return 0;
}
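
Tying the arena pieces together, a minimal BPF-side setup that satisfies both checks above might look like this (sizes and the fixed address are illustrative; __arena is the address_space(1) attribute convention that places globals into ".arena.1"):

	struct {
		__uint(type, BPF_MAP_TYPE_ARENA);
		__uint(map_flags, BPF_F_MMAPABLE);
		__uint(max_entries, 10);	/* arena size, in pages */
		__ulong(map_extra, 1ull << 44);	/* optional fixed mmap() address */
	} arena SEC(".maps");

	int __arena shared_counter;	/* backed by the single ARENA map above */
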
@@ -2731,6 +2985,11 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return sh->sh_flags & SHF_EXECINSTR;
}
+static bool starts_with_qmark(const char *s)
+{
+ return s && s[0] == '?';
+}
+
static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
@@ -2740,9 +2999,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
return !has_func || !has_datasec || !has_func_global || !has_float ||
- !has_decl_tag || !has_type_tag || !has_enum64;
+ !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
}
static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2754,6 +3014,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
int enum64_placeholder_id = 0;
struct btf_type *t;
int i, j, vlen;
@@ -2780,7 +3041,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
name = (char *)btf__name_by_offset(btf, t->name_off);
while (*name) {
- if (*name == '.')
+ if (*name == '.' || *name == '?')
*name = '_';
name++;
}
@@ -2795,6 +3056,14 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
vt = (void *)btf__type_by_id(btf, v->type);
m->name_off = vt->name_off;
}
+ } else if (!has_qmark_datasec && btf_is_datasec(t) &&
+ starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
+ /* replace '?' prefix with '_' for DATASEC names */
+ char *name;
+
+ name = (char *)btf__name_by_offset(btf, t->name_off);
+ if (name[0] == '?')
+ name[0] = '_';
} else if (!has_func && btf_is_func_proto(t)) {
/* replace FUNC_PROTO with ENUM */
vlen = btf_vlen(t);
@@ -2848,14 +3117,13 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
- obj->efile.st_ops_shndx >= 0 ||
- obj->efile.st_ops_link_shndx >= 0 ||
+ obj->efile.has_st_ops ||
obj->nr_extern > 0;
}
static bool kernel_needs_btf(const struct bpf_object *obj)
{
- return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
+ return obj->efile.has_st_ops;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -3225,7 +3493,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
} else {
/* currently BPF_BTF_LOAD only supports log_level 1 */
err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
- obj->log_level ? 1 : 0);
+ obj->log_level ? 1 : 0, obj->token_fd);
}
if (sanitize) {
if (!err) {
@@ -3556,12 +3824,17 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
sec_desc->sec_type = SEC_RODATA;
sec_desc->shdr = sh;
sec_desc->data = data;
- } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
- obj->efile.st_ops_data = data;
- obj->efile.st_ops_shndx = idx;
- } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
- obj->efile.st_ops_link_data = data;
- obj->efile.st_ops_link_shndx = idx;
+ } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
+ strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
+ strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
+ strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
+ sec_desc->sec_type = SEC_ST_OPS;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ obj->efile.has_st_ops = true;
+ } else if (strcmp(name, ARENA_SEC) == 0) {
+ obj->efile.arena_data = data;
+ obj->efile.arena_data_shndx = idx;
} else {
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
@@ -3577,6 +3850,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
+ strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
+ strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
idx, name, targ_sec_idx,
@@ -4189,6 +4464,15 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
+ /* arena data relocation */
+ if (shdr_idx == obj->efile.arena_data_shndx) {
+ reloc_desc->type = RELO_DATA;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->sym_off = sym->st_value;
+ return 0;
+ }
+
/* generic map reference relocation */
if (type == LIBBPF_MAP_UNSPEC) {
if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
@@ -4546,6 +4830,58 @@ int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
return 0;
}
+static int bpf_object_prepare_token(struct bpf_object *obj)
+{
+ const char *bpffs_path;
+ int bpffs_fd = -1, token_fd, err;
+ bool mandatory;
+ enum libbpf_print_level level;
+
+ /* token is explicitly disabled by empty bpf_token_path */
+ if (obj->token_path && obj->token_path[0] == '\0') {
+ pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
+ return 0;
+ }
+
+ mandatory = obj->token_path != NULL;
+ level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
+
+ bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
+ bpffs_fd = open(bpffs_path, O_DIRECTORY);
+ if (bpffs_fd < 0) {
+ err = -errno;
+ __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
+ obj->name, err, bpffs_path,
+ mandatory ? "" : ", skipping optional step...");
+ return mandatory ? err : 0;
+ }
+
+ token_fd = bpf_token_create(bpffs_fd, 0);
+ close(bpffs_fd);
+ if (token_fd < 0) {
+ if (!mandatory && token_fd == -ENOENT) {
+ pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
+ obj->name, bpffs_path);
+ return 0;
+ }
+ __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
+ obj->name, token_fd, bpffs_path,
+ mandatory ? "" : ", skipping optional step...");
+ return mandatory ? token_fd : 0;
+ }
+
+ obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
+ if (!obj->feat_cache) {
+ close(token_fd);
+ return -ENOMEM;
+ }
+
+ obj->token_fd = token_fd;
+ obj->feat_cache->token_fd = token_fd;
+
+ return 0;
+}
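
For comparison with the object-level flow above, the low-level equivalent is roughly the following (error handling elided, default mount path assumed; the opts fields come from this same token series):

	int bpffs_fd, token_fd, map_fd;
	LIBBPF_OPTS(bpf_map_create_opts, map_opts);

	bpffs_fd = open(BPF_FS_DEFAULT_PATH, O_DIRECTORY);
	token_fd = bpf_token_create(bpffs_fd, NULL);	/* mount needs delegation opts set */
	close(bpffs_fd);

	map_opts.token_fd = token_fd;
	map_opts.map_flags = BPF_F_TOKEN_FD;
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "tok_map",
				sizeof(int), sizeof(int), 1, &map_opts);
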
+
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
@@ -4555,6 +4891,10 @@ bpf_object__probe_loading(struct bpf_object *obj)
BPF_EXIT_INSN(),
};
int ret, insn_cnt = ARRAY_SIZE(insns);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = obj->token_fd,
+ .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
+ );
if (obj->gen_loader)
return 0;
@@ -4564,9 +4904,9 @@ bpf_object__probe_loading(struct bpf_object *obj)
pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
/* make sure basic loading works */
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0)
- ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0) {
ret = errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4581,462 +4921,18 @@ bpf_object__probe_loading(struct bpf_object *obj)
return 0;
}
-static int probe_fd(int fd)
-{
- if (fd >= 0)
- close(fd);
- return fd >= 0;
-}
-
-static int probe_kern_prog_name(void)
-{
- const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- union bpf_attr attr;
- int ret;
-
- memset(&attr, 0, attr_sz);
- attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr.license = ptr_to_u64("GPL");
- attr.insns = ptr_to_u64(insns);
- attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
- libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
-
- /* make sure loading with name works */
- ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
- return probe_fd(ret);
-}
-
-static int probe_kern_global_data(void)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- struct bpf_insn insns[] = {
- BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int ret, map, insn_cnt = ARRAY_SIZE(insns);
-
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
- if (map < 0) {
- ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
- return ret;
- }
-
- insns[0].imm = map;
-
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
- close(map);
- return probe_fd(ret);
-}
-
-static int probe_kern_btf(void)
-{
- static const char strs[] = "\0int";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_func(void)
-{
- static const char strs[] = "\0int\0x\0a";
- /* void x(int a) {} */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* FUNC_PROTO */ /* [2] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
- BTF_PARAM_ENC(7, 1),
- /* FUNC x */ /* [3] */
- BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_func_global(void)
-{
- static const char strs[] = "\0int\0x\0a";
- /* static void x(int a) {} */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* FUNC_PROTO */ /* [2] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
- BTF_PARAM_ENC(7, 1),
- /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
- BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_datasec(void)
-{
- static const char strs[] = "\0x\0.data";
- /* static int a; */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* VAR x */ /* [2] */
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
- BTF_VAR_STATIC,
- /* DATASEC val */ /* [3] */
- BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
- BTF_VAR_SECINFO_ENC(2, 0, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_float(void)
-{
- static const char strs[] = "\0float";
- __u32 types[] = {
- /* float */
- BTF_TYPE_FLOAT_ENC(1, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_decl_tag(void)
-{
- static const char strs[] = "\0tag";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* VAR x */ /* [2] */
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
- BTF_VAR_STATIC,
- /* attr */
- BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_type_tag(void)
-{
- static const char strs[] = "\0tag";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* attr */
- BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
- /* ptr */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_array_mmap(void)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
- int fd;
-
- fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
- return probe_fd(fd);
-}
-
-static int probe_kern_exp_attach_type(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int fd, insn_cnt = ARRAY_SIZE(insns);
-
- /* use any valid combination of program type and (optional)
- * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
- * to see if kernel supports expected_attach_type field for
- * BPF_PROG_LOAD command
- */
- fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
- return probe_fd(fd);
-}
-
-static int probe_kern_probe_read_kernel(void)
-{
- struct bpf_insn insns[] = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
- BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
- BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
- BPF_EXIT_INSN(),
- };
- int fd, insn_cnt = ARRAY_SIZE(insns);
-
- fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
- return probe_fd(fd);
-}
-
-static int probe_prog_bind_map(void)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
-
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
- if (map < 0) {
- ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
- return ret;
- }
-
- prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
- if (prog < 0) {
- close(map);
- return 0;
- }
-
- ret = bpf_prog_bind_map(prog, map, NULL);
-
- close(map);
- close(prog);
-
- return ret >= 0;
-}
-
-static int probe_module_btf(void)
-{
- static const char strs[] = "\0int";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
- };
- struct bpf_btf_info info;
- __u32 len = sizeof(info);
- char name[16];
- int fd, err;
-
- fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
- if (fd < 0)
- return 0; /* BTF not supported at all */
-
- memset(&info, 0, sizeof(info));
- info.name = ptr_to_u64(name);
- info.name_len = sizeof(name);
-
- /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
- * kernel's module BTF support coincides with support for
- * name/name_len fields in struct bpf_btf_info.
- */
- err = bpf_btf_get_info_by_fd(fd, &info, &len);
- close(fd);
- return !err;
-}
-
-static int probe_perf_link(void)
-{
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int prog_fd, link_fd, err;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
- insns, ARRAY_SIZE(insns), NULL);
- if (prog_fd < 0)
- return -errno;
-
- /* use invalid perf_event FD to get EBADF, if link is supported;
- * otherwise EINVAL should be returned
- */
- link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
- err = -errno; /* close() can clobber errno */
-
- if (link_fd >= 0)
- close(link_fd);
- close(prog_fd);
-
- return link_fd < 0 && err == -EBADF;
-}
-
-static int probe_uprobe_multi_link(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
- .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
- );
- LIBBPF_OPTS(bpf_link_create_opts, link_opts);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int prog_fd, link_fd, err;
- unsigned long offset = 0;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
- insns, ARRAY_SIZE(insns), &load_opts);
- if (prog_fd < 0)
- return -errno;
-
- /* Creating uprobe in '/' binary should fail with -EBADF. */
- link_opts.uprobe_multi.path = "/";
- link_opts.uprobe_multi.offsets = &offset;
- link_opts.uprobe_multi.cnt = 1;
-
- link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
- err = -errno; /* close() can clobber errno */
-
- if (link_fd >= 0)
- close(link_fd);
- close(prog_fd);
-
- return link_fd < 0 && err == -EBADF;
-}
-
-static int probe_kern_bpf_cookie(void)
-{
- struct bpf_insn insns[] = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
- BPF_EXIT_INSN(),
- };
- int ret, insn_cnt = ARRAY_SIZE(insns);
-
- ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
- return probe_fd(ret);
-}
-
-static int probe_kern_btf_enum64(void)
-{
- static const char strs[] = "\0enum64";
- __u32 types[] = {
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_syscall_wrapper(void);
-
-enum kern_feature_result {
- FEAT_UNKNOWN = 0,
- FEAT_SUPPORTED = 1,
- FEAT_MISSING = 2,
-};
-
-typedef int (*feature_probe_fn)(void);
-
-static struct kern_feature_desc {
- const char *desc;
- feature_probe_fn probe;
- enum kern_feature_result res;
-} feature_probes[__FEAT_CNT] = {
- [FEAT_PROG_NAME] = {
- "BPF program name", probe_kern_prog_name,
- },
- [FEAT_GLOBAL_DATA] = {
- "global variables", probe_kern_global_data,
- },
- [FEAT_BTF] = {
- "minimal BTF", probe_kern_btf,
- },
- [FEAT_BTF_FUNC] = {
- "BTF functions", probe_kern_btf_func,
- },
- [FEAT_BTF_GLOBAL_FUNC] = {
- "BTF global function", probe_kern_btf_func_global,
- },
- [FEAT_BTF_DATASEC] = {
- "BTF data section and variable", probe_kern_btf_datasec,
- },
- [FEAT_ARRAY_MMAP] = {
- "ARRAY map mmap()", probe_kern_array_mmap,
- },
- [FEAT_EXP_ATTACH_TYPE] = {
- "BPF_PROG_LOAD expected_attach_type attribute",
- probe_kern_exp_attach_type,
- },
- [FEAT_PROBE_READ_KERN] = {
- "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
- },
- [FEAT_PROG_BIND_MAP] = {
- "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
- },
- [FEAT_MODULE_BTF] = {
- "module BTF support", probe_module_btf,
- },
- [FEAT_BTF_FLOAT] = {
- "BTF_KIND_FLOAT support", probe_kern_btf_float,
- },
- [FEAT_PERF_LINK] = {
- "BPF perf link support", probe_perf_link,
- },
- [FEAT_BTF_DECL_TAG] = {
- "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
- },
- [FEAT_BTF_TYPE_TAG] = {
- "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
- },
- [FEAT_MEMCG_ACCOUNT] = {
- "memcg-based memory accounting", probe_memcg_account,
- },
- [FEAT_BPF_COOKIE] = {
- "BPF cookie support", probe_kern_bpf_cookie,
- },
- [FEAT_BTF_ENUM64] = {
- "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
- },
- [FEAT_SYSCALL_WRAPPER] = {
- "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
- },
- [FEAT_UPROBE_MULTI_LINK] = {
- "BPF multi-uprobe link support", probe_uprobe_multi_link,
- },
-};
-
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
- struct kern_feature_desc *feat = &feature_probes[feat_id];
- int ret;
-
- if (obj && obj->gen_loader)
+ if (obj->gen_loader)
/* To generate loader program assume the latest kernel
* to avoid doing extra prog_load, map_create syscalls.
*/
return true;
- if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
- ret = feat->probe();
- if (ret > 0) {
- WRITE_ONCE(feat->res, FEAT_SUPPORTED);
- } else if (ret == 0) {
- WRITE_ONCE(feat->res, FEAT_MISSING);
- } else {
- pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
- WRITE_ONCE(feat->res, FEAT_MISSING);
- }
- }
+ if (obj->token_fd)
+ return feat_supported(obj->feat_cache, feat_id);
- return READ_ONCE(feat->res) == FEAT_SUPPORTED;
+ return feat_supported(NULL, feat_id);
}
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
@@ -5117,6 +5013,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
return 0;
}
+
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@@ -5160,9 +5057,17 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.map_flags = def->map_flags;
create_attr.numa_node = map->numa_node;
create_attr.map_extra = map->map_extra;
+ create_attr.token_fd = obj->token_fd;
+ if (obj->token_fd)
+ create_attr.map_flags |= BPF_F_TOKEN_FD;
- if (bpf_map__is_struct_ops(map))
+ if (bpf_map__is_struct_ops(map)) {
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+ if (map->mod_btf_fd >= 0) {
+ create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
+ create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
+ }
+ }
if (obj->btf && btf__fd(obj->btf) >= 0) {
create_attr.btf_fd = btf__fd(obj->btf);
@@ -5172,6 +5077,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map_type__is_map_in_map(def->type)) {
if (map->inner_map) {
+ err = map_set_def_max_entries(map->inner_map);
+ if (err)
+ return err;
err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
@@ -5198,11 +5106,16 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_ARENA:
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
map->btf_value_type_id = 0;
+ break;
+ case BPF_MAP_TYPE_STRUCT_OPS:
+ create_attr.btf_value_type_id = 0;
+ break;
default:
break;
}
@@ -5438,7 +5351,23 @@ retry:
if (err < 0)
goto err_out;
}
-
+ if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
+ PROT_READ | PROT_WRITE,
+ map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
+ map->fd, 0);
+ if (map->mmaped == MAP_FAILED) {
+ err = -errno;
+ map->mmaped = NULL;
+ pr_warn("map '%s': failed to mmap arena: %d\n",
+ map->name, err);
+ return err;
+ }
+ if (obj->arena_data) {
+ memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
+ zfree(&obj->arena_data);
+ }
+ }
if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
err = init_map_in_map_slots(obj, map);
if (err < 0)
@@ -6695,6 +6624,14 @@ static struct {
/* all other program types don't have "named" context structs */
};
+/* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
+ * for below __builtin_types_compatible_p() checks;
+ * with this approach we don't need any extra arch-specific #ifdef guards
+ */
+struct pt_regs;
+struct user_pt_regs;
+struct user_regs_struct;
+
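
The forward declarations are enough because __builtin_types_compatible_p() matches struct types by tag, so the compiler never needs the full definitions; a sketch:

	/* exactly one of these is true per architecture, depending on what
	 * bpf_user_pt_regs_t typedefs to (e.g., struct pt_regs on x86,
	 * struct user_pt_regs on arm64, struct user_regs_struct on riscv)
	 */
	static const bool ctx_is_pt_regs =
		__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs);
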
static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
const char *subprog_name, int arg_idx,
int arg_type_id, const char *ctx_name)
@@ -6735,11 +6672,21 @@ static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_pro
/* special cases */
switch (prog->type) {
case BPF_PROG_TYPE_KPROBE:
- case BPF_PROG_TYPE_PERF_EVENT:
/* `struct pt_regs *` is expected, but we need to fix up */
if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
return true;
break;
+ case BPF_PROG_TYPE_PERF_EVENT:
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
+ btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+ return true;
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
+ btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
+ return true;
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
+ btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
+ return true;
+ break;
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
/* allow u64* as ctx */
@@ -6818,69 +6765,6 @@ static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_progr
return fn_id;
}
-static int probe_kern_arg_ctx_tag(void)
-{
- /* To minimize merge conflicts with BPF token series that refactors
- * feature detection code a lot, we don't integrate
- * probe_kern_arg_ctx_tag() into kernel_supports() feature-detection
- * framework yet, doing our own caching internally.
- * This will be cleaned up a bit later when bpf/bpf-next trees settle.
- */
- static int cached_result = -1;
- static const char strs[] = "\0a\0b\0arg:ctx\0";
- const __u32 types[] = {
- /* [1] INT */
- BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
- /* [2] PTR -> VOID */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
- /* [3] FUNC_PROTO `int(void *a)` */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
- BTF_PARAM_ENC(1 /* "a" */, 2),
- /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
- BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
- /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
- BTF_PARAM_ENC(3 /* "b" */, 2),
- /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
- BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
- /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
- BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
- };
- const struct bpf_insn insns[] = {
- /* main prog */
- BPF_CALL_REL(+1),
- BPF_EXIT_INSN(),
- /* global subprog */
- BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
- BPF_EXIT_INSN(),
- };
- const struct bpf_func_info_min func_infos[] = {
- { 0, 4 }, /* main prog -> FUNC 'a' */
- { 2, 6 }, /* subprog -> FUNC 'b' */
- };
- LIBBPF_OPTS(bpf_prog_load_opts, opts);
- int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
-
- if (cached_result >= 0)
- return cached_result;
-
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
- if (btf_fd < 0)
- return 0;
-
- opts.prog_btf_fd = btf_fd;
- opts.func_info = &func_infos;
- opts.func_info_cnt = ARRAY_SIZE(func_infos);
- opts.func_info_rec_size = sizeof(func_infos[0]);
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
- "GPL", insns, insn_cnt, &opts);
- close(btf_fd);
-
- cached_result = probe_fd(prog_fd);
- return cached_result;
-}
-
/* Check if main program or global subprog's function prototype has `arg:ctx`
* argument tags, and, if necessary, substitute correct type to match what BPF
* verifier would expect, taking into account specific program type. This
@@ -6905,7 +6789,7 @@ static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_progra
return 0;
/* don't do any fix ups if kernel natively supports __arg_ctx */
- if (probe_kern_arg_ctx_tag() > 0)
+ if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
return 0;
/* some BPF program types just don't have named context structs, so
@@ -7292,12 +7176,12 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
data = sec_desc->data;
idx = shdr->sh_info;
- if (shdr->sh_type != SHT_REL) {
+ if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
- if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
+ if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
err = bpf_object__collect_st_ops_relos(obj, shdr, data);
else if (idx == obj->efile.btf_maps_shndx)
err = bpf_object__collect_map_relos(obj, shdr, data);
@@ -7473,6 +7357,10 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.prog_flags = prog->prog_flags;
load_attr.fd_array = obj->fd_array;
+ load_attr.token_fd = obj->token_fd;
+ if (obj->token_fd)
+ load_attr.prog_flags |= BPF_F_TOKEN_FD;
+
/* adjust load_attr if sec_def provides custom preload callback */
if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
@@ -7918,7 +7806,7 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path;
+ const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
@@ -7955,6 +7843,16 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (log_size && !log_buf)
return ERR_PTR(-EINVAL);
+ token_path = OPTS_GET(opts, bpf_token_path, NULL);
+ /* if user didn't specify bpf_token_path explicitly, check if
+ * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
+ * option
+ */
+ if (!token_path)
+ token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
+ if (token_path && strlen(token_path) >= PATH_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
@@ -7963,6 +7861,14 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
obj->log_size = log_size;
obj->log_level = log_level;
+ if (token_path) {
+ obj->token_path = strdup(token_path);
+ if (!obj->token_path) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
if (btf_tmp_path) {
if (strlen(btf_tmp_path) >= PATH_MAX) {
@@ -8449,11 +8355,20 @@ static void bpf_map_prepare_vdata(const struct bpf_map *map)
static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
{
+ struct bpf_map *map;
int i;
- for (i = 0; i < obj->nr_maps; i++)
- if (bpf_map__is_struct_ops(&obj->maps[i]))
- bpf_map_prepare_vdata(&obj->maps[i]);
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
+
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ if (!map->autocreate)
+ continue;
+
+ bpf_map_prepare_vdata(map);
+ }
return 0;
}
@@ -8473,11 +8388,13 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
if (obj->gen_loader)
bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
- err = bpf_object__probe_loading(obj);
+ err = bpf_object_prepare_token(obj);
+ err = err ? : bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+ err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__create_maps(obj);
@@ -8947,13 +8864,9 @@ static void bpf_map__destroy(struct bpf_map *map)
zfree(&map->init_slots);
map->init_slots_sz = 0;
- if (map->mmaped) {
- size_t mmap_sz;
-
- mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- munmap(map->mmaped, mmap_sz);
- map->mmaped = NULL;
- }
+ if (map->mmaped && map->mmaped != map->obj->arena_data)
+ munmap(map->mmaped, bpf_map_mmap_sz(map));
+ map->mmaped = NULL;
if (map->st_ops) {
zfree(&map->st_ops->data);
@@ -9008,6 +8921,13 @@ void bpf_object__close(struct bpf_object *obj)
}
zfree(&obj->programs);
+ zfree(&obj->feat_cache);
+ zfree(&obj->token_path);
+ if (obj->token_fd > 0)
+ close(obj->token_fd);
+
+ zfree(&obj->arena_data);
+
free(obj);
}
@@ -9668,7 +9588,9 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
return NULL;
}
-/* Collect the reloc from ELF and populate the st_ops->progs[] */
+/* Collect the reloc from ELF, populate the st_ops->progs[], and update
+ * st_ops->data for shadow type.
+ */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
@@ -9760,28 +9682,15 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
return -EINVAL;
}
- /* if we haven't yet processed this BPF program, record proper
- * attach_btf_id and member_idx
- */
- if (!prog->attach_btf_id) {
- prog->attach_btf_id = st_ops->type_id;
- prog->expected_attach_type = member_idx;
- }
+ st_ops->progs[member_idx] = prog;
- /* struct_ops BPF prog can be re-used between multiple
- * .struct_ops & .struct_ops.link as long as it's the
- * same struct_ops struct definition and the same
- * function pointer field
+ /* st_ops->data will be exposed to users, being returned by
+ * bpf_map__initial_value() as a pointer to the shadow
+ * type. All function pointers in the original struct type
+ * should be converted to a pointer to struct bpf_program
+ * in the shadow type.
*/
- if (prog->attach_btf_id != st_ops->type_id ||
- prog->expected_attach_type != member_idx) {
- pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
- map->name, prog->name, prog->sec_name, prog->type,
- prog->attach_btf_id, prog->expected_attach_type, name);
- return -EINVAL;
- }
-
- st_ops->progs[member_idx] = prog;
+ *((struct bpf_program **)(st_ops->data + moff)) = prog;
}
return 0;
@@ -9966,7 +9875,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
*btf_obj_fd = 0;
*btf_type_id = 1;
} else {
- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ err = find_kernel_btf_id(prog->obj, attach_name,
+ attach_type, btf_obj_fd,
+ btf_type_id);
}
if (err) {
pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
@@ -10188,11 +10099,14 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
return libbpf_err(-EBUSY);
if (map->mmaped) {
- int err;
size_t mmap_old_sz, mmap_new_sz;
+ int err;
- mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
+ if (map->def.type != BPF_MAP_TYPE_ARRAY)
+ return -EOPNOTSUPP;
+
+ mmap_old_sz = bpf_map_mmap_sz(map);
+ mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
if (err) {
pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
@@ -10225,22 +10139,41 @@ __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size)
{
+ size_t actual_sz;
+
if (map->obj->loaded || map->reused)
return libbpf_err(-EBUSY);
- if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
- size != map->def.value_size)
+ if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
+ return libbpf_err(-EINVAL);
+
+ if (map->def.type == BPF_MAP_TYPE_ARENA)
+ actual_sz = map->obj->arena_data_sz;
+ else
+ actual_sz = map->def.value_size;
+ if (size != actual_sz)
return libbpf_err(-EINVAL);
memcpy(map->mmaped, data, size);
return 0;
}
-void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
{
+ if (bpf_map__is_struct_ops(map)) {
+ if (psize)
+ *psize = map->def.value_size;
+ return map->st_ops->data;
+ }
+
if (!map->mmaped)
return NULL;
- *psize = map->def.value_size;
+
+ if (map->def.type == BPF_MAP_TYPE_ARENA)
+ *psize = map->obj->arena_data_sz;
+ else
+ *psize = map->def.value_size;
+
return map->mmaped;
}
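
With the struct_ops branch added above, bpf_map__initial_value() doubles as the accessor for a map's shadow struct; a hedged usage sketch (the ops type and map name are invented):

	struct my_ops *shadow;

	shadow = bpf_map__initial_value(bpf_object__find_map_by_name(obj, "my_ops_map"), NULL);
	shadow->flags = 1;	/* scalar members can be tweaked before load */
	/* function-pointer members now read back as struct bpf_program * values,
	 * as populated by bpf_object__collect_st_ops_relos() */
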
@@ -11028,7 +10961,7 @@ static const char *arch_specific_syscall_pfx(void)
#endif
}
-static int probe_kern_syscall_wrapper(void)
+int probe_kern_syscall_wrapper(int token_fd)
{
char syscall_name[64];
const char *ksys_pfx;
@@ -13717,7 +13650,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
- size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = s->maps[i].mmaped;
@@ -13729,6 +13662,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
continue;
}
+ if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ *mmaped = map->mmaped;
+ continue;
+ }
+
if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6cd9c501624f..7b510761f545 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -177,10 +177,29 @@ struct bpf_object_open_opts {
* logs through its print callback.
*/
__u32 kernel_log_level;
+ /* Path to BPF FS mount point to derive BPF token from.
+ *
+ * Created BPF token will be used for all bpf() syscall operations
+ * that accept BPF token (e.g., map creation, BTF and program loads,
+ * etc) automatically within instantiated BPF object.
+ *
+ * If bpf_token_path is not specified, libbpf will consult
+ * LIBBPF_BPF_TOKEN_PATH environment variable. If set, it will be
+ * taken as a value of bpf_token_path option and will force libbpf to
+ * either create BPF token from provided custom BPF FS path, or will
+ * disable implicit BPF token creation, if envvar value is an empty
+ * string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH, if both are
+ * set at the same time.
+ *
+ * Setting bpf_token_path option to empty string disables libbpf's
+ * automatic attempt to create BPF token from default BPF FS mount
+ * point (/sys/fs/bpf), in case this default behavior is undesirable.
+ */
+ const char *bpf_token_path;
size_t :0;
};
-#define bpf_object_open_opts__last_field kernel_log_level
+#define bpf_object_open_opts__last_field bpf_token_path
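
A usage sketch for the new option (paths and object file name illustrative):

	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.bpf_token_path = "/mnt/delegated-bpffs",	/* or "" to disable tokens */
	);
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);

	/* the same effect without recompiling:
	 *   LIBBPF_BPF_TOKEN_PATH=/mnt/delegated-bpffs ./my_app
	 */
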
/**
* @brief **bpf_object__open()** creates a bpf_object by opening
@@ -995,7 +1014,7 @@ LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
-LIBBPF_API void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
+LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);
/**
* @brief **bpf_map__is_internal()** tells the caller whether or not the
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 91c5aef7dae7..86804fd90dd1 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -245,7 +245,6 @@ LIBBPF_0.3.0 {
btf__parse_raw_split;
btf__parse_split;
btf__new_empty_split;
- btf__new_split;
ring_buffer__epoll_fd;
} LIBBPF_0.2.0;
@@ -326,7 +325,6 @@ LIBBPF_0.7.0 {
bpf_xdp_detach;
bpf_xdp_query;
bpf_xdp_query_id;
- btf_ext__raw_data;
libbpf_probe_bpf_helper;
libbpf_probe_bpf_map_type;
libbpf_probe_bpf_prog_type;
@@ -411,4 +409,8 @@ LIBBPF_1.3.0 {
} LIBBPF_1.2.0;
LIBBPF_1.4.0 {
+ global:
+ bpf_token_create;
+ btf__new_split;
+ btf_ext__raw_data;
} LIBBPF_1.3.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 27e4e320e1a6..864b36177424 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -15,9 +15,24 @@
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
+#include <sys/syscall.h>
#include <libelf.h>
#include "relo_core.h"
+/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
+ * ([0]), and just returns -EINVAL even if file exists and is accessible.
+ * See [1] for issues caused by this.
+ *
+ * So just redefine it to 0 on Android.
+ *
+ * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
+ * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
+ */
+#ifdef __ANDROID__
+#undef AT_EACCESS
+#define AT_EACCESS 0
+#endif
+
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
@@ -357,18 +372,39 @@ enum kern_feature_id {
FEAT_SYSCALL_WRAPPER,
/* BPF multi-uprobe link support */
FEAT_UPROBE_MULTI_LINK,
+ /* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
+ FEAT_ARG_CTX_TAG,
+ /* Kernel supports '?' at the front of datasec names */
+ FEAT_BTF_QMARK_DATASEC,
__FEAT_CNT,
};
-int probe_memcg_account(void);
+enum kern_feature_result {
+ FEAT_UNKNOWN = 0,
+ FEAT_SUPPORTED = 1,
+ FEAT_MISSING = 2,
+};
+
+struct kern_feature_cache {
+ enum kern_feature_result res[__FEAT_CNT];
+ int token_fd;
+};
+
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+
+int probe_kern_syscall_wrapper(int token_fd);
+int probe_memcg_account(int token_fd);
int bump_rlimit_memlock(void);
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len);
-int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
+ const char *str_sec, size_t str_len,
+ int token_fd);
+int btf_load_into_kernel(struct btf *btf,
+ char *log_buf, size_t log_sz, __u32 log_level,
+ int token_fd);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
@@ -532,6 +568,17 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
+/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
+ * Original FD is not closed or altered in any other way.
+ * Preserves original FD value, if it's invalid (negative).
+ */
+static inline int dup_good_fd(int fd)
+{
+ if (fd < 0)
+ return fd;
+ return fcntl(fd, F_DUPFD_CLOEXEC, 3);
+}
+
/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
* Takes ownership of the fd passed in, and closes it if calling
* fcntl(fd, F_DUPFD_CLOEXEC, 3).
@@ -543,7 +590,7 @@ static inline int ensure_good_fd(int fd)
if (fd < 0)
return fd;
if (fd < 3) {
- fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+ fd = dup_good_fd(fd);
saved_errno = errno;
close(old_fd);
errno = saved_errno;
@@ -555,6 +602,15 @@ static inline int ensure_good_fd(int fd)
return fd;
}
+static inline int sys_dup2(int oldfd, int newfd)
+{
+#ifdef __NR_dup2
+ return syscall(__NR_dup2, oldfd, newfd);
+#else
+ return syscall(__NR_dup3, oldfd, newfd, 0);
+#endif
+}
+
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
* Regardless of success, *tmp_fd* is closed.
* Whatever *fixed_fd* pointed to is closed silently.
@@ -563,7 +619,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
int err;
- err = dup2(tmp_fd, fixed_fd);
+ err = sys_dup2(tmp_fd, fixed_fd);
err = err < 0 ? -errno : 0;
close(tmp_fd); /* clean up temporary FD */
return err;
@@ -613,4 +669,6 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt,
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
unsigned long **poffsets, size_t *pcnt);
+int probe_fd(int fd);
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 9c4db90b92b6..302188122439 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -219,7 +219,8 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
}
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len)
+ const char *str_sec, size_t str_len,
+ int token_fd)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
@@ -229,6 +230,10 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
.str_off = types_len,
.str_len = str_len,
};
+ LIBBPF_OPTS(bpf_btf_load_opts, opts,
+ .token_fd = token_fd,
+ .btf_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
int btf_fd, btf_len;
__u8 *raw_btf;
@@ -241,7 +246,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
- btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
+ btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);
free(raw_btf);
return btf_fd;
@@ -271,7 +276,7 @@ static int load_local_storage_btf(void)
};
return libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
+ strs, sizeof(strs), 0);
}
static int probe_map_create(enum bpf_map_type map_type)
@@ -326,12 +331,20 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_STRUCT_OPS:
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
opts.btf_vmlinux_value_type_id = 1;
+ opts.value_type_btf_obj_fd = -1;
exp_err = -524; /* -ENOTSUPP */
break;
case BPF_MAP_TYPE_BLOOM_FILTER:
key_size = 0;
max_entries = 1;
break;
+ case BPF_MAP_TYPE_ARENA:
+ key_size = 0;
+ value_size = 0;
+ max_entries = 1; /* one page */
+ opts.map_extra = 0; /* can mmap() at any address */
+ opts.map_flags = BPF_F_MMAPABLE;
+ break;
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_ARRAY:
case BPF_MAP_TYPE_PROG_ARRAY:
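With this case in place, arena support can be probed like any other map type via libbpf's public API; the return value follows the usual 1 / 0 / negative-errno convention:

	int ret = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_ARENA, NULL);

	if (ret < 0)
		fprintf(stderr, "probe failed: %d\n", ret);
	else
		printf("BPF arena maps %ssupported\n", ret == 1 ? "" : "not ");
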
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 16bca56002ab..0d4be829551b 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -2732,7 +2732,7 @@ static int finalize_btf(struct bpf_linker *linker)
/* Emit .BTF.ext section */
if (linker->btf_ext) {
- raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz);
+ raw_data = btf_ext__raw_data(linker->btf_ext, &raw_sz);
if (!raw_data)
return -ENOMEM;
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 090bcf6e3b3d..68a2def17175 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -496,8 +496,8 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
if (err)
return libbpf_err(err);
- opts->feature_flags = md.flags;
- opts->xdp_zc_max_segs = md.xdp_zc_max_segs;
+ OPTS_SET(opts, feature_flags, md.flags);
+ OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs);
skip_feature_flags:
return 0;
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
index a139334d57b6..626d7ffb03d6 100644
--- a/tools/lib/bpf/str_error.h
+++ b/tools/lib/bpf/str_error.h
@@ -2,5 +2,8 @@
#ifndef __LIBBPF_STR_ERROR_H
#define __LIBBPF_STR_ERROR_H
+#define STRERR_BUFSIZE 128
+
char *libbpf_strerror_r(int err, char *dst, int len);
+
#endif /* __LIBBPF_STR_ERROR_H */
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index da1aa10bbcc3..8e9e09d84e26 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -11,11 +11,11 @@ $(SUBDIRS):
$(MAKE) -C $@ ; \
fi
-clean hardclean:
+clean distclean:
@for dir in $(SUBDIRS) ; do \
if [ -f "$$dir/Makefile" ] ; then \
$(MAKE) -C $$dir $@; \
fi \
done
-.PHONY: clean all $(SUBDIRS)
+.PHONY: all clean distclean $(SUBDIRS)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index 3110f84dd029..07373c5a7afe 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -15,7 +15,12 @@ UAPI_PATH:=../../../../include/uapi/
get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
+CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h)
CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
+CFLAGS_mptcp_pm:=$(call get_hdr_inc,_LINUX_MPTCP_PM_H,mptcp_pm.h)
CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h)
+CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_ovs_flow:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_ovs_vport:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py
index 2ad9ec0f5545..f131e33ac3ee 100755
--- a/tools/net/ynl/cli.py
+++ b/tools/net/ynl/cli.py
@@ -6,7 +6,16 @@ import json
import pprint
import time
-from lib import YnlFamily, Netlink
+from lib import YnlFamily, Netlink, NlError
+
+
+class YnlEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, bytes):
+ return bytes.hex(obj)
+ if isinstance(obj, set):
+ return list(obj)
+ return json.JSONEncoder.default(self, obj)
def main():
@@ -28,8 +37,17 @@ def main():
parser.add_argument('--append', dest='flags', action='append_const',
const=Netlink.NLM_F_APPEND)
parser.add_argument('--process-unknown', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--output-json', action='store_true')
+ parser.add_argument('--dbg-small-recv', default=0, const=4000,
+ action='store', nargs='?', type=int)
args = parser.parse_args()
+ def output(msg):
+ if args.output_json:
+ print(json.dumps(msg, cls=YnlEncoder))
+ else:
+ pprint.PrettyPrinter().pprint(msg)
+
if args.no_schema:
args.schema = ''
@@ -37,7 +55,10 @@ def main():
if args.json_text:
attrs = json.loads(args.json_text)
- ynl = YnlFamily(args.spec, args.schema, args.process_unknown)
+ ynl = YnlFamily(args.spec, args.schema, args.process_unknown,
+ recv_size=args.dbg_small_recv)
+ if args.dbg_small_recv:
+ ynl.set_recv_dbg(True)
if args.ntf:
ynl.ntf_subscribe(args.ntf)
@@ -45,16 +66,20 @@ def main():
if args.sleep:
time.sleep(args.sleep)
- if args.do:
- reply = ynl.do(args.do, attrs, args.flags)
- pprint.PrettyPrinter().pprint(reply)
- if args.dump:
- reply = ynl.dump(args.dump, attrs)
- pprint.PrettyPrinter().pprint(reply)
+ try:
+ if args.do:
+ reply = ynl.do(args.do, attrs, args.flags)
+ output(reply)
+ if args.dump:
+ reply = ynl.dump(args.dump, attrs)
+ output(reply)
+ except NlError as e:
+ print(e)
+ exit(1)
if args.ntf:
ynl.check_ntf()
- pprint.PrettyPrinter().pprint(ynl.async_msg_queue)
+ output(ynl.async_msg_queue)
if __name__ == "__main__":
diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile
index 84cbabdd02a8..713f5fb9cc2d 100644
--- a/tools/net/ynl/generated/Makefile
+++ b/tools/net/ynl/generated/Makefile
@@ -14,7 +14,10 @@ YNL_GEN_ARG_ethtool:=--user-header linux/ethtool_netlink.h \
TOOL:=../ynl-gen-c.py
-GENS:=ethtool devlink handshake fou netdev nfsd
+GENS_PATHS=$(shell grep -nrI --files-without-match \
+ 'protocol: netlink' \
+ ../../../../Documentation/netlink/specs/)
+GENS=$(patsubst ../../../../Documentation/netlink/specs/%.yaml,%,${GENS_PATHS})
SRCS=$(patsubst %,%-user.c,${GENS})
HDRS=$(patsubst %,%-user.h,${GENS})
OBJS=$(patsubst %,%-user.o,${GENS})
@@ -40,11 +43,11 @@ protos.a: $(OBJS)
clean:
rm -f *.o
-hardclean: clean
+distclean: clean
rm -f *.c *.h *.a
regen:
@../ynl-regen.sh
-.PHONY: all clean hardclean regen
+.PHONY: all clean distclean regen
.DEFAULT_GOAL: all
diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile
index d2e50fd0a52d..dfff3ecd1cba 100644
--- a/tools/net/ynl/lib/Makefile
+++ b/tools/net/ynl/lib/Makefile
@@ -17,12 +17,13 @@ ynl.a: $(OBJS)
ar rcs $@ $(OBJS)
clean:
rm -f *.o *.d *~
+ rm -rf __pycache__
-hardclean: clean
+distclean: clean
rm -f *.a
%.o: %.c
$(COMPILE.c) -MMD -c -o $@ $<
-.PHONY: all clean
+.PHONY: all clean distclean
.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/lib/__init__.py b/tools/net/ynl/lib/__init__.py
index f7eaa07783e7..9137b83e580a 100644
--- a/tools/net/ynl/lib/__init__.py
+++ b/tools/net/ynl/lib/__init__.py
@@ -2,7 +2,7 @@
from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
SpecFamily, SpecOperation
-from .ynl import YnlFamily, Netlink
+from .ynl import YnlFamily, Netlink, NlError
__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
- "SpecFamily", "SpecOperation", "YnlFamily", "Netlink"]
+ "SpecFamily", "SpecOperation", "YnlFamily", "Netlink", "NlError"]
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
index 44f13e383e8a..6d08ab9e213f 100644
--- a/tools/net/ynl/lib/nlspec.py
+++ b/tools/net/ynl/lib/nlspec.py
@@ -144,7 +144,7 @@ class SpecEnumSet(SpecElement):
class SpecAttr(SpecElement):
- """ Single Netlink atttribute type
+ """ Single Netlink attribute type
Represents a single attribute type within an attr space.
@@ -248,6 +248,7 @@ class SpecStructMember(SpecElement):
len integer, optional byte length of binary types
display_hint string, hint to help choose format specifier
when displaying the value
+ struct string, name of nested struct type
"""
def __init__(self, family, yaml):
super().__init__(family, yaml)
@@ -256,6 +257,7 @@ class SpecStructMember(SpecElement):
self.enum = yaml.get('enum')
self.len = yaml.get('len')
self.display_hint = yaml.get('display-hint')
+ self.struct = yaml.get('struct')
class SpecStruct(SpecElement):
@@ -306,10 +308,9 @@ class SpecSubMessage(SpecElement):
class SpecSubMessageFormat(SpecElement):
- """ Netlink sub-message definition
+ """ Netlink sub-message format definition
- Represents a set of sub-message formats for polymorphic nlattrs
- that contain type-specific sub messages.
+ Represents a single format for a sub-message.
Attributes:
value attribute value to match against type selector
@@ -417,6 +418,7 @@ class SpecFamily(SpecElement):
consts dict of all constants/enums
fixed_header string, optional name of family default fixed header struct
mcast_groups dict of all multicast groups (index by name)
+ kernel_family dict of kernel family attributes
"""
def __init__(self, spec_path, schema_path=None, exclude_ops=None):
with open(spec_path, "r") as stream:
@@ -460,6 +462,7 @@ class SpecFamily(SpecElement):
self.ntfs = collections.OrderedDict()
self.consts = collections.OrderedDict()
self.mcast_groups = collections.OrderedDict()
+ self.kernel_family = collections.OrderedDict(self.yaml.get('kernel-family', {}))
last_exception = None
while len(self._resolution_list) > 0:
diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h
index 7491da8e7555..6cf890080dc0 100644
--- a/tools/net/ynl/lib/ynl-priv.h
+++ b/tools/net/ynl/lib/ynl-priv.h
@@ -2,16 +2,16 @@
#ifndef __YNL_C_PRIV_H
#define __YNL_C_PRIV_H 1
+#include <stdbool.h>
#include <stddef.h>
-#include <libmnl/libmnl.h>
#include <linux/types.h>
+struct ynl_parse_arg;
+
/*
* YNL internals / low level stuff
*/
-/* Generic mnl helper code */
-
enum ynl_policy_type {
YNL_PT_REJECT = 1,
YNL_PT_IGNORE,
@@ -27,6 +27,20 @@ enum ynl_policy_type {
YNL_PT_BITFIELD32,
};
+enum ynl_parse_result {
+ YNL_PARSE_CB_ERROR = -1,
+ YNL_PARSE_CB_STOP = 0,
+ YNL_PARSE_CB_OK = 1,
+};
+
+#define YNL_SOCKET_BUFFER_SIZE (1 << 17)
+
+#define YNL_ARRAY_SIZE(array) (sizeof(array) ? \
+ sizeof(array) / sizeof(array[0]) : 0)
+
+typedef int (*ynl_parse_cb_t)(const struct nlmsghdr *nlh,
+ struct ynl_parse_arg *yarg);
+
struct ynl_policy_attr {
enum ynl_policy_type type;
unsigned int len;
@@ -80,8 +94,6 @@ struct ynl_ntf_base_type {
unsigned char data[] __attribute__((aligned(8)));
};
-extern mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE];
-
struct nlmsghdr *
ynl_gemsg_start_req(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
struct nlmsghdr *
@@ -89,30 +101,26 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr);
-int ynl_recv_ack(struct ynl_sock *ys, int ret);
-int ynl_cb_null(const struct nlmsghdr *nlh, void *data);
-
/* YNL specific helpers used by the auto-generated code */
struct ynl_req_state {
struct ynl_parse_arg yarg;
- mnl_cb_t cb;
+ ynl_parse_cb_t cb;
__u32 rsp_cmd;
};
struct ynl_dump_state {
- struct ynl_sock *ys;
- struct ynl_policy_nest *rsp_policy;
+ struct ynl_parse_arg yarg;
void *first;
struct ynl_dump_list_type *last;
size_t alloc_sz;
- mnl_cb_t cb;
+ ynl_parse_cb_t cb;
__u32 rsp_cmd;
};
struct ynl_ntf_info {
struct ynl_policy_nest *policy;
- mnl_cb_t cb;
+ ynl_parse_cb_t cb;
size_t alloc_sz;
void (*free)(struct ynl_ntf_base_type *ntf);
};
@@ -125,20 +133,325 @@ int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd);
int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg);
-#ifndef MNL_HAS_AUTO_SCALARS
-static inline uint64_t mnl_attr_get_uint(const struct nlattr *attr)
+/* Netlink message handling helpers */
+
+#define YNL_MSG_OVERFLOW 1
+
+static inline struct nlmsghdr *ynl_nlmsg_put_header(void *buf)
+{
+ struct nlmsghdr *nlh = buf;
+
+ memset(nlh, 0, sizeof(*nlh));
+ nlh->nlmsg_len = NLMSG_HDRLEN;
+
+ return nlh;
+}
+
+static inline unsigned int ynl_nlmsg_data_len(const struct nlmsghdr *nlh)
{
- if (mnl_attr_get_payload_len(attr) == 4)
- return mnl_attr_get_u32(attr);
- return mnl_attr_get_u64(attr);
+ return nlh->nlmsg_len - NLMSG_HDRLEN;
+}
+
+static inline void *ynl_nlmsg_data(const struct nlmsghdr *nlh)
+{
+ return (unsigned char *)nlh + NLMSG_HDRLEN;
+}
+
+static inline void *
+ynl_nlmsg_data_offset(const struct nlmsghdr *nlh, unsigned int offset)
+{
+ return (unsigned char *)nlh + NLMSG_HDRLEN + offset;
+}
+
+static inline void *ynl_nlmsg_end_addr(const struct nlmsghdr *nlh)
+{
+ return (char *)nlh + nlh->nlmsg_len;
+}
+
+static inline void *
+ynl_nlmsg_put_extra_header(struct nlmsghdr *nlh, unsigned int size)
+{
+ void *tail = ynl_nlmsg_end_addr(nlh);
+
+ nlh->nlmsg_len += NLMSG_ALIGN(size);
+ return tail;
+}
+
+/* Netlink attribute helpers */
+
+static inline unsigned int ynl_attr_type(const struct nlattr *attr)
+{
+ return attr->nla_type & NLA_TYPE_MASK;
+}
+
+static inline unsigned int ynl_attr_data_len(const struct nlattr *attr)
+{
+ return attr->nla_len - NLA_HDRLEN;
+}
+
+static inline void *ynl_attr_data(const struct nlattr *attr)
+{
+ return (unsigned char *)attr + NLA_HDRLEN;
+}
+
+static inline void *ynl_attr_data_end(const struct nlattr *attr)
+{
+ return ynl_attr_data(attr) + ynl_attr_data_len(attr);
+}
+
+#define ynl_attr_for_each(attr, nlh, fixed_hdr_sz) \
+ for ((attr) = ynl_attr_first(nlh, (nlh)->nlmsg_len, \
+ NLMSG_HDRLEN + fixed_hdr_sz); attr; \
+ (attr) = ynl_attr_next(ynl_nlmsg_end_addr(nlh), attr))
+
+#define ynl_attr_for_each_nested(attr, outer) \
+ for ((attr) = ynl_attr_first(outer, outer->nla_len, \
+ sizeof(struct nlattr)); attr; \
+ (attr) = ynl_attr_next(ynl_attr_data_end(outer), attr))
+
+#define ynl_attr_for_each_payload(start, len, attr) \
+ for ((attr) = ynl_attr_first(start, len, 0); attr; \
+ (attr) = ynl_attr_next(start + len, attr))
+
+static inline struct nlattr *
+ynl_attr_if_good(const void *end, struct nlattr *attr)
+{
+ if (attr + 1 > (const struct nlattr *)end)
+ return NULL;
+ if (ynl_attr_data_end(attr) > end)
+ return NULL;
+ return attr;
+}
+
+static inline struct nlattr *
+ynl_attr_next(const void *end, const struct nlattr *prev)
+{
+ struct nlattr *attr;
+
+ attr = (void *)((char *)prev + NLA_ALIGN(prev->nla_len));
+ return ynl_attr_if_good(end, attr);
+}
+
+static inline struct nlattr *
+ynl_attr_first(const void *start, size_t len, size_t skip)
+{
+ struct nlattr *attr;
+
+ attr = (void *)((char *)start + NLMSG_ALIGN(skip));
+ return ynl_attr_if_good(start + len, attr);
+}
+
+static inline bool
+__ynl_attr_put_overflow(struct nlmsghdr *nlh, size_t size)
+{
+ bool o;
+
+ /* ynl_msg_start() stashed buffer length in nlmsg_pid. */
+ o = nlh->nlmsg_len + NLA_HDRLEN + NLMSG_ALIGN(size) > nlh->nlmsg_pid;
+ if (o)
+ /* YNL_MSG_OVERFLOW is < NLMSG_HDRLEN, so all subsequent checks
+ * are guaranteed to fail.
+ */
+ nlh->nlmsg_pid = YNL_MSG_OVERFLOW;
+ return o;
+}
+
+static inline struct nlattr *
+ynl_attr_nest_start(struct nlmsghdr *nlh, unsigned int attr_type)
+{
+ struct nlattr *attr;
+
+ if (__ynl_attr_put_overflow(nlh, 0))
+ return ynl_nlmsg_end_addr(nlh) - NLA_HDRLEN;
+
+ attr = ynl_nlmsg_end_addr(nlh);
+ attr->nla_type = attr_type | NLA_F_NESTED;
+ nlh->nlmsg_len += NLA_HDRLEN;
+
+ return attr;
}
static inline void
-mnl_attr_put_uint(struct nlmsghdr *nlh, uint16_t type, uint64_t data)
+ynl_attr_nest_end(struct nlmsghdr *nlh, struct nlattr *attr)
{
- if ((uint32_t)data == (uint64_t)data)
- return mnl_attr_put_u32(nlh, type, data);
- return mnl_attr_put_u64(nlh, type, data);
+ attr->nla_len = (char *)ynl_nlmsg_end_addr(nlh) - (char *)attr;
+}
+
+static inline void
+ynl_attr_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ const void *value, size_t size)
+{
+ struct nlattr *attr;
+
+ if (__ynl_attr_put_overflow(nlh, size))
+ return;
+
+ attr = ynl_nlmsg_end_addr(nlh);
+ attr->nla_type = attr_type;
+ attr->nla_len = NLA_HDRLEN + size;
+
+ memcpy(ynl_attr_data(attr), value, size);
+
+ nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
+}
+
+static inline void
+ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
+{
+ struct nlattr *attr;
+ size_t len;
+
+ len = strlen(str);
+ if (__ynl_attr_put_overflow(nlh, len))
+ return;
+
+ attr = ynl_nlmsg_end_addr(nlh);
+ attr->nla_type = attr_type;
+
+ strcpy(ynl_attr_data(attr), str);
+ attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
+
+ nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
+}
+
+static inline const char *ynl_attr_get_str(const struct nlattr *attr)
+{
+ return (const char *)ynl_attr_data(attr);
+}
+
+static inline __s8 ynl_attr_get_s8(const struct nlattr *attr)
+{
+ return *(__s8 *)ynl_attr_data(attr);
+}
+
+static inline __s16 ynl_attr_get_s16(const struct nlattr *attr)
+{
+ return *(__s16 *)ynl_attr_data(attr);
+}
+
+static inline __s32 ynl_attr_get_s32(const struct nlattr *attr)
+{
+ return *(__s32 *)ynl_attr_data(attr);
+}
+
+static inline __s64 ynl_attr_get_s64(const struct nlattr *attr)
+{
+ __s64 tmp;
+
+ memcpy(&tmp, (unsigned char *)(attr + 1), sizeof(tmp));
+ return tmp;
+}
+
+static inline __u8 ynl_attr_get_u8(const struct nlattr *attr)
+{
+ return *(__u8 *)ynl_attr_data(attr);
+}
+
+static inline __u16 ynl_attr_get_u16(const struct nlattr *attr)
+{
+ return *(__u16 *)ynl_attr_data(attr);
+}
+
+static inline __u32 ynl_attr_get_u32(const struct nlattr *attr)
+{
+ return *(__u32 *)ynl_attr_data(attr);
+}
+
+static inline __u64 ynl_attr_get_u64(const struct nlattr *attr)
+{
+ __u64 tmp;
+
+ memcpy(&tmp, (unsigned char *)(attr + 1), sizeof(tmp));
+ return tmp;
+}
+
+static inline void
+ynl_attr_put_s8(struct nlmsghdr *nlh, unsigned int attr_type, __s8 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_s16(struct nlmsghdr *nlh, unsigned int attr_type, __s16 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_s32(struct nlmsghdr *nlh, unsigned int attr_type, __s32 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_s64(struct nlmsghdr *nlh, unsigned int attr_type, __s64 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u8(struct nlmsghdr *nlh, unsigned int attr_type, __u8 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u16(struct nlmsghdr *nlh, unsigned int attr_type, __u16 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u32(struct nlmsghdr *nlh, unsigned int attr_type, __u32 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u64(struct nlmsghdr *nlh, unsigned int attr_type, __u64 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline __u64 ynl_attr_get_uint(const struct nlattr *attr)
+{
+ switch (ynl_attr_data_len(attr)) {
+ case 4:
+ return ynl_attr_get_u32(attr);
+ case 8:
+ return ynl_attr_get_u64(attr);
+ default:
+ return 0;
+ }
+}
+
+static inline __s64 ynl_attr_get_sint(const struct nlattr *attr)
+{
+ switch (ynl_attr_data_len(attr)) {
+ case 4:
+ return ynl_attr_get_s32(attr);
+ case 8:
+ return ynl_attr_get_s64(attr);
+ default:
+ return 0;
+ }
+}
+
+static inline void
+ynl_attr_put_uint(struct nlmsghdr *nlh, __u16 type, __u64 data)
+{
+ if ((__u32)data == (__u64)data)
+ ynl_attr_put_u32(nlh, type, data);
+ else
+ ynl_attr_put_u64(nlh, type, data);
+}
+
+static inline void
+ynl_attr_put_sint(struct nlmsghdr *nlh, __u16 type, __s64 data)
+{
+ if ((__s32)data == (__s64)data)
+ ynl_attr_put_s32(nlh, type, data);
+ else
+ ynl_attr_put_s64(nlh, type, data);
}
-#endif
#endif
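Taken together these helpers replace the libmnl surface one for one; a minimal sketch of hand-building a message with them (attribute types are placeholders):

	char buf[YNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh = ynl_nlmsg_put_header(buf);
	struct nlattr *nest;

	nlh->nlmsg_pid = sizeof(buf);		/* stash capacity for overflow checks */
	ynl_attr_put_u32(nlh, 1, 42);		/* type 1: a u32 */
	nest = ynl_attr_nest_start(nlh, 2);	/* type 2: a nest */
	ynl_attr_put_str(nlh, 3, "name");
	ynl_attr_nest_end(nlh, nest);

In real callers ynl_msg_start() and ynl_msg_end() manage the nlmsg_pid stash, as the ynl.c changes below show.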
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index 45e49671ae87..4b9c091fc86b 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -3,10 +3,11 @@
#include <poll.h>
#include <string.h>
#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
#include <linux/types.h>
-
-#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
+#include <sys/socket.h>
#include "ynl.h"
@@ -92,9 +93,9 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
data_len = end - start;
- mnl_attr_for_each_payload(start, data_len) {
+ ynl_attr_for_each_payload(start, data_len, attr) {
astart_off = (char *)attr - (char *)start;
- aend_off = astart_off + mnl_attr_get_payload_len(attr);
+ aend_off = astart_off + ynl_attr_data_len(attr);
if (aend_off <= off)
continue;
@@ -106,7 +107,7 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
off -= astart_off;
- type = mnl_attr_get_type(attr);
+ type = ynl_attr_type(attr);
if (ynl_err_walk_report_one(policy, type, str, str_sz, &n))
return n;
@@ -124,8 +125,8 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
}
off -= sizeof(struct nlattr);
- start = mnl_attr_get_payload(attr);
- end = start + mnl_attr_get_payload_len(attr);
+ start = ynl_attr_data(attr);
+ end = start + ynl_attr_data_len(attr);
return n + ynl_err_walk(ys, start, end, off, policy->table[type].nest,
&str[n], str_sz - n, nest_pol);
@@ -147,14 +148,14 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) {
yerr_msg(ys, "%s", strerror(ys->err.code));
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
}
- mnl_attr_for_each(attr, nlh, hlen) {
+ ynl_attr_for_each(attr, nlh, hlen) {
unsigned int len, type;
- len = mnl_attr_get_payload_len(attr);
- type = mnl_attr_get_type(attr);
+ len = ynl_attr_data_len(attr);
+ type = ynl_attr_type(attr);
if (type > NLMSGERR_ATTR_MAX)
continue;
@@ -166,12 +167,12 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
case NLMSGERR_ATTR_MISS_TYPE:
case NLMSGERR_ATTR_MISS_NEST:
if (len != sizeof(__u32))
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
break;
case NLMSGERR_ATTR_MSG:
- str = mnl_attr_get_payload(attr);
+ str = ynl_attr_get_str(attr);
if (str[len - 1])
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
break;
default:
break;
@@ -185,14 +186,13 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
unsigned int n, off;
void *start, *end;
- ys->err.attr_offs = mnl_attr_get_u32(tb[NLMSGERR_ATTR_OFFS]);
+ ys->err.attr_offs = ynl_attr_get_u32(tb[NLMSGERR_ATTR_OFFS]);
n = snprintf(bad_attr, sizeof(bad_attr), "%sbad attribute: ",
str ? " (" : "");
- start = mnl_nlmsg_get_payload_offset(ys->nlh,
- ys->family->hdr_len);
- end = mnl_nlmsg_get_payload_tail(ys->nlh);
+ start = ynl_nlmsg_data_offset(ys->nlh, ys->family->hdr_len);
+ end = ynl_nlmsg_end_addr(ys->nlh);
off = ys->err.attr_offs;
off -= sizeof(struct nlmsghdr);
@@ -211,18 +211,17 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
void *start, *end;
int n2;
- type = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_TYPE]);
+ type = ynl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_TYPE]);
n = snprintf(miss_attr, sizeof(miss_attr), "%smissing attribute: ",
bad_attr[0] ? ", " : (str ? " (" : ""));
- start = mnl_nlmsg_get_payload_offset(ys->nlh,
- ys->family->hdr_len);
- end = mnl_nlmsg_get_payload_tail(ys->nlh);
+ start = ynl_nlmsg_data_offset(ys->nlh, ys->family->hdr_len);
+ end = ynl_nlmsg_end_addr(ys->nlh);
nest_pol = ys->req_policy;
if (tb[NLMSGERR_ATTR_MISS_NEST]) {
- off = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_NEST]);
+ off = ynl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_NEST]);
off -= sizeof(struct nlmsghdr);
off -= ys->family->hdr_len;
@@ -254,13 +253,13 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
else
yerr_msg(ys, "%s", strerror(ys->err.code));
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
}
-static int ynl_cb_error(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_cb_error(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- const struct nlmsgerr *err = mnl_nlmsg_get_payload(nlh);
- struct ynl_parse_arg *yarg = data;
+ const struct nlmsgerr *err = ynl_nlmsg_data(nlh);
unsigned int hlen;
int code;
@@ -270,16 +269,15 @@ static int ynl_cb_error(const struct nlmsghdr *nlh, void *data)
hlen = sizeof(*err);
if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
- hlen += mnl_nlmsg_get_payload_len(&err->msg);
+ hlen += ynl_nlmsg_data_len(&err->msg);
ynl_ext_ack_check(yarg->ys, nlh, hlen);
- return code ? MNL_CB_ERROR : MNL_CB_STOP;
+ return code ? YNL_PARSE_CB_ERROR : YNL_PARSE_CB_STOP;
}
-static int ynl_cb_done(const struct nlmsghdr *nlh, void *data)
+static int ynl_cb_done(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_parse_arg *yarg = data;
int err;
err = *(int *)NLMSG_DATA(nlh);
@@ -289,23 +287,11 @@ static int ynl_cb_done(const struct nlmsghdr *nlh, void *data)
ynl_ext_ack_check(yarg->ys, nlh, sizeof(int));
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
- return MNL_CB_STOP;
-}
-
-static int ynl_cb_noop(const struct nlmsghdr *nlh, void *data)
-{
- return MNL_CB_OK;
+ return YNL_PARSE_CB_STOP;
}
-mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE] = {
- [NLMSG_NOOP] = ynl_cb_noop,
- [NLMSG_ERROR] = ynl_cb_error,
- [NLMSG_DONE] = ynl_cb_done,
- [NLMSG_OVERRUN] = ynl_cb_noop,
-};
-
/* Attribute validation */
int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
@@ -314,9 +300,9 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
unsigned int type, len;
unsigned char *data;
- data = mnl_attr_get_payload(attr);
- len = mnl_attr_get_payload_len(attr);
- type = mnl_attr_get_type(attr);
+ data = ynl_attr_data(attr);
+ len = ynl_attr_data_len(attr);
+ type = ynl_attr_type(attr);
if (type > yarg->rsp_policy->max_attr) {
yerr(yarg->ys, YNL_ERROR_INTERNAL,
"Internal error, validating unknown attribute");
@@ -413,14 +399,38 @@ struct nlmsghdr *ynl_msg_start(struct ynl_sock *ys, __u32 id, __u16 flags)
ynl_err_reset(ys);
- nlh = ys->nlh = mnl_nlmsg_put_header(ys->tx_buf);
+ nlh = ys->nlh = ynl_nlmsg_put_header(ys->tx_buf);
nlh->nlmsg_type = id;
nlh->nlmsg_flags = flags;
nlh->nlmsg_seq = ++ys->seq;
+ /* This is a local YNL hack for length checking: we put the buffer
+ * length in nlmsg_pid, since messages sent to the kernel always use
+ * PID 0. The message needs to be terminated with ynl_msg_end().
+ */
+ nlh->nlmsg_pid = YNL_SOCKET_BUFFER_SIZE;
+
return nlh;
}
+static int ynl_msg_end(struct ynl_sock *ys, struct nlmsghdr *nlh)
+{
+ /* We stash buffer length in nlmsg_pid. */
+ if (nlh->nlmsg_pid == 0) {
+ yerr(ys, YNL_ERROR_INPUT_INVALID,
+ "Unknown input buffer length");
+ return -EINVAL;
+ }
+ if (nlh->nlmsg_pid == YNL_MSG_OVERFLOW) {
+ yerr(ys, YNL_ERROR_INPUT_TOO_BIG,
+ "Constructed message longer than internal buffer");
+ return -EMSGSIZE;
+ }
+
+ nlh->nlmsg_pid = 0;
+ return 0;
+}
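The resulting request lifecycle, roughly; ys is an initialized struct ynl_sock, and the command and attribute ids are placeholders:

	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, 1 /* cmd */, 1);
	ynl_attr_put_u32(nlh, 1 /* attr */, 0);
	err = ynl_msg_end(ys, nlh);	/* validates, then clears the stash */
	if (!err)
		err = send(ys->socket, nlh, nlh->nlmsg_len, 0);
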
+
struct nlmsghdr *
ynl_gemsg_start(struct ynl_sock *ys, __u32 id, __u16 flags,
__u8 cmd, __u8 version)
@@ -435,7 +445,7 @@ ynl_gemsg_start(struct ynl_sock *ys, __u32 id, __u16 flags,
gehdr.cmd = cmd;
gehdr.version = version;
- data = mnl_nlmsg_put_extra_header(nlh, sizeof(gehdr));
+ data = ynl_nlmsg_put_extra_header(nlh, sizeof(gehdr));
memcpy(data, &gehdr, sizeof(gehdr));
return nlh;
@@ -464,33 +474,85 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
cmd, version);
}
-int ynl_recv_ack(struct ynl_sock *ys, int ret)
+static int ynl_cb_null(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_parse_arg yarg = { .ys = ys, };
+ yerr(yarg->ys, YNL_ERROR_UNEXPECT_MSG,
+ "Received a message when none were expected");
- if (!ret) {
- yerr(ys, YNL_ERROR_EXPECT_ACK,
- "Expecting an ACK but nothing received");
- return -1;
+ return YNL_PARSE_CB_ERROR;
+}
+
+static int
+__ynl_sock_read_msgs(struct ynl_parse_arg *yarg, ynl_parse_cb_t cb, int flags)
+{
+ struct ynl_sock *ys = yarg->ys;
+ const struct nlmsghdr *nlh;
+ ssize_t len, rem;
+ int ret;
+
+ len = recv(ys->socket, ys->rx_buf, YNL_SOCKET_BUFFER_SIZE, flags);
+ if (len < 0) {
+ if (flags & MSG_DONTWAIT && errno == EAGAIN)
+ return YNL_PARSE_CB_STOP;
+ return len;
}
- ret = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);
- if (ret < 0) {
- perr(ys, "Socket receive failed");
- return ret;
+ ret = YNL_PARSE_CB_STOP;
+ for (rem = len; rem > 0; NLMSG_NEXT(nlh, rem)) {
+ nlh = (struct nlmsghdr *)&ys->rx_buf[len - rem];
+ if (!NLMSG_OK(nlh, rem)) {
+ yerr(yarg->ys, YNL_ERROR_INV_RESP,
+ "Invalid message or trailing data in the response.");
+ return YNL_PARSE_CB_ERROR;
+ }
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP_INTR) {
+ /* TODO: handle this better */
+ yerr(yarg->ys, YNL_ERROR_DUMP_INTER,
+ "Dump interrupted / inconsistent, please retry.");
+ return YNL_PARSE_CB_ERROR;
+ }
+
+ switch (nlh->nlmsg_type) {
+ case 0:
+ yerr(yarg->ys, YNL_ERROR_INV_RESP,
+ "Invalid message type in the response.");
+ return YNL_PARSE_CB_ERROR;
+ case NLMSG_NOOP:
+ case NLMSG_OVERRUN ... NLMSG_MIN_TYPE - 1:
+ ret = YNL_PARSE_CB_OK;
+ break;
+ case NLMSG_ERROR:
+ ret = ynl_cb_error(nlh, yarg);
+ break;
+ case NLMSG_DONE:
+ ret = ynl_cb_done(nlh, yarg);
+ break;
+ default:
+ ret = cb(nlh, yarg);
+ break;
+ }
}
- return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid,
- ynl_cb_null, &yarg);
+
+ return ret;
}
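Callbacks plugged into this loop follow the ynl_parse_cb_t contract: YNL_PARSE_CB_OK keeps reading, YNL_PARSE_CB_STOP finishes cleanly, YNL_PARSE_CB_ERROR aborts. A skeletal example:

	static int my_cb(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
	{
		/* hypothetical: decode one response message into yarg->data */
		return YNL_PARSE_CB_OK;
	}
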
-int ynl_cb_null(const struct nlmsghdr *nlh, void *data)
+static int ynl_sock_read_msgs(struct ynl_parse_arg *yarg, ynl_parse_cb_t cb)
{
- struct ynl_parse_arg *yarg = data;
+ return __ynl_sock_read_msgs(yarg, cb, 0);
+}
- yerr(yarg->ys, YNL_ERROR_UNEXPECT_MSG,
- "Received a message when none were expected");
+static int ynl_recv_ack(struct ynl_sock *ys, int ret)
+{
+ struct ynl_parse_arg yarg = { .ys = ys, };
- return MNL_CB_ERROR;
+ if (!ret) {
+ yerr(ys, YNL_ERROR_EXPECT_ACK,
+ "Expecting an ACK but nothing received");
+ return -1;
+ }
+
+ return ynl_sock_read_msgs(&yarg, ynl_cb_null);
}
/* Init/fini and genetlink boiler plate */
@@ -500,7 +562,7 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
const struct nlattr *entry, *attr;
unsigned int i;
- mnl_attr_for_each_nested(attr, mcasts)
+ ynl_attr_for_each_nested(attr, mcasts)
ys->n_mcast_groups++;
if (!ys->n_mcast_groups)
@@ -509,16 +571,16 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
ys->mcast_groups = calloc(ys->n_mcast_groups,
sizeof(*ys->mcast_groups));
if (!ys->mcast_groups)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
i = 0;
- mnl_attr_for_each_nested(entry, mcasts) {
- mnl_attr_for_each_nested(attr, entry) {
- if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_ID)
- ys->mcast_groups[i].id = mnl_attr_get_u32(attr);
- if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_NAME) {
+ ynl_attr_for_each_nested(entry, mcasts) {
+ ynl_attr_for_each_nested(attr, entry) {
+ if (ynl_attr_type(attr) == CTRL_ATTR_MCAST_GRP_ID)
+ ys->mcast_groups[i].id = ynl_attr_get_u32(attr);
+ if (ynl_attr_type(attr) == CTRL_ATTR_MCAST_GRP_NAME) {
strncpy(ys->mcast_groups[i].name,
- mnl_attr_get_str(attr),
+ ynl_attr_get_str(attr),
GENL_NAMSIZ - 1);
ys->mcast_groups[i].name[GENL_NAMSIZ - 1] = 0;
}
@@ -529,35 +591,35 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
return 0;
}
-static int ynl_get_family_info_cb(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_get_family_info_cb(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_parse_arg *yarg = data;
struct ynl_sock *ys = yarg->ys;
const struct nlattr *attr;
bool found_id = true;
- mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
- if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GROUPS)
+ ynl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (ynl_attr_type(attr) == CTRL_ATTR_MCAST_GROUPS)
if (ynl_get_family_info_mcast(ys, attr))
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
- if (mnl_attr_get_type(attr) != CTRL_ATTR_FAMILY_ID)
+ if (ynl_attr_type(attr) != CTRL_ATTR_FAMILY_ID)
continue;
- if (mnl_attr_get_payload_len(attr) != sizeof(__u16)) {
+ if (ynl_attr_data_len(attr) != sizeof(__u16)) {
yerr(ys, YNL_ERROR_ATTR_INVALID, "Invalid family ID");
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
- ys->family_id = mnl_attr_get_u16(attr);
+ ys->family_id = ynl_attr_get_u16(attr);
found_id = true;
}
if (!found_id) {
yerr(ys, YNL_ERROR_ATTR_MISSING, "Family ID missing");
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
}
static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
@@ -567,22 +629,19 @@ static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
int err;
nlh = ynl_gemsg_start_req(ys, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 1);
- mnl_attr_put_strz(nlh, CTRL_ATTR_FAMILY_NAME, family_name);
+ ynl_attr_put_str(nlh, CTRL_ATTR_FAMILY_NAME, family_name);
+
+ err = ynl_msg_end(ys, nlh);
+ if (err < 0)
+ return err;
- err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);
+ err = send(ys->socket, nlh, nlh->nlmsg_len, 0);
if (err < 0) {
perr(ys, "failed to request socket family info");
return err;
}
- err = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);
- if (err <= 0) {
- perr(ys, "failed to receive the socket family info");
- return err;
- }
- err = mnl_cb_run2(ys->rx_buf, err, ys->seq, ys->portid,
- ynl_get_family_info_cb, &yarg,
- ynl_cb_array, ARRAY_SIZE(ynl_cb_array));
+ err = ynl_sock_read_msgs(&yarg, ynl_get_family_info_cb);
if (err < 0) {
free(ys->mcast_groups);
perr(ys, "failed to receive the socket family info - no such family?");
@@ -601,38 +660,54 @@ static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
struct ynl_sock *
ynl_sock_create(const struct ynl_family *yf, struct ynl_error *yse)
{
+ struct sockaddr_nl addr;
struct ynl_sock *ys;
+ socklen_t addrlen;
int one = 1;
- ys = malloc(sizeof(*ys) + 2 * MNL_SOCKET_BUFFER_SIZE);
+ ys = malloc(sizeof(*ys) + 2 * YNL_SOCKET_BUFFER_SIZE);
if (!ys)
return NULL;
memset(ys, 0, sizeof(*ys));
ys->family = yf;
ys->tx_buf = &ys->raw_buf[0];
- ys->rx_buf = &ys->raw_buf[MNL_SOCKET_BUFFER_SIZE];
+ ys->rx_buf = &ys->raw_buf[YNL_SOCKET_BUFFER_SIZE];
ys->ntf_last_next = &ys->ntf_first;
- ys->sock = mnl_socket_open(NETLINK_GENERIC);
- if (!ys->sock) {
+ ys->socket = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (ys->socket < 0) {
__perr(yse, "failed to create a netlink socket");
goto err_free_sock;
}
- if (mnl_socket_setsockopt(ys->sock, NETLINK_CAP_ACK,
- &one, sizeof(one))) {
+ if (setsockopt(ys->socket, SOL_NETLINK, NETLINK_CAP_ACK,
+ &one, sizeof(one))) {
__perr(yse, "failed to enable netlink ACK");
goto err_close_sock;
}
- if (mnl_socket_setsockopt(ys->sock, NETLINK_EXT_ACK,
- &one, sizeof(one))) {
+ if (setsockopt(ys->socket, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one))) {
__perr(yse, "failed to enable netlink ext ACK");
goto err_close_sock;
}
+ memset(&addr, 0, sizeof(addr));
+ addr.nl_family = AF_NETLINK;
+ if (bind(ys->socket, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+ __perr(yse, "unable to bind to a socket address");
+ goto err_close_sock;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addrlen = sizeof(addr);
+ if (getsockname(ys->socket, (struct sockaddr *)&addr, &addrlen) < 0) {
+ __perr(yse, "unable to read socket address");
+ goto err_close_sock;
+ }
+ ys->portid = addr.nl_pid;
ys->seq = random();
- ys->portid = mnl_socket_get_portid(ys->sock);
+
if (ynl_sock_read_family(ys, yf->name)) {
if (yse)
@@ -643,7 +718,7 @@ ynl_sock_create(const struct ynl_family *yf, struct ynl_error *yse)
return ys;
err_close_sock:
- mnl_socket_close(ys->sock);
+ close(ys->socket);
err_free_sock:
free(ys);
return NULL;
@@ -653,7 +728,7 @@ void ynl_sock_destroy(struct ynl_sock *ys)
{
struct ynl_ntf_base_type *ntf;
- mnl_socket_close(ys->sock);
+ close(ys->socket);
while ((ntf = ynl_ntf_dequeue(ys)))
ynl_ntf_free(ntf);
free(ys->mcast_groups);
@@ -680,9 +755,9 @@ int ynl_subscribe(struct ynl_sock *ys, const char *grp_name)
return -1;
}
- err = mnl_socket_setsockopt(ys->sock, NETLINK_ADD_MEMBERSHIP,
- &ys->mcast_groups[i].id,
- sizeof(ys->mcast_groups[i].id));
+ err = setsockopt(ys->socket, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ &ys->mcast_groups[i].id,
+ sizeof(ys->mcast_groups[i].id));
if (err < 0) {
perr(ys, "Subscribing to multicast group failed");
return -1;
@@ -693,7 +768,7 @@ int ynl_subscribe(struct ynl_sock *ys, const char *grp_name)
int ynl_socket_get_fd(struct ynl_sock *ys)
{
- return mnl_socket_get_fd(ys->sock);
+ return ys->socket;
}
struct ynl_ntf_base_type *ynl_ntf_dequeue(struct ynl_sock *ys)
@@ -719,12 +794,12 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
struct genlmsghdr *gehdr;
int ret;
- gehdr = mnl_nlmsg_get_payload(nlh);
+ gehdr = ynl_nlmsg_data(nlh);
if (gehdr->cmd >= ys->family->ntf_info_size)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
info = &ys->family->ntf_info[gehdr->cmd];
if (!info->cb)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
rsp = calloc(1, info->alloc_sz);
rsp->free = info->free;
@@ -732,7 +807,7 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
yarg.rsp_policy = info->policy;
ret = info->cb(nlh, &yarg);
- if (ret <= MNL_CB_STOP)
+ if (ret <= YNL_PARSE_CB_STOP)
goto err_free;
rsp->family = nlh->nlmsg_type;
@@ -741,46 +816,27 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
*ys->ntf_last_next = rsp;
ys->ntf_last_next = &rsp->next;
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
err_free:
info->free(rsp);
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
-static int ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_ntf_trampoline(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_parse_arg *yarg = data;
-
return ynl_ntf_parse(yarg->ys, nlh);
}
int ynl_ntf_check(struct ynl_sock *ys)
{
struct ynl_parse_arg yarg = { .ys = ys, };
- ssize_t len;
int err;
do {
- /* libmnl doesn't let us pass flags to the recv to make
- * it non-blocking so we need to poll() or peek() :|
- */
- struct pollfd pfd = { };
-
- pfd.fd = mnl_socket_get_fd(ys->sock);
- pfd.events = POLLIN;
- err = poll(&pfd, 1, 1);
- if (err < 1)
- return err;
-
- len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
- MNL_SOCKET_BUFFER_SIZE);
- if (len < 0)
- return len;
-
- err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
- ynl_ntf_trampoline, &yarg,
- ynl_cb_array, NLMSG_MIN_TYPE);
+ err = __ynl_sock_read_msgs(&yarg, ynl_ntf_trampoline,
+ MSG_DONTWAIT);
if (err < 0)
return err;
} while (err > 0);
@@ -801,7 +857,7 @@ void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd)
int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg)
{
yerr(yarg->ys, YNL_ERROR_INV_RESP, "Error parsing response: %s", msg);
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
static int
@@ -809,27 +865,28 @@ ynl_check_alien(struct ynl_sock *ys, const struct nlmsghdr *nlh, __u32 rsp_cmd)
{
struct genlmsghdr *gehdr;
- if (mnl_nlmsg_get_payload_len(nlh) < sizeof(*gehdr)) {
+ if (ynl_nlmsg_data_len(nlh) < sizeof(*gehdr)) {
yerr(ys, YNL_ERROR_INV_RESP,
"Kernel responded with truncated message");
return -1;
}
- gehdr = mnl_nlmsg_get_payload(nlh);
+ gehdr = ynl_nlmsg_data(nlh);
if (gehdr->cmd != rsp_cmd)
return ynl_ntf_parse(ys, nlh);
return 0;
}
-static int ynl_req_trampoline(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_req_trampoline(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_req_state *yrs = data;
+ struct ynl_req_state *yrs = (void *)yarg;
int ret;
ret = ynl_check_alien(yrs->yarg.ys, nlh, yrs->rsp_cmd);
if (ret)
- return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK;
+ return ret < 0 ? YNL_PARSE_CB_ERROR : YNL_PARSE_CB_OK;
return yrs->cb(nlh, &yrs->yarg);
}
@@ -837,43 +894,38 @@ static int ynl_req_trampoline(const struct nlmsghdr *nlh, void *data)
int ynl_exec(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
struct ynl_req_state *yrs)
{
- ssize_t len;
int err;
- err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len);
+ err = ynl_msg_end(ys, req_nlh);
+ if (err < 0)
+ return err;
+
+ err = send(ys->socket, req_nlh, req_nlh->nlmsg_len, 0);
if (err < 0)
return err;
do {
- len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
- MNL_SOCKET_BUFFER_SIZE);
- if (len < 0)
- return len;
-
- err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
- ynl_req_trampoline, yrs,
- ynl_cb_array, NLMSG_MIN_TYPE);
- if (err < 0)
- return err;
+ err = ynl_sock_read_msgs(&yrs->yarg, ynl_req_trampoline);
} while (err > 0);
- return 0;
+ return err;
}
-static int ynl_dump_trampoline(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_dump_trampoline(const struct nlmsghdr *nlh, struct ynl_parse_arg *data)
{
- struct ynl_dump_state *ds = data;
+ struct ynl_dump_state *ds = (void *)data;
struct ynl_dump_list_type *obj;
struct ynl_parse_arg yarg = {};
int ret;
- ret = ynl_check_alien(ds->ys, nlh, ds->rsp_cmd);
+ ret = ynl_check_alien(ds->yarg.ys, nlh, ds->rsp_cmd);
if (ret)
- return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK;
+ return ret < 0 ? YNL_PARSE_CB_ERROR : YNL_PARSE_CB_OK;
obj = calloc(1, ds->alloc_sz);
if (!obj)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
if (!ds->first)
ds->first = obj;
@@ -881,8 +933,7 @@ static int ynl_dump_trampoline(const struct nlmsghdr *nlh, void *data)
ds->last->next = obj;
ds->last = obj;
- yarg.ys = ds->ys;
- yarg.rsp_policy = ds->rsp_policy;
+ yarg = ds->yarg;
yarg.data = &obj->data;
return ds->cb(nlh, &yarg);
@@ -900,22 +951,18 @@ static void *ynl_dump_end(struct ynl_dump_state *ds)
int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
struct ynl_dump_state *yds)
{
- ssize_t len;
int err;
- err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len);
+ err = ynl_msg_end(ys, req_nlh);
if (err < 0)
return err;
- do {
- len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
- MNL_SOCKET_BUFFER_SIZE);
- if (len < 0)
- goto err_close_list;
+ err = send(ys->socket, req_nlh, req_nlh->nlmsg_len, 0);
+ if (err < 0)
+ return err;
- err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
- ynl_dump_trampoline, yds,
- ynl_cb_array, NLMSG_MIN_TYPE);
+ do {
+ err = ynl_sock_read_msgs(&yds->yarg, ynl_dump_trampoline);
if (err < 0)
goto err_close_list;
} while (err > 0);
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
index ce77a6d76ce0..9842e85a8c57 100644
--- a/tools/net/ynl/lib/ynl.h
+++ b/tools/net/ynl/lib/ynl.h
@@ -12,6 +12,7 @@ enum ynl_error_code {
YNL_ERROR_NONE = 0,
__YNL_ERRNO_END = 4096,
YNL_ERROR_INTERNAL,
+ YNL_ERROR_DUMP_INTER,
YNL_ERROR_EXPECT_ACK,
YNL_ERROR_EXPECT_MSG,
YNL_ERROR_UNEXPECT_MSG,
@@ -19,6 +20,8 @@ enum ynl_error_code {
YNL_ERROR_ATTR_INVALID,
YNL_ERROR_UNKNOWN_NTF,
YNL_ERROR_INV_RESP,
+ YNL_ERROR_INPUT_INVALID,
+ YNL_ERROR_INPUT_TOO_BIG,
};
/**
@@ -58,7 +61,7 @@ struct ynl_sock {
/* private: */
const struct ynl_family *family;
- struct mnl_socket *sock;
+ int socket;
__u32 seq;
__u32 portid;
__u16 family_id;
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 1e10512b2117..5fa7957f6e0f 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -7,6 +7,7 @@ import random
import socket
import struct
from struct import Struct
+import sys
import yaml
import ipaddress
import uuid
@@ -84,6 +85,10 @@ class NlError(Exception):
return f"Netlink error: {os.strerror(-self.nl_msg.error)}\n{self.nl_msg}"
+class ConfigError(Exception):
+ pass
+
+
class NlAttr:
ScalarFormat = namedtuple('ScalarFormat', ['native', 'big', 'little'])
type_formats = {
@@ -113,20 +118,6 @@ class NlAttr:
else format.little
return format.native
- @classmethod
- def formatted_string(cls, raw, display_hint):
- if display_hint == 'mac':
- formatted = ':'.join('%02x' % b for b in raw)
- elif display_hint == 'hex':
- formatted = bytes.hex(raw, ' ')
- elif display_hint in [ 'ipv4', 'ipv6' ]:
- formatted = format(ipaddress.ip_address(raw))
- elif display_hint == 'uuid':
- formatted = str(uuid.UUID(bytes=raw))
- else:
- formatted = raw
- return formatted
-
def as_scalar(self, attr_type, byte_order=None):
format = self.get_format(attr_type, byte_order)
return format.unpack(self.raw)[0]
@@ -148,23 +139,6 @@ class NlAttr:
format = self.get_format(type)
return [ x[0] for x in format.iter_unpack(self.raw) ]
- def as_struct(self, members):
- value = dict()
- offset = 0
- for m in members:
- # TODO: handle non-scalar members
- if m.type == 'binary':
- decoded = self.raw[offset : offset + m['len']]
- offset += m['len']
- elif m.type in NlAttr.type_formats:
- format = self.get_format(m.type, m.byte_order)
- [ decoded ] = format.unpack_from(self.raw, offset)
- offset += format.size
- if m.display_hint:
- decoded = self.formatted_string(decoded, m.display_hint)
- value[m.name] = decoded
- return value
-
def __repr__(self):
return f"[type:{self.type} len:{self._len}] {self.raw}"
@@ -244,11 +218,11 @@ class NlMsg:
return self.nl_type
def __repr__(self):
- msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}\n"
+ msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}"
if self.error:
- msg += '\terror: ' + str(self.error)
+ msg += '\n\terror: ' + str(self.error)
if self.extack:
- msg += '\textack: ' + repr(self.extack)
+ msg += '\n\textack: ' + repr(self.extack)
return msg
@@ -370,7 +344,7 @@ class NetlinkProtocol:
fixed_header_size = 0
if ynl:
op = ynl.rsp_by_value[msg.cmd()]
- fixed_header_size = ynl._fixed_header_size(op.fixed_header)
+ fixed_header_size = ynl._struct_size(op.fixed_header)
msg.raw_attrs = NlAttrs(msg.raw, fixed_header_size)
return msg
@@ -379,6 +353,9 @@ class NetlinkProtocol:
raise Exception(f'Multicast group "{mcast_name}" not present in the spec')
return mcast_groups[mcast_name].value
+ def msghdr_size(self):
+ return 16
+
class GenlProtocol(NetlinkProtocol):
def __init__(self, family_name):
@@ -404,6 +381,28 @@ class GenlProtocol(NetlinkProtocol):
raise Exception(f'Multicast group "{mcast_name}" not present in the family')
return self.genl_family['mcast'][mcast_name]
+ def msghdr_size(self):
+ return super().msghdr_size() + 4
+
+
+class SpaceAttrs:
+ SpecValuesPair = namedtuple('SpecValuesPair', ['spec', 'values'])
+
+ def __init__(self, attr_space, attrs, outer = None):
+ outer_scopes = outer.scopes if outer else []
+ inner_scope = self.SpecValuesPair(attr_space, attrs)
+ self.scopes = [inner_scope] + outer_scopes
+
+ def lookup(self, name):
+ for scope in self.scopes:
+ if name in scope.spec:
+ if name in scope.values:
+ return scope.values[name]
+ spec_name = scope.spec.yaml['name']
+ raise Exception(
+ f"No value for '{name}' in attribute space '{spec_name}'")
+ raise Exception(f"Attribute '{name}' not defined in any attribute-set")
+
#
# YNL implementation details.
@@ -411,7 +410,8 @@ class GenlProtocol(NetlinkProtocol):
class YnlFamily(SpecFamily):
- def __init__(self, def_path, schema=None, process_unknown=False):
+ def __init__(self, def_path, schema=None, process_unknown=False,
+ recv_size=0):
super().__init__(def_path, schema)
self.include_raw = False
@@ -426,6 +426,17 @@ class YnlFamily(SpecFamily):
except KeyError:
raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
+ self._recv_dbg = False
+ # Note that netlink will use conservative (min) message size for
+ # the first dump recv() on the socket; our setting will only matter
+ # from the second recv() on.
+ self._recv_size = recv_size if recv_size else 131072
+ # Netlink will always allocate at least PAGE_SIZE - sizeof(skb_shinfo)
+ # for a message, so smaller receive sizes will lead to truncation.
+ # Note that the min size for other families may be larger than 4k!
+ if self._recv_size < 4000:
+ raise ConfigError()
+
self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, self.nlproto.proto_num)
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
@@ -449,18 +460,61 @@ class YnlFamily(SpecFamily):
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
mcast_id)
- def _add_attr(self, space, name, value):
+ def set_recv_dbg(self, enabled):
+ self._recv_dbg = enabled
+
+ def _recv_dbg_print(self, reply, nl_msgs):
+ if not self._recv_dbg:
+ return
+ print("Recv: read", len(reply), "bytes,",
+ len(nl_msgs.msgs), "messages", file=sys.stderr)
+ for nl_msg in nl_msgs:
+ print(" ", nl_msg, file=sys.stderr)
+
+ def _encode_enum(self, attr_spec, value):
+ enum = self.consts[attr_spec['enum']]
+ if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
+ scalar = 0
+ if isinstance(value, str):
+ value = [value]
+ for single_value in value:
+ scalar += enum.entries[single_value].user_value(as_flags = True)
+ return scalar
+ else:
+ return enum.entries[value].user_value()
+
+ def _get_scalar(self, attr_spec, value):
+ try:
+ return int(value)
+ except (ValueError, TypeError) as e:
+ if 'enum' not in attr_spec:
+ raise e
+ return self._encode_enum(attr_spec, value)
+
+ def _add_attr(self, space, name, value, search_attrs):
try:
attr = self.attr_sets[space][name]
except KeyError:
raise Exception(f"Space '{space}' has no attribute '{name}'")
nl_type = attr.value
+
+ if attr.is_multi and isinstance(value, list):
+ attr_payload = b''
+ for subvalue in value:
+ attr_payload += self._add_attr(space, name, subvalue, search_attrs)
+ return attr_payload
+
if attr["type"] == 'nest':
nl_type |= Netlink.NLA_F_NESTED
attr_payload = b''
+ sub_attrs = SpaceAttrs(self.attr_sets[space], value, search_attrs)
for subname, subvalue in value.items():
- attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue)
+ attr_payload += self._add_attr(attr['nested-attributes'],
+ subname, subvalue, sub_attrs)
elif attr["type"] == 'flag':
+ if not value:
+ # If value is absent or false then skip attribute creation.
+ return b''
attr_payload = b''
elif attr["type"] == 'string':
attr_payload = str(value).encode('ascii') + b'\x00'
@@ -469,18 +523,36 @@ class YnlFamily(SpecFamily):
attr_payload = value
elif isinstance(value, str):
attr_payload = bytes.fromhex(value)
+ elif isinstance(value, dict) and attr.struct_name:
+ attr_payload = self._encode_struct(attr.struct_name, value)
else:
raise Exception(f'Unknown type for binary attribute, value: {value}')
- elif attr.is_auto_scalar:
- scalar = int(value)
- real_type = attr["type"][0] + ('32' if scalar.bit_length() <= 32 else '64')
- format = NlAttr.get_format(real_type, attr.byte_order)
- attr_payload = format.pack(int(value))
- elif attr['type'] in NlAttr.type_formats:
- format = NlAttr.get_format(attr['type'], attr.byte_order)
- attr_payload = format.pack(int(value))
+ elif attr['type'] in NlAttr.type_formats or attr.is_auto_scalar:
+ scalar = self._get_scalar(attr, value)
+ if attr.is_auto_scalar:
+ attr_type = attr["type"][0] + ('32' if scalar.bit_length() <= 32 else '64')
+ else:
+ attr_type = attr["type"]
+ format = NlAttr.get_format(attr_type, attr.byte_order)
+ attr_payload = format.pack(scalar)
elif attr['type'] in "bitfield32":
- attr_payload = struct.pack("II", int(value["value"]), int(value["selector"]))
+ scalar_value = self._get_scalar(attr, value["value"])
+ scalar_selector = self._get_scalar(attr, value["selector"])
+ attr_payload = struct.pack("II", scalar_value, scalar_selector)
+ elif attr['type'] == 'sub-message':
+ msg_format = self._resolve_selector(attr, search_attrs)
+ attr_payload = b''
+ if msg_format.fixed_header:
+ attr_payload += self._encode_struct(msg_format.fixed_header, value)
+ if msg_format.attr_set:
+ if msg_format.attr_set in self.attr_sets:
+ nl_type |= Netlink.NLA_F_NESTED
+ sub_attrs = SpaceAttrs(msg_format.attr_set, value, search_attrs)
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(msg_format.attr_set,
+ subname, subvalue, sub_attrs)
+ else:
+ raise Exception(f"Unknown attribute-set '{msg_format.attr_set}'")
else:
raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
@@ -503,17 +575,13 @@ class YnlFamily(SpecFamily):
def _decode_binary(self, attr, attr_spec):
if attr_spec.struct_name:
- members = self.consts[attr_spec.struct_name]
- decoded = attr.as_struct(members)
- for m in members:
- if m.enum:
- decoded[m.name] = self._decode_enum(decoded[m.name], m)
+ decoded = self._decode_struct(attr.raw, attr_spec.struct_name)
elif attr_spec.sub_type:
decoded = attr.as_c_array(attr_spec.sub_type)
else:
decoded = attr.as_bin()
if attr_spec.display_hint:
- decoded = NlAttr.formatted_string(decoded, attr_spec.display_hint)
+ decoded = self._formatted_string(decoded, attr_spec.display_hint)
return decoded
def _decode_array_nest(self, attr, attr_spec):
@@ -527,6 +595,16 @@ class YnlFamily(SpecFamily):
decoded.append({ item.type: subattrs })
return decoded
+ def _decode_nest_type_value(self, attr, attr_spec):
+ decoded = {}
+ value = attr
+ for name in attr_spec['type-value']:
+ value = NlAttr(value.raw, 0)
+ decoded[name] = value.type
+ subattrs = self._decode(NlAttrs(value.raw), attr_spec['nested-attributes'])
+ decoded.update(subattrs)
+ return decoded
+
def _decode_unknown(self, attr):
if attr.is_nest:
return self._decode(NlAttrs(attr.raw), None)
@@ -548,29 +626,27 @@ class YnlFamily(SpecFamily):
else:
rsp[name] = [decoded]
- def _resolve_selector(self, attr_spec, vals):
+ def _resolve_selector(self, attr_spec, search_attrs):
sub_msg = attr_spec.sub_message
if sub_msg not in self.sub_msgs:
raise Exception(f"No sub-message spec named {sub_msg} for {attr_spec.name}")
sub_msg_spec = self.sub_msgs[sub_msg]
selector = attr_spec.selector
- if selector not in vals:
- raise Exception(f"There is no value for {selector} to resolve '{attr_spec.name}'")
- value = vals[selector]
+ value = search_attrs.lookup(selector)
if value not in sub_msg_spec.formats:
raise Exception(f"No message format for '{value}' in sub-message spec '{sub_msg}'")
spec = sub_msg_spec.formats[value]
return spec
- def _decode_sub_msg(self, attr, attr_spec, rsp):
- msg_format = self._resolve_selector(attr_spec, rsp)
+ def _decode_sub_msg(self, attr, attr_spec, search_attrs):
+ msg_format = self._resolve_selector(attr_spec, search_attrs)
decoded = {}
offset = 0
if msg_format.fixed_header:
- decoded.update(self._decode_fixed_header(attr, msg_format.fixed_header));
- offset = self._fixed_header_size(msg_format.fixed_header)
+ decoded.update(self._decode_struct(attr.raw, msg_format.fixed_header));
+ offset = self._struct_size(msg_format.fixed_header)
if msg_format.attr_set:
if msg_format.attr_set in self.attr_sets:
subdict = self._decode(NlAttrs(attr.raw, offset), msg_format.attr_set)
@@ -579,10 +655,12 @@ class YnlFamily(SpecFamily):
raise Exception(f"Unknown attribute-set '{attr_space}' when decoding '{attr_spec.name}'")
return decoded
- def _decode(self, attrs, space):
+ def _decode(self, attrs, space, outer_attrs = None):
+ rsp = dict()
if space:
attr_space = self.attr_sets[space]
- rsp = dict()
+ search_attrs = SpaceAttrs(attr_space, rsp, outer_attrs)
+
for attr in attrs:
try:
attr_spec = attr_space.attrs_by_val[attr.type]
@@ -594,7 +672,7 @@ class YnlFamily(SpecFamily):
continue
if attr_spec["type"] == 'nest':
- subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'])
+ subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'], search_attrs)
decoded = subdict
elif attr_spec["type"] == 'string':
decoded = attr.as_strz()
@@ -617,7 +695,9 @@ class YnlFamily(SpecFamily):
selector = self._decode_enum(selector, attr_spec)
decoded = {"value": value, "selector": selector}
elif attr_spec["type"] == 'sub-message':
- decoded = self._decode_sub_msg(attr, attr_spec, rsp)
+ decoded = self._decode_sub_msg(attr, attr_spec, search_attrs)
+ elif attr_spec["type"] == 'nest-type-value':
+ decoded = self._decode_nest_type_value(attr, attr_spec)
else:
if not self.process_unknown:
raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
@@ -658,20 +738,23 @@ class YnlFamily(SpecFamily):
return
msg = self.nlproto.decode(self, NlMsg(request, 0, op.attr_set))
- offset = 20 + self._fixed_header_size(op.fixed_header)
+ offset = self.nlproto.msghdr_size() + self._struct_size(op.fixed_header)
path = self._decode_extack_path(msg.raw_attrs, op.attr_set, offset,
extack['bad-attr-offs'])
if path:
del extack['bad-attr-offs']
extack['bad-attr'] = path
- def _fixed_header_size(self, name):
+ def _struct_size(self, name):
if name:
- fixed_header_members = self.consts[name].members
+ members = self.consts[name].members
size = 0
- for m in fixed_header_members:
+ for m in members:
if m.type in ['pad', 'binary']:
- size += m.len
+ if m.struct:
+ size += self._struct_size(m.struct)
+ else:
+ size += m.len
else:
format = NlAttr.get_format(m.type, m.byte_order)
size += format.size
@@ -679,26 +762,71 @@ class YnlFamily(SpecFamily):
else:
return 0
- def _decode_fixed_header(self, msg, name):
- fixed_header_members = self.consts[name].members
- fixed_header_attrs = dict()
+ def _decode_struct(self, data, name):
+ members = self.consts[name].members
+ attrs = dict()
offset = 0
- for m in fixed_header_members:
+ for m in members:
value = None
if m.type == 'pad':
offset += m.len
elif m.type == 'binary':
- value = msg.raw[offset : offset + m.len]
- offset += m.len
+ if m.struct:
+ len = self._struct_size(m.struct)
+ value = self._decode_struct(data[offset : offset + len],
+ m.struct)
+ offset += len
+ else:
+ value = data[offset : offset + m.len]
+ offset += m.len
else:
format = NlAttr.get_format(m.type, m.byte_order)
- [ value ] = format.unpack_from(msg.raw, offset)
+ [ value ] = format.unpack_from(data, offset)
offset += format.size
if value is not None:
if m.enum:
value = self._decode_enum(value, m)
- fixed_header_attrs[m.name] = value
- return fixed_header_attrs
+ elif m.display_hint:
+ value = self._formatted_string(value, m.display_hint)
+ attrs[m.name] = value
+ return attrs
+
+ def _encode_struct(self, name, vals):
+ members = self.consts[name].members
+ attr_payload = b''
+ for m in members:
+ value = vals.pop(m.name) if m.name in vals else None
+ if m.type == 'pad':
+ attr_payload += bytearray(m.len)
+ elif m.type == 'binary':
+ if m.struct:
+ if value is None:
+ value = dict()
+ attr_payload += self._encode_struct(m.struct, value)
+ else:
+ if value is None:
+ attr_payload += bytearray(m.len)
+ else:
+ attr_payload += bytes.fromhex(value)
+ else:
+ if value is None:
+ value = 0
+ format = NlAttr.get_format(m.type, m.byte_order)
+ attr_payload += format.pack(value)
+ return attr_payload
+
+ def _formatted_string(self, raw, display_hint):
+ if display_hint == 'mac':
+ formatted = ':'.join('%02x' % b for b in raw)
+ elif display_hint == 'hex':
+ formatted = bytes.hex(raw, ' ')
+ elif display_hint in [ 'ipv4', 'ipv6' ]:
+ formatted = format(ipaddress.ip_address(raw))
+ elif display_hint == 'uuid':
+ formatted = str(uuid.UUID(bytes=raw))
+ else:
+ formatted = raw
+ return formatted
def handle_ntf(self, decoded):
msg = dict()
@@ -707,7 +835,7 @@ class YnlFamily(SpecFamily):
op = self.rsp_by_value[decoded.cmd()]
attrs = self._decode(decoded.raw_attrs, op.attr_set.name)
if op.fixed_header:
- attrs.update(self._decode_fixed_header(decoded, op.fixed_header))
+ attrs.update(self._decode_struct(decoded.raw, op.fixed_header))
msg['name'] = op['name']
msg['msg'] = attrs
@@ -716,11 +844,12 @@ class YnlFamily(SpecFamily):
def check_ntf(self):
while True:
try:
- reply = self.sock.recv(128 * 1024, socket.MSG_DONTWAIT)
+ reply = self.sock.recv(self._recv_size, socket.MSG_DONTWAIT)
except BlockingIOError:
return
nms = NlMsgs(reply)
+ self._recv_dbg_print(reply, nms)
for nl_msg in nms:
if nl_msg.error:
print("Netlink error in ntf!?", os.strerror(-nl_msg.error))
@@ -759,20 +888,11 @@ class YnlFamily(SpecFamily):
req_seq = random.randint(1024, 65535)
msg = self.nlproto.message(nl_flags, op.req_value, 1, req_seq)
- fixed_header_members = []
if op.fixed_header:
- fixed_header_members = self.consts[op.fixed_header].members
- for m in fixed_header_members:
- value = vals.pop(m.name) if m.name in vals else 0
- if m.type == 'pad':
- msg += bytearray(m.len)
- elif m.type == 'binary':
- msg += bytes.fromhex(value)
- else:
- format = NlAttr.get_format(m.type, m.byte_order)
- msg += format.pack(value)
+ msg += self._encode_struct(op.fixed_header, vals)
+ search_attrs = SpaceAttrs(op.attr_set, vals)
for name, value in vals.items():
- msg += self._add_attr(op.attr_set.name, name, value)
+ msg += self._add_attr(op.attr_set.name, name, value, search_attrs)
msg = _genl_msg_finalize(msg)
self.sock.send(msg, 0)
@@ -780,8 +900,9 @@ class YnlFamily(SpecFamily):
done = False
rsp = []
while not done:
- reply = self.sock.recv(128 * 1024)
+ reply = self.sock.recv(self._recv_size)
nms = NlMsgs(reply, attr_space=op.attr_set)
+ self._recv_dbg_print(reply, nms)
for nl_msg in nms:
if nl_msg.extack:
self._decode_extack(msg, op, nl_msg.extack)
@@ -808,7 +929,7 @@ class YnlFamily(SpecFamily):
rsp_msg = self._decode(decoded.raw_attrs, op.attr_set.name)
if op.fixed_header:
- rsp_msg.update(self._decode_fixed_header(decoded, op.fixed_header))
+ rsp_msg.update(self._decode_struct(decoded.raw, op.fixed_header))
rsp.append(rsp_msg)
if not rsp:
diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore
index 49637b26c482..dda6686257a7 100644
--- a/tools/net/ynl/samples/.gitignore
+++ b/tools/net/ynl/samples/.gitignore
@@ -1,4 +1,5 @@
ethtool
devlink
netdev
+ovs
page-pool
\ No newline at end of file
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
index 28bdb1557a54..e194a7565861 100644
--- a/tools/net/ynl/samples/Makefile
+++ b/tools/net/ynl/samples/Makefile
@@ -9,7 +9,7 @@ ifeq ("$(DEBUG)","1")
CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
endif
-LDLIBS=-lmnl ../lib/ynl.a ../generated/protos.a
+LDLIBS=../lib/ynl.a ../generated/protos.a
SRCS=$(wildcard *.c)
BINS=$(patsubst %.c,%,${SRCS})
@@ -28,8 +28,8 @@ $(BINS): ../lib/ynl.a ../generated/protos.a $(SRCS)
clean:
rm -f *.o *.d *~
-hardclean: clean
+distclean: clean
rm -f $(BINS)
-.PHONY: all clean
+.PHONY: all clean distclean
.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/samples/ovs.c b/tools/net/ynl/samples/ovs.c
new file mode 100644
index 000000000000..3e975c003d77
--- /dev/null
+++ b/tools/net/ynl/samples/ovs.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+
+#include <ynl.h>
+
+#include "ovs_datapath-user.h"
+
+int main(int argc, char **argv)
+{
+ struct ynl_sock *ys;
+ int err;
+
+ ys = ynl_sock_create(&ynl_ovs_datapath_family, NULL);
+ if (!ys)
+ return 1;
+
+ if (argc > 1) {
+ struct ovs_datapath_new_req *req;
+
+ req = ovs_datapath_new_req_alloc();
+ if (!req)
+ goto err_close;
+
+ ovs_datapath_new_req_set_upcall_pid(req, 1);
+ ovs_datapath_new_req_set_name(req, argv[1]);
+
+ err = ovs_datapath_new(ys, req);
+ ovs_datapath_new_req_free(req);
+ if (err)
+ goto err_close;
+ } else {
+ struct ovs_datapath_get_req_dump *req;
+ struct ovs_datapath_get_list *dps;
+
+ printf("Dump:\n");
+ req = ovs_datapath_get_req_dump_alloc();
+
+ dps = ovs_datapath_get_dump(ys, req);
+ ovs_datapath_get_req_dump_free(req);
+ if (!dps)
+ goto err_close;
+
+ ynl_dump_foreach(dps, dp) {
+ printf(" %s(%d): pid:%u cache:%u\n",
+ dp->name, dp->_hdr.dp_ifindex,
+ dp->upcall_pid, dp->masks_cache_size);
+ }
+ ovs_datapath_get_list_free(dps);
+ }
+
+ ynl_sock_destroy(ys);
+
+ return 0;
+
+err_close:
+ fprintf(stderr, "YNL (%d): %s\n", ys->err.code, ys->err.msg);
+ ynl_sock_destroy(ys);
+ return 2;
+}
diff --git a/tools/net/ynl/samples/page-pool.c b/tools/net/ynl/samples/page-pool.c
index 098b5190d0e5..332f281ee5cb 100644
--- a/tools/net/ynl/samples/page-pool.c
+++ b/tools/net/ynl/samples/page-pool.c
@@ -95,6 +95,8 @@ int main(int argc, char **argv)
if (pp->_present.alloc_fast)
s->alloc_fast += pp->alloc_fast;
+ if (pp->_present.alloc_refill)
+ s->alloc_fast += pp->alloc_refill;
if (pp->_present.alloc_slow)
s->alloc_slow += pp->alloc_slow;
if (pp->_present.recycle_ring)
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 7fc1aa788f6f..6b7eb2d2aaf1 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -40,14 +40,6 @@ class BaseNlLib:
def get_family_id(self):
return 'ys->family_id'
- def parse_cb_run(self, cb, data, is_dump=False, indent=1):
- ind = '\n\t\t' + '\t' * indent + ' '
- if is_dump:
- return f"mnl_cb_run2(ys->rx_buf, len, 0, 0, {cb}, {data},{ind}ynl_cb_array, NLMSG_MIN_TYPE)"
- else:
- return f"mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,{ind}{cb}, {data},{ind}" + \
- "ynl_cb_array, NLMSG_MIN_TYPE)"
-
class Type(SpecAttr):
def __init__(self, family, attr_set, attr, value):
@@ -88,6 +80,8 @@ class Type(SpecAttr):
value = self.checks.get(limit, default)
if value is None:
return value
+ elif value in self.family.consts:
+ return c_upper(f"{self.family['name']}-{value}")
if not isinstance(value, int):
value = limit_to_number(value)
return value
@@ -168,15 +162,6 @@ class Type(SpecAttr):
spec = self._attr_policy(policy)
cw.p(f"\t[{self.enum_name}] = {spec},")
- def _mnl_type(self):
- # mnl does not have helpers for signed integer types
- # turn signed type into unsigned
- # this only makes sense for scalar types
- t = self.type
- if t[0] == 's':
- t = 'u' + t[1:]
- return t
-
def _attr_typol(self):
raise Exception(f"Type policy not implemented for class type {self.type}")
@@ -192,7 +177,7 @@ class Type(SpecAttr):
ri.cw.p(f"{line};")
def _attr_put_simple(self, ri, var, put_type):
- line = f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
+ line = f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
self._attr_put_line(ri, var, line)
def attr_put(self, ri, var):
@@ -217,7 +202,7 @@ class Type(SpecAttr):
if not self.is_multi_val():
ri.cw.p("if (ynl_attr_validate(yarg, attr))")
- ri.cw.p("return MNL_CB_ERROR;")
+ ri.cw.p("return YNL_PARSE_CB_ERROR;")
if self.presence_type() == 'bit':
ri.cw.p(f"{var}->_present.{self.c_name} = 1;")
@@ -264,7 +249,7 @@ class TypeUnused(Type):
return []
def _attr_get(self, ri, var):
- return ['return MNL_CB_ERROR;'], None, None
+ return ['return YNL_PARSE_CB_ERROR;'], None, None
def _attr_typol(self):
return '.type = YNL_PT_REJECT, '
@@ -357,9 +342,6 @@ class TypeScalar(Type):
else:
self.type_name = '__' + self.type
- def mnl_type(self):
- return self._mnl_type()
-
def _attr_policy(self, policy):
if 'flags-mask' in self.checks or self.is_bitfield:
if self.is_bitfield:
@@ -387,10 +369,10 @@ class TypeScalar(Type):
return [f'{self.type_name} {self.c_name}{self.byte_order_comment}']
def attr_put(self, ri, var):
- self._attr_put_simple(ri, var, self.mnl_type())
+ self._attr_put_simple(ri, var, self.type)
def _attr_get(self, ri, var):
- return f"{var}->{self.c_name} = mnl_attr_get_{self.mnl_type()}(attr);", None, None
+ return f"{var}->{self.c_name} = ynl_attr_get_{self.type}(attr);", None, None
def _setter_lines(self, ri, member, presence):
return [f"{member} = {self.c_name};"]
@@ -404,7 +386,7 @@ class TypeFlag(Type):
return '.type = YNL_PT_FLAG, '
def attr_put(self, ri, var):
- self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, 0, NULL)")
+ self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, NULL, 0)")
def _attr_get(self, ri, var):
return [], None, None
@@ -446,15 +428,15 @@ class TypeString(Type):
cw.p(f"\t[{self.enum_name}] = {spec},")
def attr_put(self, ri, var):
- self._attr_put_simple(ri, var, 'strz')
+ self._attr_put_simple(ri, var, 'str')
def _attr_get(self, ri, var):
len_mem = var + '->_present.' + self.c_name + '_len'
return [f"{len_mem} = len;",
f"{var}->{self.c_name} = malloc(len + 1);",
- f"memcpy({var}->{self.c_name}, mnl_attr_get_str(attr), len);",
+ f"memcpy({var}->{self.c_name}, ynl_attr_get_str(attr), len);",
f"{var}->{self.c_name}[len] = 0;"], \
- ['len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));'], \
+ ['len = strnlen(ynl_attr_get_str(attr), ynl_attr_data_len(attr));'], \
['unsigned int len;']
def _setter_lines(self, ri, member, presence):
@@ -493,15 +475,15 @@ class TypeBinary(Type):
return mem
def attr_put(self, ri, var):
- self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, " +
- f"{var}->_present.{self.c_name}_len, {var}->{self.c_name})")
+ self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, " +
+ f"{var}->{self.c_name}, {var}->_present.{self.c_name}_len)")
def _attr_get(self, ri, var):
len_mem = var + '->_present.' + self.c_name + '_len'
return [f"{len_mem} = len;",
f"{var}->{self.c_name} = malloc(len);",
- f"memcpy({var}->{self.c_name}, mnl_attr_get_payload(attr), len);"], \
- ['len = mnl_attr_get_payload_len(attr);'], \
+ f"memcpy({var}->{self.c_name}, ynl_attr_data(attr), len);"], \
+ ['len = ynl_attr_data_len(attr);'], \
['unsigned int len;']
def _setter_lines(self, ri, member, presence):
@@ -526,11 +508,11 @@ class TypeBitfield32(Type):
return f"NLA_POLICY_BITFIELD32({mask})"
def attr_put(self, ri, var):
- line = f"mnl_attr_put(nlh, {self.enum_name}, sizeof(struct nla_bitfield32), &{var}->{self.c_name})"
+ line = f"ynl_attr_put(nlh, {self.enum_name}, &{var}->{self.c_name}, sizeof(struct nla_bitfield32))"
self._attr_put_line(ri, var, line)
def _attr_get(self, ri, var):
- return f"memcpy(&{var}->{self.c_name}, mnl_attr_get_payload(attr), sizeof(struct nla_bitfield32));", None, None
+ return f"memcpy(&{var}->{self.c_name}, ynl_attr_data(attr), sizeof(struct nla_bitfield32));", None, None
def _setter_lines(self, ri, member, presence):
return [f"memcpy(&{member}, {self.c_name}, sizeof(struct nla_bitfield32));"]
@@ -563,7 +545,7 @@ class TypeNest(Type):
def _attr_get(self, ri, var):
get_lines = [f"if ({self.nested_render_name}_parse(&parg, attr))",
- "return MNL_CB_ERROR;"]
+ "return YNL_PARSE_CB_ERROR;"]
init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
f"parg.data = &{var}->{self.c_name};"]
return get_lines, init_lines, None
@@ -589,9 +571,6 @@ class TypeMultiAttr(Type):
def presence_type(self):
return 'count'
- def mnl_type(self):
- return self._mnl_type()
-
def _complex_member_type(self, ri):
if 'type' not in self.attr or self.attr['type'] == 'nest':
return self.nested_struct_type
@@ -625,9 +604,9 @@ class TypeMultiAttr(Type):
def attr_put(self, ri, var):
if self.attr['type'] in scalars:
- put_type = self.mnl_type()
+ put_type = self.type
ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
- ri.cw.p(f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
+ ri.cw.p(f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
elif 'type' not in self.attr or self.attr['type'] == 'nest':
ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
@@ -665,7 +644,7 @@ class TypeArrayNest(Type):
def _attr_get(self, ri, var):
local_vars = ['const struct nlattr *attr2;']
get_lines = [f'attr_{self.c_name} = attr;',
- 'mnl_attr_for_each_nested(attr2, attr)',
+ 'ynl_attr_for_each_nested(attr2, attr)',
f'\t{var}->n_{self.c_name}++;']
return get_lines, None, local_vars
@@ -690,8 +669,8 @@ class TypeNestTypeValue(Type):
local_vars += [f'__u32 {", ".join(tv_names)};']
for level in self.attr["type-value"]:
level = c_lower(level)
- get_lines += [f'attr_{level} = mnl_attr_get_payload({prev});']
- get_lines += [f'{level} = mnl_attr_get_type(attr_{level});']
+ get_lines += [f'attr_{level} = ynl_attr_data({prev});']
+ get_lines += [f'{level} = ynl_attr_type(attr_{level});']
prev = 'attr_' + level
tv_args = f", {', '.join(tv_names)}"
@@ -1550,7 +1529,7 @@ def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
cw.block_start()
if enum and enum.type == 'flags':
cw.p(f'{arg_name} = ffs({arg_name}) - 1;')
- cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)MNL_ARRAY_SIZE({map_name}))')
+ cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)YNL_ARRAY_SIZE({map_name}))')
cw.p('return NULL;')
cw.p(f'return {map_name}[{arg_name}];')
cw.block_end()
@@ -1612,12 +1591,12 @@ def put_req_nested(ri, struct):
ri.cw.block_start()
ri.cw.write_func_lvar('struct nlattr *nest;')
- ri.cw.p("nest = mnl_attr_nest_start(nlh, attr_type);")
+ ri.cw.p("nest = ynl_attr_nest_start(nlh, attr_type);")
for _, arg in struct.member_list():
arg.attr_put(ri, "obj")
- ri.cw.p("mnl_attr_nest_end(nlh, nest);")
+ ri.cw.p("ynl_attr_nest_end(nlh, nest);")
ri.cw.nl()
ri.cw.p('return 0;')
@@ -1627,11 +1606,11 @@ def put_req_nested(ri, struct):
def _multi_parse(ri, struct, init_lines, local_vars):
if struct.nested:
- iter_line = "mnl_attr_for_each_nested(attr, nested)"
+ iter_line = "ynl_attr_for_each_nested(attr, nested)"
else:
if ri.fixed_hdr:
local_vars += ['void *hdr;']
- iter_line = "mnl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)"
+ iter_line = "ynl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)"
array_nests = set()
multi_attrs = set()
@@ -1665,7 +1644,7 @@ def _multi_parse(ri, struct, init_lines, local_vars):
ri.cw.p(f'dst->{arg} = {arg};')
if ri.fixed_hdr:
- ri.cw.p('hdr = mnl_nlmsg_get_payload_offset(nlh, sizeof(struct genlmsghdr));')
+ ri.cw.p('hdr = ynl_nlmsg_data_offset(nlh, sizeof(struct genlmsghdr));')
ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({ri.fixed_hdr}));")
for anest in sorted(all_multi):
aspec = struct[anest]
@@ -1674,7 +1653,7 @@ def _multi_parse(ri, struct, init_lines, local_vars):
ri.cw.nl()
ri.cw.block_start(line=iter_line)
- ri.cw.p('unsigned int type = mnl_attr_get_type(attr);')
+ ri.cw.p('unsigned int type = ynl_attr_type(attr);')
ri.cw.nl()
first = True
@@ -1690,14 +1669,14 @@ def _multi_parse(ri, struct, init_lines, local_vars):
aspec = struct[anest]
ri.cw.block_start(line=f"if (n_{aspec.c_name})")
- ri.cw.p(f"dst->{aspec.c_name} = calloc({aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
ri.cw.p('i = 0;')
ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
- ri.cw.block_start(line=f"mnl_attr_for_each_nested(attr, attr_{aspec.c_name})")
+ ri.cw.block_start(line=f"ynl_attr_for_each_nested(attr, attr_{aspec.c_name})")
ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
- ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, mnl_attr_get_type(attr)))")
- ri.cw.p('return MNL_CB_ERROR;')
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, ynl_attr_type(attr)))")
+ ri.cw.p('return YNL_PARSE_CB_ERROR;')
ri.cw.p('i++;')
ri.cw.block_end()
ri.cw.block_end()
@@ -1712,13 +1691,13 @@ def _multi_parse(ri, struct, init_lines, local_vars):
if 'nested-attributes' in aspec:
ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
ri.cw.block_start(line=iter_line)
- ri.cw.block_start(line=f"if (mnl_attr_get_type(attr) == {aspec.enum_name})")
+ ri.cw.block_start(line=f"if (ynl_attr_type(attr) == {aspec.enum_name})")
if 'nested-attributes' in aspec:
ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr))")
- ri.cw.p('return MNL_CB_ERROR;')
+ ri.cw.p('return YNL_PARSE_CB_ERROR;')
elif aspec.type in scalars:
- ri.cw.p(f"dst->{aspec.c_name}[i] = mnl_attr_get_{aspec.mnl_type()}(attr);")
+ ri.cw.p(f"dst->{aspec.c_name}[i] = ynl_attr_get_{aspec.type}(attr);")
else:
raise Exception('Nest parsing type not supported yet')
ri.cw.p('i++;')
@@ -1730,7 +1709,7 @@ def _multi_parse(ri, struct, init_lines, local_vars):
if struct.nested:
ri.cw.p('return 0;')
else:
- ri.cw.p('return MNL_CB_OK;')
+ ri.cw.p('return YNL_PARSE_CB_OK;')
ri.cw.block_end()
ri.cw.nl()
@@ -1760,10 +1739,9 @@ def parse_rsp_msg(ri, deref=False):
return
func_args = ['const struct nlmsghdr *nlh',
- 'void *data']
+ 'struct ynl_parse_arg *yarg']
local_vars = [f'{type_name(ri, "reply", deref=deref)} *dst;',
- 'struct ynl_parse_arg *yarg = data;',
'const struct nlattr *attr;']
init_lines = ['dst = yarg->data;']
@@ -1774,7 +1752,7 @@ def parse_rsp_msg(ri, deref=False):
else:
# Empty reply
ri.cw.block_start()
- ri.cw.p('return MNL_CB_OK;')
+ ri.cw.p('return YNL_PARSE_CB_OK;')
ri.cw.block_end()
ri.cw.nl()
@@ -1809,7 +1787,7 @@ def print_req(ri):
if ri.fixed_hdr:
ri.cw.p("hdr_len = sizeof(req->_hdr);")
- ri.cw.p("hdr = mnl_nlmsg_put_extra_header(nlh, hdr_len);")
+ ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
ri.cw.nl()
@@ -1859,20 +1837,21 @@ def print_dump(ri):
ri.cw.write_func_lvar(local_vars)
- ri.cw.p('yds.ys = ys;')
+ ri.cw.p('yds.yarg.ys = ys;')
+ ri.cw.p(f"yds.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.p("yds.yarg.data = NULL;")
ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
if ri.op.value is not None:
ri.cw.p(f'yds.rsp_cmd = {ri.op.enum_name};')
else:
ri.cw.p(f'yds.rsp_cmd = {ri.op.rsp_value};')
- ri.cw.p(f"yds.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
ri.cw.nl()
ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
if ri.fixed_hdr:
ri.cw.p("hdr_len = sizeof(req->_hdr);")
- ri.cw.p("hdr = mnl_nlmsg_put_extra_header(nlh, hdr_len);")
+ ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
ri.cw.nl()
@@ -2363,6 +2342,10 @@ def print_kernel_family_struct_hdr(family, cw):
cw.p(f"extern struct genl_family {family.c_name}_nl_family;")
cw.nl()
+ if 'sock-priv' in family.kernel_family:
+ cw.p(f'void {family.c_name}_nl_sock_priv_init({family.kernel_family["sock-priv"]} *priv);')
+ cw.p(f'void {family.c_name}_nl_sock_priv_destroy({family.kernel_family["sock-priv"]} *priv);')
+ cw.nl()
def print_kernel_family_struct_src(family, cw):
@@ -2384,6 +2367,11 @@ def print_kernel_family_struct_src(family, cw):
if family.mcgrps['list']:
cw.p(f'.mcgrps\t\t= {family.c_name}_nl_mcgrps,')
cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
+ if 'sock-priv' in family.kernel_family:
+ cw.p(f'.sock_priv_size\t= sizeof({family.kernel_family["sock-priv"]}),')
+ # Force the cast here; the actual helpers take a pointer to the real type.
+ cw.p(f'.sock_priv_init\t= (void *){family.c_name}_nl_sock_priv_init,')
+ cw.p(f'.sock_priv_destroy = (void *){family.c_name}_nl_sock_priv_destroy,')
cw.block_end(';')
@@ -2584,7 +2572,7 @@ def render_user_family(family, cw, prototype):
cw.p('.hdr_len\t= sizeof(struct genlmsghdr),')
if family.ntfs:
cw.p(f".ntf_info\t= {family['name']}_ntf_info,")
- cw.p(f".ntf_info_size\t= MNL_ARRAY_SIZE({family['name']}_ntf_info),")
+ cw.p(f".ntf_info_size\t= YNL_ARRAY_SIZE({family['name']}_ntf_info),")
cw.block_end(line=';')
@@ -2680,6 +2668,7 @@ def main():
cw.p(f'#include "{os.path.basename(args.out_file[:-2])}.h"')
cw.nl()
headers = ['uapi/' + parsed.uapi_header]
+ headers += parsed.kernel_family.get('headers', [])
else:
cw.p('#include <stdlib.h>')
cw.p('#include <string.h>')
@@ -2700,7 +2689,6 @@ def main():
if args.mode == "user":
if not args.header:
- cw.p("#include <libmnl/libmnl.h>")
cw.p("#include <linux/genetlink.h>")
cw.nl()
for one in args.user_header:
diff --git a/tools/net/ynl/ynl-gen-rst.py b/tools/net/ynl/ynl-gen-rst.py
index 262d88f88696..927407b3efb3 100755
--- a/tools/net/ynl/ynl-gen-rst.py
+++ b/tools/net/ynl/ynl-gen-rst.py
@@ -189,12 +189,19 @@ def parse_operations(operations: List[Dict[str, Any]]) -> str:
def parse_entries(entries: List[Dict[str, Any]], level: int) -> str:
"""Parse a list of entries"""
+ ignored = ["pad"]
lines = []
for entry in entries:
if isinstance(entry, dict):
# entries could be a list or a dictionary
+ field_name = entry.get("name", "")
+ if field_name in ignored:
+ continue
+ type_ = entry.get("type")
+ if type_:
+ field_name += f" ({inline(type_)})"
lines.append(
- rst_fields(entry.get("name", ""), sanitize(entry.get("doc", "")), level)
+ rst_fields(field_name, sanitize(entry.get("doc", "")), level)
)
elif isinstance(entry, list):
lines.append(rst_list_inline(entry, level))
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index 3bf506d4a63c..a6cf69a665e8 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -23,10 +23,16 @@ CONFIG_USB4=y
CONFIG_NET=y
CONFIG_MCTP=y
+CONFIG_MCTP_FLOWS=y
CONFIG_INET=y
CONFIG_MPTCP=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_WLAN_VENDOR_INTEL=y
+CONFIG_IWLWIFI=y
+
CONFIG_DAMON=y
CONFIG_DAMON_VADDR=y
CONFIG_DAMON_PADDR=y
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index cd9ae576bfde..d117e8a96ded 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -193,6 +193,8 @@ run_tests: all
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests \
+ SRC_PATH=$(shell readlink -e $$(pwd)) \
+ OBJ_PATH=$(BUILD) \
O=$(abs_objtree); \
done;
@@ -244,7 +246,10 @@ ifdef INSTALL_PATH
@ret=1; \
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install \
+ INSTALL_PATH=$(INSTALL_PATH)/$$TARGET \
+ SRC_PATH=$(shell readlink -e $$(pwd)) \
+ OBJ_PATH=$(INSTALL_PATH) \
O=$(abs_objtree) \
$(if $(FORCE_TARGETS),|| exit); \
ret=$$((ret * $$?)); \
diff --git a/tools/testing/selftests/alsa/test-pcmtest-driver.c b/tools/testing/selftests/alsa/test-pcmtest-driver.c
index a52ecd43dbe3..ca81afa4ee90 100644
--- a/tools/testing/selftests/alsa/test-pcmtest-driver.c
+++ b/tools/testing/selftests/alsa/test-pcmtest-driver.c
@@ -127,11 +127,11 @@ FIXTURE_SETUP(pcmtest) {
int err;
if (geteuid())
- SKIP(exit(-1), "This test needs root to run!");
+ SKIP(return, "This test needs root to run!");
err = read_patterns();
if (err)
- SKIP(exit(-1), "Can't read patterns. Probably, module isn't loaded");
+ SKIP(return, "Can't read patterns. Probably, module isn't loaded");
card_name = malloc(127);
ASSERT_NE(card_name, NULL);
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 5c2cc7e8c5d0..d8ade15e2789 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,6 +1,5 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
-exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # needs CONFIG_FPROBE
kprobe_multi_test # needs CONFIG_FPROBE
@@ -11,3 +10,5 @@ fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_mu
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
+verifier_arena # JIT does not support arena
+arena_htab # JIT does not support arena
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 1a63996c0304..f4a2f66a683d 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -3,3 +3,6 @@
exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
+verifier_iterating_callbacks
+verifier_arena # JIT does not support arena
+arena_htab # JIT does not support arena
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fd15017ed3b1..3b9eb40d6343 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,13 +34,26 @@ LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null)
LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
CFLAGS += -g $(OPT_FLAGS) -rdynamic \
- -Wall -Werror \
+ -Wall -Werror -fno-omit-frame-pointer \
$(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
LDFLAGS += $(SAN_LDFLAGS)
LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread
+# The following tests perform type punning and they may break strict
+# aliasing rules, which are exploited by both GCC and clang by default
+# while optimizing. This can lead to broken programs.
+progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing
+progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing
+progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing
+progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing
+progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing
+progs/syscall.c-CFLAGS := -fno-strict-aliasing
+progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing
+progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing
+progs/timer_crash.c-CFLAGS := -fno-strict-aliasing
+
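For readers unfamiliar with the issue, a minimal sketch (illustrative names, not taken from the tests) of the kind of type punning that -fno-strict-aliasing tolerates:

    /*
     * Illustrative sketch only: reading a float's bits through an
     * unsigned int pointer is classic type punning and violates C
     * strict-aliasing rules, so an optimizer may reorder or drop the
     * access; -fno-strict-aliasing tells GCC/clang to assume the two
     * pointers may alias.
     */
    #include <stdio.h>

    static unsigned int float_bits(float f)
    {
            return *(unsigned int *)&f;     /* UB under strict aliasing */
    }

    int main(void)
    {
            printf("0x%08x\n", float_bits(1.0f));   /* 0x3f800000 on IEEE 754 */
            return 0;
    }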
ifneq ($(LLVM),)
# Silence some warnings when compiled with clang
CFLAGS += -Wno-unused-command-line-argument
@@ -64,6 +77,15 @@ TEST_INST_SUBDIRS := no_alu32
ifneq ($(BPF_GCC),)
TEST_GEN_PROGS += test_progs-bpf_gcc
TEST_INST_SUBDIRS += bpf_gcc
+
+# The following tests contain C code that, although technically legal,
+# triggers GCC warnings that cannot be disabled: declaration of
+# anonymous struct types in function parameter lists.
+progs/btf_dump_test_case_bitfields.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_namespacing.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_packing.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_padding.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_syntax.c-CFLAGS := -Wno-error
endif
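For reference, a minimal sketch of the construct GCC objects to, with an invented name; declaring an anonymous struct type directly in a parameter list is legal but always warned about:

    /*
     * Illustrative sketch only: GCC warns (roughly) that an anonymous
     * struct "declared inside parameter list will not be visible
     * outside of this definition or declaration"; under -Werror the
     * warning is fatal, hence the per-file -Wno-error above.
     */
    void takes_point(struct { int x; int y; } point);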
ifneq ($(CLANG_CPUV4),)
@@ -110,7 +132,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
- xdp_features
+ xdp_features bpf_test_no_cfi.ko
TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
@@ -175,8 +197,7 @@ endif
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
$(notdir $(TEST_GEN_PROGS) \
- $(TEST_GEN_PROGS_EXTENDED) \
- $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+ $(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ;
# sort removes libbpf duplicates when not cross-building
MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
@@ -233,6 +254,12 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmo
$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod
$(Q)cp bpf_testmod/bpf_testmod.ko $@
+$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch])
+ $(call msg,MOD,,$@)
+ $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation
+ $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi
+ $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@
+
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
ifneq ($(CROSS_COMPILE),)
CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
@@ -382,11 +409,11 @@ endif
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
- -I$(abspath $(OUTPUT)/../usr/include)
+ -I$(abspath $(OUTPUT)/../usr/include) \
+ -Wno-compare-distinct-pointer-types
# TODO: enable me -Wsign-compare
-CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
- -Wno-compare-distinct-pointer-types
+CLANG_CFLAGS = $(CLANG_SYS_INCLUDES)
$(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline
@@ -504,7 +531,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \
$(wildcard $(BPFDIR)/*.bpf.h) \
| $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
- $(TRUNNER_BPF_CFLAGS))
+ $(TRUNNER_BPF_CFLAGS) \
+ $$($$<-CFLAGS))
$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -514,6 +542,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$(@:.skel.h=.subskel.h)
+ $(Q)rm -f $$(<:.o=.linked1.o) $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -522,6 +551,7 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o)
$(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
+ $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o))
@@ -532,6 +562,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
+ $(Q)rm -f $$(@:.skel.h=.linked1.o) $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
endif
# ensure we set up tests.h header generation rule just once
@@ -606,6 +637,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
flow_dissector_load.h \
ip_check_defrag_frags.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
+ $(OUTPUT)/bpf_test_no_cfi.ko \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
$(OUTPUT)/sign-file \
@@ -729,11 +761,12 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
+EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
+ bpf_test_no_cfi.ko \
liburandom_read.so)
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 9af79c7a9b58..9b974e425af3 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -115,7 +115,7 @@ the insn 20 undoes map_value addition. It is currently impossible for the
verifier to understand such speculative pointer arithmetic.
Hence `this patch`__ addresses it on the compiler side. It was committed in llvm 12.
-__ https://reviews.llvm.org/D85570
+__ https://github.com/llvm/llvm-project/commit/ddf1864ace484035e3cde5e83b3a31ac81e059c6
The corresponding C code
This is due to an llvm BPF backend bug. `The fix`__
has been pushed to llvm 10.x release branch and will be
available in 10.0.1. The patch is available in llvm 11.0.0 trunk.
-__ https://reviews.llvm.org/D78466
+__ https://github.com/llvm/llvm-project/commit/3cb7e7bf959dcd3b8080986c62e10a75c7af43f0
bpf_verif_scale/loop6.bpf.o test failure with Clang 12
======================================================
@@ -204,7 +204,7 @@ r5(w5) is eventually saved on stack at insn #24 for later use.
This causes a later verifier failure. The bug has been `fixed`__ in
Clang 13.
-__ https://reviews.llvm.org/D97479
+__ https://github.com/llvm/llvm-project/commit/1959ead525b8830cc8a345f45e1c3ef9902d3229
BPF CO-RE-based tests and Clang version
=======================================
@@ -221,11 +221,11 @@ failures:
- __builtin_btf_type_id() [0_, 1_, 2_];
- __builtin_preserve_type_info(), __builtin_preserve_enum_value() [3_, 4_].
-.. _0: https://reviews.llvm.org/D74572
-.. _1: https://reviews.llvm.org/D74668
-.. _2: https://reviews.llvm.org/D85174
-.. _3: https://reviews.llvm.org/D83878
-.. _4: https://reviews.llvm.org/D83242
+.. _0: https://github.com/llvm/llvm-project/commit/6b01b465388b204d543da3cf49efd6080db094a9
+.. _1: https://github.com/llvm/llvm-project/commit/072cde03aaa13a2c57acf62d79876bf79aa1919f
+.. _2: https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
+.. _3: https://github.com/llvm/llvm-project/commit/6d218b4adb093ff2e9764febbbc89f429412006c
+.. _4: https://github.com/llvm/llvm-project/commit/6d6750696400e7ce988d66a1a00e1d0cb32815f8
Floating-point tests and Clang version
======================================
@@ -234,7 +234,7 @@ Certain selftests, e.g. core_reloc, require support for the floating-point
types, which was introduced in `Clang 13`__. The older Clang versions will
either crash when compiling these tests, or generate an incorrect BTF.
-__ https://reviews.llvm.org/D83289
+__ https://github.com/llvm/llvm-project/commit/a7137b238a07d9399d3ae96c0b461571bd5aa8b2
Kernel function call test and Clang version
===========================================
@@ -248,7 +248,7 @@ Without it, the error from compiling bpf selftests looks like:
libbpf: failed to find BTF for extern 'tcp_slow_start' [25] section: -2
-__ https://reviews.llvm.org/D93563
+__ https://github.com/llvm/llvm-project/commit/886f9ff53155075bd5f1e994f17b85d1e1b7470c
btf_tag test and Clang version
==============================
@@ -264,8 +264,8 @@ Without them, the btf_tag selftest will be skipped and you will observe:
#<test_num> btf_tag:SKIP
-.. _0: https://reviews.llvm.org/D111588
-.. _1: https://reviews.llvm.org/D111199
+.. _0: https://github.com/llvm/llvm-project/commit/a162b67c98066218d0d00aa13b99afb95d9bb5e6
+.. _1: https://github.com/llvm/llvm-project/commit/3466e00716e12e32fdb100e3fcfca5c2b3e8d784
Clang dependencies for static linking tests
===========================================
@@ -274,7 +274,7 @@ linked_vars, linked_maps, and linked_funcs tests depend on `Clang fix`__ to
generate valid BTF information for weak variables. Please make sure you use
Clang that contains the fix.
-__ https://reviews.llvm.org/D100362
+__ https://github.com/llvm/llvm-project/commit/968292cb93198442138128d850fd54dc7edc0035
Clang relocation changes
========================
@@ -292,7 +292,7 @@ Here, ``type 2`` refers to new relocation type ``R_BPF_64_ABS64``.
To fix this issue, use a newer libbpf.
.. Links
-.. _clang reloc patch: https://reviews.llvm.org/D102712
+.. _clang reloc patch: https://github.com/llvm/llvm-project/commit/6a2ea84600ba4bd3b2733bd8f08f5115eb32164b
.. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
Clang dependencies for the u32 spill test (xdpwall)
@@ -304,6 +304,6 @@ from running test_progs will look like:
.. code-block:: console
- test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007
+ test_xdpwall:FAIL:Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5? unexpected error: -4007
-__ https://reviews.llvm.org/D109073
+__ https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 73ce11b0547d..b2b4c391eb0a 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -323,14 +323,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
break;
case 'p':
env.producer_cnt = strtol(arg, NULL, 10);
- if (env.producer_cnt <= 0) {
+ if (env.producer_cnt < 0) {
fprintf(stderr, "Invalid producer count: %s\n", arg);
argp_usage(state);
}
break;
case 'c':
env.consumer_cnt = strtol(arg, NULL, 10);
- if (env.consumer_cnt <= 0) {
+ if (env.consumer_cnt < 0) {
fprintf(stderr, "Invalid consumer count: %s\n", arg);
argp_usage(state);
}
@@ -495,14 +495,20 @@ extern const struct bench bench_trig_base;
extern const struct bench bench_trig_tp;
extern const struct bench bench_trig_rawtp;
extern const struct bench bench_trig_kprobe;
+extern const struct bench bench_trig_kretprobe;
+extern const struct bench bench_trig_kprobe_multi;
+extern const struct bench bench_trig_kretprobe_multi;
extern const struct bench bench_trig_fentry;
+extern const struct bench bench_trig_fexit;
extern const struct bench bench_trig_fentry_sleep;
extern const struct bench bench_trig_fmodret;
extern const struct bench bench_trig_uprobe_base;
-extern const struct bench bench_trig_uprobe_with_nop;
-extern const struct bench bench_trig_uretprobe_with_nop;
-extern const struct bench bench_trig_uprobe_without_nop;
-extern const struct bench bench_trig_uretprobe_without_nop;
+extern const struct bench bench_trig_uprobe_nop;
+extern const struct bench bench_trig_uretprobe_nop;
+extern const struct bench bench_trig_uprobe_push;
+extern const struct bench bench_trig_uretprobe_push;
+extern const struct bench bench_trig_uprobe_ret;
+extern const struct bench bench_trig_uretprobe_ret;
extern const struct bench bench_rb_libbpf;
extern const struct bench bench_rb_custom;
extern const struct bench bench_pb_libbpf;
@@ -537,14 +543,20 @@ static const struct bench *benchs[] = {
&bench_trig_tp,
&bench_trig_rawtp,
&bench_trig_kprobe,
+ &bench_trig_kretprobe,
+ &bench_trig_kprobe_multi,
+ &bench_trig_kretprobe_multi,
&bench_trig_fentry,
+ &bench_trig_fexit,
&bench_trig_fentry_sleep,
&bench_trig_fmodret,
&bench_trig_uprobe_base,
- &bench_trig_uprobe_with_nop,
- &bench_trig_uretprobe_with_nop,
- &bench_trig_uprobe_without_nop,
- &bench_trig_uretprobe_without_nop,
+ &bench_trig_uprobe_nop,
+ &bench_trig_uretprobe_nop,
+ &bench_trig_uprobe_push,
+ &bench_trig_uretprobe_push,
+ &bench_trig_uprobe_ret,
+ &bench_trig_uretprobe_ret,
&bench_rb_libbpf,
&bench_rb_custom,
&bench_pb_libbpf,
@@ -607,6 +619,10 @@ static void setup_benchmark(void)
bench->setup();
for (i = 0; i < env.consumer_cnt; i++) {
+ if (!bench->consumer_thread) {
+ fprintf(stderr, "benchmark doesn't support consumers!\n");
+ exit(1);
+ }
err = pthread_create(&state.consumers[i], NULL,
bench->consumer_thread, (void *)(long)i);
if (err) {
@@ -626,6 +642,10 @@ static void setup_benchmark(void)
env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;
for (i = 0; i < env.producer_cnt; i++) {
+ if (!bench->producer_thread) {
+ fprintf(stderr, "benchmark doesn't support producers!\n");
+ exit(1);
+ }
err = pthread_create(&state.producers[i], NULL,
bench->producer_thread, (void *)(long)i);
if (err) {
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index dbd362771d6a..ace0d1011a8e 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -85,12 +85,36 @@ static void trigger_kprobe_setup(void)
attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
}
+static void trigger_kretprobe_setup(void)
+{
+ setup_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_kretprobe);
+}
+
+static void trigger_kprobe_multi_setup(void)
+{
+ setup_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi);
+}
+
+static void trigger_kretprobe_multi_setup(void)
+{
+ setup_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi);
+}
+
static void trigger_fentry_setup(void)
{
setup_ctx();
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
}
+static void trigger_fexit_setup(void)
+{
+ setup_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_fexit);
+}
+
static void trigger_fentry_sleep_setup(void)
{
setup_ctx();
@@ -113,12 +137,25 @@ static void trigger_fmodret_setup(void)
 * GCC doesn't generate stack setup preamble for these functions due to them
* having no input arguments and doing nothing in the body.
*/
-__weak void uprobe_target_with_nop(void)
+__weak void uprobe_target_nop(void)
{
asm volatile ("nop");
}
-__weak void uprobe_target_without_nop(void)
+__weak void opaque_noop_func(void)
+{
+}
+
+__weak int uprobe_target_push(void)
+{
+ /* overhead of function call is negligible compared to uprobe
+ * triggering, so this shouldn't affect benchmark results much
+ */
+ opaque_noop_func();
+ return 1;
+}
+
+__weak void uprobe_target_ret(void)
{
asm volatile ("");
}
@@ -126,27 +163,34 @@ __weak void uprobe_target_without_nop(void)
static void *uprobe_base_producer(void *input)
{
while (true) {
- uprobe_target_with_nop();
+ uprobe_target_nop();
atomic_inc(&base_hits.value);
}
return NULL;
}
-static void *uprobe_producer_with_nop(void *input)
+static void *uprobe_producer_nop(void *input)
+{
+ while (true)
+ uprobe_target_nop();
+ return NULL;
+}
+
+static void *uprobe_producer_push(void *input)
{
while (true)
- uprobe_target_with_nop();
+ uprobe_target_push();
return NULL;
}
-static void *uprobe_producer_without_nop(void *input)
+static void *uprobe_producer_ret(void *input)
{
while (true)
- uprobe_target_without_nop();
+ uprobe_target_ret();
return NULL;
}
-static void usetup(bool use_retprobe, bool use_nop)
+static void usetup(bool use_retprobe, void *target_addr)
{
size_t uprobe_offset;
struct bpf_link *link;
@@ -159,11 +203,7 @@ static void usetup(bool use_retprobe, bool use_nop)
exit(1);
}
- if (use_nop)
- uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop);
- else
- uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop);
-
+ uprobe_offset = get_uprobe_offset(target_addr);
link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
use_retprobe,
-1 /* all PIDs */,
@@ -176,24 +216,34 @@ static void usetup(bool use_retprobe, bool use_nop)
ctx.skel->links.bench_trigger_uprobe = link;
}
-static void uprobe_setup_with_nop(void)
+static void uprobe_setup_nop(void)
{
- usetup(false, true);
+ usetup(false, &uprobe_target_nop);
}
-static void uretprobe_setup_with_nop(void)
+static void uretprobe_setup_nop(void)
{
- usetup(true, true);
+ usetup(true, &uprobe_target_nop);
}
-static void uprobe_setup_without_nop(void)
+static void uprobe_setup_push(void)
{
- usetup(false, false);
+ usetup(false, &uprobe_target_push);
}
-static void uretprobe_setup_without_nop(void)
+static void uretprobe_setup_push(void)
{
- usetup(true, false);
+ usetup(true, &uprobe_target_push);
+}
+
+static void uprobe_setup_ret(void)
+{
+ usetup(false, &uprobe_target_ret);
+}
+
+static void uretprobe_setup_ret(void)
+{
+ usetup(true, &uprobe_target_ret);
}
const struct bench bench_trig_base = {
@@ -235,6 +285,36 @@ const struct bench bench_trig_kprobe = {
.report_final = hits_drops_report_final,
};
+const struct bench bench_trig_kretprobe = {
+ .name = "trig-kretprobe",
+ .validate = trigger_validate,
+ .setup = trigger_kretprobe_setup,
+ .producer_thread = trigger_producer,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_trig_kprobe_multi = {
+ .name = "trig-kprobe-multi",
+ .validate = trigger_validate,
+ .setup = trigger_kprobe_multi_setup,
+ .producer_thread = trigger_producer,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_trig_kretprobe_multi = {
+ .name = "trig-kretprobe-multi",
+ .validate = trigger_validate,
+ .setup = trigger_kretprobe_multi_setup,
+ .producer_thread = trigger_producer,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
const struct bench bench_trig_fentry = {
.name = "trig-fentry",
.validate = trigger_validate,
@@ -245,6 +325,16 @@ const struct bench bench_trig_fentry = {
.report_final = hits_drops_report_final,
};
+const struct bench bench_trig_fexit = {
+ .name = "trig-fexit",
+ .validate = trigger_validate,
+ .setup = trigger_fexit_setup,
+ .producer_thread = trigger_producer,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
const struct bench bench_trig_fentry_sleep = {
.name = "trig-fentry-sleep",
.validate = trigger_validate,
@@ -274,37 +364,55 @@ const struct bench bench_trig_uprobe_base = {
.report_final = hits_drops_report_final,
};
-const struct bench bench_trig_uprobe_with_nop = {
- .name = "trig-uprobe-with-nop",
- .setup = uprobe_setup_with_nop,
- .producer_thread = uprobe_producer_with_nop,
+const struct bench bench_trig_uprobe_nop = {
+ .name = "trig-uprobe-nop",
+ .setup = uprobe_setup_nop,
+ .producer_thread = uprobe_producer_nop,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_trig_uretprobe_nop = {
+ .name = "trig-uretprobe-nop",
+ .setup = uretprobe_setup_nop,
+ .producer_thread = uprobe_producer_nop,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
+const struct bench bench_trig_uprobe_push = {
+ .name = "trig-uprobe-push",
+ .setup = uprobe_setup_push,
+ .producer_thread = uprobe_producer_push,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
-const struct bench bench_trig_uretprobe_with_nop = {
- .name = "trig-uretprobe-with-nop",
- .setup = uretprobe_setup_with_nop,
- .producer_thread = uprobe_producer_with_nop,
+const struct bench bench_trig_uretprobe_push = {
+ .name = "trig-uretprobe-push",
+ .setup = uretprobe_setup_push,
+ .producer_thread = uprobe_producer_push,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
-const struct bench bench_trig_uprobe_without_nop = {
- .name = "trig-uprobe-without-nop",
- .setup = uprobe_setup_without_nop,
- .producer_thread = uprobe_producer_without_nop,
+const struct bench bench_trig_uprobe_ret = {
+ .name = "trig-uprobe-ret",
+ .setup = uprobe_setup_ret,
+ .producer_thread = uprobe_producer_ret,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
-const struct bench bench_trig_uretprobe_without_nop = {
- .name = "trig-uretprobe-without-nop",
- .setup = uretprobe_setup_without_nop,
- .producer_thread = uprobe_producer_without_nop,
+const struct bench bench_trig_uretprobe_ret = {
+ .name = "trig-uretprobe-ret",
+ .setup = uretprobe_setup_ret,
+ .producer_thread = uprobe_producer_ret,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
new file mode 100755
index 000000000000..9bdcc74e03a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -eufo pipefail
+
+for i in base {uprobe,uretprobe}-{nop,push,ret}
+do
+ summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
+ printf "%-15s: %s\n" $i "$summary"
+done
diff --git a/tools/testing/selftests/bpf/bpf_arena_alloc.h b/tools/testing/selftests/bpf/bpf_arena_alloc.h
new file mode 100644
index 000000000000..c27678299e0c
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_alloc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include "bpf_arena_common.h"
+
+#ifndef __round_mask
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#endif
+#ifndef round_up
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#endif
+
+#ifdef __BPF__
+#define NR_CPUS (sizeof(struct cpumask) * 8)
+
+static void __arena * __arena page_frag_cur_page[NR_CPUS];
+static int __arena page_frag_cur_offset[NR_CPUS];
+
+/* Simple page_frag allocator */
+static inline void __arena* bpf_alloc(unsigned int size)
+{
+ __u64 __arena *obj_cnt;
+ __u32 cpu = bpf_get_smp_processor_id();
+ void __arena *page = page_frag_cur_page[cpu];
+ int __arena *cur_offset = &page_frag_cur_offset[cpu];
+ int offset;
+
+ size = round_up(size, 8);
+ if (size >= PAGE_SIZE - 8)
+ return NULL;
+ if (!page) {
+refill:
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page)
+ return NULL;
+ cast_kern(page);
+ page_frag_cur_page[cpu] = page;
+ *cur_offset = PAGE_SIZE - 8;
+ obj_cnt = page + PAGE_SIZE - 8;
+ *obj_cnt = 0;
+ } else {
+ cast_kern(page);
+ obj_cnt = page + PAGE_SIZE - 8;
+ }
+
+ offset = *cur_offset - size;
+ if (offset < 0)
+ goto refill;
+
+ (*obj_cnt)++;
+ *cur_offset = offset;
+ return page + offset;
+}
+
+static inline void bpf_free(void __arena *addr)
+{
+ __u64 __arena *obj_cnt;
+
+ addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
+ obj_cnt = addr + PAGE_SIZE - 8;
+ if (--(*obj_cnt) == 0)
+ bpf_arena_free_pages(&arena, addr, 1);
+}
+#else
+static inline void __arena* bpf_alloc(unsigned int size) { return NULL; }
+static inline void bpf_free(void __arena *addr) {}
+#endif
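To make the allocator's intended use concrete, here is a hypothetical BPF-side sketch (not part of the patch; the struct and function names are invented), mirroring how bpf_arena_htab.h later in this series uses it:

    struct my_elem {
            int key;
            int value;
    };

    /* Allocate an element from the per-CPU page frag, then release it. */
    static int alloc_free_once(void)
    {
            struct my_elem __arena *e = bpf_alloc(sizeof(*e));

            if (!e)
                    return -ENOMEM; /* as htab_update_elem() does; needs <errno.h> */
            e->key = 1;
            e->value = 2;
            bpf_free(e);    /* frees the page once its object count drops to 0 */
            return 0;
    }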
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
new file mode 100644
index 000000000000..bcf195c64a45
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+#endif
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
+#ifndef arena_container_of
+#define arena_container_of(ptr, type, member) \
+ ({ \
+ void __arena *__mptr = (void __arena *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+#endif
+
+#ifdef __BPF__ /* when compiled as bpf program */
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+/*
+ * for older kernels try sizeof(struct genradix_node)
+ * or flexible:
+ * static inline long __bpf_page_size(void) {
+ * return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
+ * }
+ * but generated code is not great.
+ */
+#endif
+
+#if defined(__BPF_FEATURE_ARENA_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#define __arena __attribute__((address_space(1)))
+#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
+#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
+#else
+#define __arena
+#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
+#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
+#endif
+
+void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
+ int node_id, __u64 flags) __ksym __weak;
+void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
+
+#else /* when compiled as user space code */
+
+#define __arena
+#define __arg_arena
+#define cast_kern(ptr) /* nop for user space */
+#define cast_user(ptr) /* nop for user space */
+__weak char arena[1];
+
+#ifndef offsetof
+#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
+#endif
+
+static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
+ int node_id, __u64 flags)
+{
+ return NULL;
+}
+static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
+{
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/bpf_arena_htab.h b/tools/testing/selftests/bpf/bpf_arena_htab.h
new file mode 100644
index 000000000000..acc01a876668
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_htab.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include <errno.h>
+#include "bpf_arena_alloc.h"
+#include "bpf_arena_list.h"
+
+struct htab_bucket {
+ struct arena_list_head head;
+};
+typedef struct htab_bucket __arena htab_bucket_t;
+
+struct htab {
+ htab_bucket_t *buckets;
+ int n_buckets;
+};
+typedef struct htab __arena htab_t;
+
+static inline htab_bucket_t *__select_bucket(htab_t *htab, __u32 hash)
+{
+ htab_bucket_t *b = htab->buckets;
+
+ cast_kern(b);
+ return &b[hash & (htab->n_buckets - 1)];
+}
+
+static inline arena_list_head_t *select_bucket(htab_t *htab, __u32 hash)
+{
+ return &__select_bucket(htab, hash)->head;
+}
+
+struct hashtab_elem {
+ int hash;
+ int key;
+ int value;
+ struct arena_list_node hash_node;
+};
+typedef struct hashtab_elem __arena hashtab_elem_t;
+
+static hashtab_elem_t *lookup_elem_raw(arena_list_head_t *head, __u32 hash, int key)
+{
+ hashtab_elem_t *l;
+
+ list_for_each_entry(l, head, hash_node)
+ if (l->hash == hash && l->key == key)
+ return l;
+
+ return NULL;
+}
+
+static int htab_hash(int key)
+{
+ return key;
+}
+
+__weak int htab_lookup_elem(htab_t *htab __arg_arena, int key)
+{
+ hashtab_elem_t *l_old;
+ arena_list_head_t *head;
+
+ cast_kern(htab);
+ head = select_bucket(htab, key);
+ l_old = lookup_elem_raw(head, htab_hash(key), key);
+ if (l_old)
+ return l_old->value;
+ return 0;
+}
+
+__weak int htab_update_elem(htab_t *htab __arg_arena, int key, int value)
+{
+ hashtab_elem_t *l_new = NULL, *l_old;
+ arena_list_head_t *head;
+
+ cast_kern(htab);
+ head = select_bucket(htab, key);
+ l_old = lookup_elem_raw(head, htab_hash(key), key);
+
+ l_new = bpf_alloc(sizeof(*l_new));
+ if (!l_new)
+ return -ENOMEM;
+ l_new->key = key;
+ l_new->hash = htab_hash(key);
+ l_new->value = value;
+
+ list_add_head(&l_new->hash_node, head);
+ if (l_old) {
+ list_del(&l_old->hash_node);
+ bpf_free(l_old);
+ }
+ return 0;
+}
+
+void htab_init(htab_t *htab)
+{
+ void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
+
+ cast_user(buckets);
+ htab->buckets = buckets;
+ htab->n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket);
+}
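
htab_init() publishes the bucket array in its user-space form (cast_user()),
so the user-space half of a test can walk the very same pointer graph, while
BPF-side readers flip it back with cast_kern() in __select_bucket(). A rough
sketch of the BPF-program side that builds such a table (map, global and
section names are assumptions for illustration; bpf_alloc() is the arena
allocator from bpf_arena_alloc.h, and the header is included after the arena
map definition because htab_init() references the map by name):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 100);
} arena SEC(".maps");

#include "bpf_arena_htab.h"

void __arena *htab_for_user;    /* read by the user-space part of the test */

SEC("syscall")
int arena_htab_build(void *ctx)
{
        struct htab __arena *htab;
        __u64 i;

        htab = bpf_alloc(sizeof(*htab));
        if (!htab)
                return 1;
        cast_kern(htab);
        htab_init(htab);
        for (i = 0; i < 16; i++)
                htab_update_elem(htab, i, i);
        cast_user(htab);
        htab_for_user = htab;   /* publish the user-space view */
        return 0;
}

char _license[] SEC("license") = "GPL";
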
diff --git a/tools/testing/selftests/bpf/bpf_arena_list.h b/tools/testing/selftests/bpf/bpf_arena_list.h
new file mode 100644
index 000000000000..b99b9f408eff
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_list.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include "bpf_arena_common.h"
+
+struct arena_list_node;
+
+typedef struct arena_list_node __arena arena_list_node_t;
+
+struct arena_list_node {
+ arena_list_node_t *next;
+ arena_list_node_t * __arena *pprev;
+};
+
+struct arena_list_head {
+ struct arena_list_node __arena *first;
+};
+typedef struct arena_list_head __arena arena_list_head_t;
+
+#define list_entry(ptr, type, member) arena_container_of(ptr, type, member)
+
+#define list_entry_safe(ptr, type, member) \
+ ({ typeof(*ptr) * ___ptr = (ptr); \
+ ___ptr ? ({ cast_kern(___ptr); list_entry(___ptr, type, member); }) : NULL; \
+ })
+
+#ifndef __BPF__
+static inline void *bpf_iter_num_new(struct bpf_iter_num *it, int i, int j) { return NULL; }
+static inline void bpf_iter_num_destroy(struct bpf_iter_num *it) {}
+static inline bool bpf_iter_num_next(struct bpf_iter_num *it) { return true; }
+#define cond_break ({})
+#endif
+
+/* Safely walk linked list elements. Deletion of elements is allowed. */
+#define list_for_each_entry(pos, head, member) \
+ for (void * ___tmp = (pos = list_entry_safe((head)->first, \
+ typeof(*(pos)), member), \
+ (void *)0); \
+ pos && ({ ___tmp = (void *)pos->member.next; 1; }); \
+ cond_break, \
+ pos = list_entry_safe((void __arena *)___tmp, typeof(*(pos)), member))
+
+static inline void list_add_head(arena_list_node_t *n, arena_list_head_t *h)
+{
+ arena_list_node_t *first = h->first, * __arena *tmp;
+
+ cast_user(first);
+ cast_kern(n);
+ WRITE_ONCE(n->next, first);
+ cast_kern(first);
+ if (first) {
+ tmp = &n->next;
+ cast_user(tmp);
+ WRITE_ONCE(first->pprev, tmp);
+ }
+ cast_user(n);
+ WRITE_ONCE(h->first, n);
+
+ tmp = &h->first;
+ cast_user(tmp);
+ cast_kern(n);
+ WRITE_ONCE(n->pprev, tmp);
+}
+
+static inline void __list_del(arena_list_node_t *n)
+{
+ arena_list_node_t *next = n->next, *tmp;
+ arena_list_node_t * __arena *pprev = n->pprev;
+
+ cast_user(next);
+ cast_kern(pprev);
+ tmp = *pprev;
+ cast_kern(tmp);
+ WRITE_ONCE(tmp, next);
+ if (next) {
+ cast_user(pprev);
+ cast_kern(next);
+ WRITE_ONCE(next->pprev, pprev);
+ }
+}
+
+#define POISON_POINTER_DELTA 0
+
+#define LIST_POISON1 ((void __arena *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void __arena *) 0x122 + POISON_POINTER_DELTA)
+
+static inline void list_del(arena_list_node_t *n)
+{
+ __list_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
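
list_entry_safe() applies cast_kern() before every dereference and
list_for_each_entry() embeds cond_break, so a walk over an arbitrarily long
arena list still verifies. A short sketch of building and summing a list with
this header, reusing the arena map and includes from the previous sketch
(element layout and global names are illustrative):

struct elem {
        struct arena_list_node node;
        __u64 value;
};

struct arena_list_head __arena global_head;
__u64 list_sum;

SEC("syscall")
int build_and_sum(void *ctx)
{
        struct elem __arena *n;
        int i;

        for (i = 0; i < 10; i++) {
                n = bpf_alloc(sizeof(*n));      /* arena allocator */
                if (!n)
                        break;
                n->value = i;
                list_add_head(&n->node, &global_head);
        }
        list_for_each_entry(n, &global_head, node)
                list_sum += n->value;
        return 0;
}
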
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index f44875f8b367..a5b9df38c162 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -260,11 +260,11 @@ extern void bpf_throw(u64 cookie) __ksym;
#define __is_signed_type(type) (((type)(-1)) < (type)1)
-#define __bpf_cmp(LHS, OP, SIGN, PRED, RHS, DEFAULT) \
+#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \
({ \
__label__ l_true; \
bool ret = DEFAULT; \
- asm volatile goto("if %[lhs] " SIGN #OP " %[rhs] goto %l[l_true]" \
+ asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \
:: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
ret = !DEFAULT; \
l_true: \
@@ -276,7 +276,7 @@ l_true: \
* __lhs OP __rhs below will catch the mistake.
* Be aware that we check only __lhs to figure out the sign of compare.
*/
-#define _bpf_cmp(LHS, OP, RHS, NOFLIP) \
+#define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \
({ \
typeof(LHS) __lhs = (LHS); \
typeof(RHS) __rhs = (RHS); \
@@ -285,14 +285,17 @@ l_true: \
(void)(__lhs OP __rhs); \
if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \
if (sizeof(__rhs) == 8) \
- ret = __bpf_cmp(__lhs, OP, "", "r", __rhs, NOFLIP); \
+ /* "i" will truncate 64-bit constant into s32, \
+ * so we have to use extra register via "r". \
+ */ \
+ ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \
else \
- ret = __bpf_cmp(__lhs, OP, "", "i", __rhs, NOFLIP); \
+ ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \
} else { \
if (sizeof(__rhs) == 8) \
- ret = __bpf_cmp(__lhs, OP, "s", "r", __rhs, NOFLIP); \
+ ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \
else \
- ret = __bpf_cmp(__lhs, OP, "s", "i", __rhs, NOFLIP); \
+ ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \
} \
ret; \
})
@@ -304,7 +307,7 @@ l_true: \
#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS) \
({ \
- bool ret; \
+ bool ret = 0; \
if (__builtin_strcmp(#OP, "==") == 0) \
ret = _bpf_cmp(LHS, !=, RHS, false); \
else if (__builtin_strcmp(#OP, "!=") == 0) \
@@ -318,16 +321,71 @@ l_true: \
else if (__builtin_strcmp(#OP, ">=") == 0) \
ret = _bpf_cmp(LHS, <, RHS, false); \
else \
- (void) "bug"; \
+ asm volatile("r0 " #OP " invalid compare"); \
ret; \
})
#endif
+#define cond_break \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: break; \
+ l_continue:; \
+ })
+
#ifndef bpf_nop_mov
#define bpf_nop_mov(var) \
asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
#endif
+/* emit instruction:
+ * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
+ */
+#ifndef bpf_addr_space_cast
+#define bpf_addr_space_cast(var, dst_as, src_as)\
+ asm volatile(".byte 0xBF; \
+ .ifc %[reg], r0; \
+ .byte 0x00; \
+ .endif; \
+ .ifc %[reg], r1; \
+ .byte 0x11; \
+ .endif; \
+ .ifc %[reg], r2; \
+ .byte 0x22; \
+ .endif; \
+ .ifc %[reg], r3; \
+ .byte 0x33; \
+ .endif; \
+ .ifc %[reg], r4; \
+ .byte 0x44; \
+ .endif; \
+ .ifc %[reg], r5; \
+ .byte 0x55; \
+ .endif; \
+ .ifc %[reg], r6; \
+ .byte 0x66; \
+ .endif; \
+ .ifc %[reg], r7; \
+ .byte 0x77; \
+ .endif; \
+ .ifc %[reg], r8; \
+ .byte 0x88; \
+ .endif; \
+ .ifc %[reg], r9; \
+ .byte 0x99; \
+ .endif; \
+ .short %[off]; \
+ .long %[as]" \
+ : [reg]"+r"(var) \
+ : [off]"i"(BPF_ADDR_SPACE_CAST) \
+ , [as]"i"((dst_as << 16) | src_as));
+#endif
+
/* Description
* Assert that a conditional expression is true.
* Returns
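
The cond_break macro above hand-assembles the new may_goto instruction
(opcode 0xe5) with a 16-bit forward offset to its break label: the program
promises the verifier that any loop iteration may be cut short, which lets
otherwise too-long loops pass verification. A minimal usage sketch (program
name and section are illustrative):

SEC("syscall")
int long_running(void *ctx)
{
        int i, sum = 0;

        for (i = 0; i < (1 << 20); i++) {
                sum += i;
                cond_break;     /* may_goto: verifier may force an early exit */
        }
        return sum;
}
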
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index b4e78c1eb37b..14ebe7d9e1a3 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -9,7 +9,7 @@ struct bpf_sock_addr_kern;
* Error code
*/
extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
- struct bpf_dynptr *ptr__uninit) __ksym;
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
/* Description
* Initializes an xdp-type dynptr
@@ -17,7 +17,7 @@ extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
* Error code
*/
extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
- struct bpf_dynptr *ptr__uninit) __ksym;
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
/* Description
* Obtain a read-only pointer to the dynptr's data
@@ -26,7 +26,7 @@ extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
* buffer if unable to obtain a direct pointer
*/
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
- void *buffer, __u32 buffer__szk) __ksym;
+ void *buffer, __u32 buffer__szk) __ksym __weak;
/* Description
* Obtain a read-write pointer to the dynptr's data
@@ -35,13 +35,13 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
* buffer if unable to obtain a direct pointer
*/
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
- void *buffer, __u32 buffer__szk) __ksym;
+ void *buffer, __u32 buffer__szk) __ksym __weak;
-extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
-extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym;
-extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
-extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
-extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym __weak;
+extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym __weak;
+extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym __weak;
+extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym __weak;
/* Description
* Modify the address of a AF_UNIX sockaddr.
@@ -51,9 +51,19 @@ extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clo
extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
const __u8 *sun_path, __u32 sun_path__sz) __ksym;
+/* Description
+ * Allocate and configure a reqsk and link it with a listener and skb.
+ * Returns
+ * Error code
+ */
+struct sock;
+struct bpf_tcp_req_attrs;
+extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz) __ksym;
+
void *bpf_cast_to_kern_ctx(void *) __ksym;
-void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym;
+extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
extern int bpf_get_file_xattr(struct file *file, const char *name,
struct bpf_dynptr *value_ptr) __ksym;
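
Adding __weak to these __ksym declarations lets an object that references the
dynptr kfuncs still load on kernels where some of them are missing; the
program then tests symbol presence at run time instead of failing at load
time. A sketch of the usual guard, using bpf_ksym_exists() from
bpf_helpers.h (program type and logic are illustrative):

SEC("tc")
int use_skb_dynptr(struct __sk_buff *skb)
{
        struct bpf_dynptr ptr;

        if (!bpf_ksym_exists(bpf_dynptr_from_skb) ||
            !bpf_ksym_exists(bpf_dynptr_size))
                return 0;       /* kfuncs absent on this kernel */
        if (bpf_dynptr_from_skb(skb, 0, &ptr))
                return 0;
        bpf_printk("skb dynptr of size %u", bpf_dynptr_size(&ptr));
        return 0;
}
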
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile
new file mode 100644
index 000000000000..ed5143b79edf
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile
@@ -0,0 +1,19 @@
+BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..)
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+MODULES = bpf_test_no_cfi.ko
+
+obj-m += bpf_test_no_cfi.o
+
+all:
+ +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules
+
+clean:
+ +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean
+
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
new file mode 100644
index 000000000000..b1dd889d5d7d
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+struct bpf_test_no_cfi_ops {
+ void (*fn_1)(void);
+ void (*fn_2)(void);
+};
+
+static int dummy_init(struct btf *btf)
+{
+ return 0;
+}
+
+static int dummy_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ return 0;
+}
+
+static int dummy_reg(void *kdata)
+{
+ return 0;
+}
+
+static void dummy_unreg(void *kdata)
+{
+}
+
+static const struct bpf_verifier_ops dummy_verifier_ops;
+
+static void bpf_test_no_cfi_ops__fn_1(void)
+{
+}
+
+static void bpf_test_no_cfi_ops__fn_2(void)
+{
+}
+
+static struct bpf_test_no_cfi_ops __test_no_cif_ops = {
+ .fn_1 = bpf_test_no_cfi_ops__fn_1,
+ .fn_2 = bpf_test_no_cfi_ops__fn_2,
+};
+
+static struct bpf_struct_ops test_no_cif_ops = {
+ .verifier_ops = &dummy_verifier_ops,
+ .init = dummy_init,
+ .init_member = dummy_init_member,
+ .reg = dummy_reg,
+ .unreg = dummy_unreg,
+ .name = "bpf_test_no_cfi_ops",
+ .owner = THIS_MODULE,
+};
+
+static int bpf_test_no_cfi_init(void)
+{
+ int ret;
+
+ ret = register_bpf_struct_ops(&test_no_cif_ops,
+ bpf_test_no_cfi_ops);
+ if (!ret)
+ return -EINVAL;
+
+ test_no_cif_ops.cfi_stubs = &__test_no_cif_ops;
+ ret = register_bpf_struct_ops(&test_no_cif_ops,
+ bpf_test_no_cfi_ops);
+ return ret;
+}
+
+static void bpf_test_no_cfi_exit(void)
+{
+}
+
+module_init(bpf_test_no_cfi_init);
+module_exit(bpf_test_no_cfi_exit);
+
+MODULE_AUTHOR("Kuifeng Lee");
+MODULE_DESCRIPTION("BPF no cfi_stubs test module");
+MODULE_LICENSE("Dual BSD/GPL");
+
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 91907b321f91..39ad96a18123 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
+#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
+#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -341,12 +343,12 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write,
};
-BTF_SET8_START(bpf_testmod_common_kfunc_ids)
+BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
-BTF_SET8_END(bpf_testmod_common_kfunc_ids)
+BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
@@ -492,7 +494,7 @@ __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused
return arg;
}
-BTF_SET8_START(bpf_testmod_check_kfunc_ids)
+BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
@@ -518,13 +520,120 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
-BTF_SET8_END(bpf_testmod_check_kfunc_ids)
+BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
+
+static int bpf_testmod_ops_init(struct btf *btf)
+{
+ return 0;
+}
+
+static bool bpf_testmod_ops_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_testmod_ops_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
+ /* For data fields, this function has to copy it and return
+ * 1 to indicate that the data has been handled by the
+ * struct_ops type, or the verifier will reject the map if
+ * the value of the data field is not zero.
+ */
+ ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
+ return 1;
+ }
+ return 0;
+}
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_check_kfunc_ids,
};
+static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
+ .is_valid_access = bpf_testmod_ops_is_valid_access,
+};
+
+static int bpf_dummy_reg(void *kdata)
+{
+ struct bpf_testmod_ops *ops = kdata;
+
+ if (ops->test_1)
+ ops->test_1();
+ /* Some test cases (ex. struct_ops_maybe_null) may not have test_2
+ * initialized, so we need to check for NULL.
+ */
+ if (ops->test_2)
+ ops->test_2(4, ops->data);
+
+ return 0;
+}
+
+static void bpf_dummy_unreg(void *kdata)
+{
+}
+
+static int bpf_testmod_test_1(void)
+{
+ return 0;
+}
+
+static void bpf_testmod_test_2(int a, int b)
+{
+}
+
+static int bpf_testmod_ops__test_maybe_null(int dummy,
+ struct task_struct *task__nullable)
+{
+ return 0;
+}
+
+static struct bpf_testmod_ops __bpf_testmod_ops = {
+ .test_1 = bpf_testmod_test_1,
+ .test_2 = bpf_testmod_test_2,
+ .test_maybe_null = bpf_testmod_ops__test_maybe_null,
+};
+
+struct bpf_struct_ops bpf_bpf_testmod_ops = {
+ .verifier_ops = &bpf_testmod_verifier_ops,
+ .init = bpf_testmod_ops_init,
+ .init_member = bpf_testmod_ops_init_member,
+ .reg = bpf_dummy_reg,
+ .unreg = bpf_dummy_unreg,
+ .cfi_stubs = &__bpf_testmod_ops,
+ .name = "bpf_testmod_ops",
+ .owner = THIS_MODULE,
+};
+
+static int bpf_dummy_reg2(void *kdata)
+{
+ struct bpf_testmod_ops2 *ops = kdata;
+
+ ops->test_1();
+ return 0;
+}
+
+static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
+ .test_1 = bpf_testmod_test_1,
+};
+
+struct bpf_struct_ops bpf_testmod_ops2 = {
+ .verifier_ops = &bpf_testmod_verifier_ops,
+ .init = bpf_testmod_ops_init,
+ .init_member = bpf_testmod_ops_init_member,
+ .reg = bpf_dummy_reg2,
+ .unreg = bpf_dummy_unreg,
+ .cfi_stubs = &__bpf_testmod_ops2,
+ .name = "bpf_testmod_ops2",
+ .owner = THIS_MODULE,
+};
+
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
@@ -535,6 +644,8 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
+ ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
+ ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
@@ -544,7 +655,15 @@ static int bpf_testmod_init(void)
static void bpf_testmod_exit(void)
{
- return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	/* Need to wait for all references to be dropped because
+	 * bpf_kfunc_call_test_release(), which currently resides in the
+	 * kernel, can be called after bpf_testmod is unloaded. Once the
+	 * release function is moved into the module, this wait can be removed.
+ */
+ while (refcount_read(&prog_test_struct.cnt) > 1)
+ msleep(20);
+
+ sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}
module_init(bpf_testmod_init);
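
The init_member() callback returning 1 for the 'data' field is what makes
struct_ops "shadow" members work end to end: libbpf exposes non-function
members of a struct_ops map through the skeleton, user space sets them before
load, init_member() copies them into the kernel-side struct, and
bpf_dummy_reg() finally passes the value to test_2(). A hedged sketch of the
user-space side (the skeleton name and layout here are assumptions, not taken
from this diff):

#include "testmod_ops.skel.h"   /* hypothetical skeleton */

static int run(void)
{
        struct testmod_ops *skel;
        struct bpf_link *link;
        int err;

        skel = testmod_ops__open();
        if (!skel)
                return 1;
        /* shadow variable: copied by bpf_testmod_ops_init_member() */
        skel->struct_ops.testmod_1->data = 13;
        err = testmod_ops__load(skel);
        if (err)
                goto out;
        link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
        /* registration runs bpf_dummy_reg(), i.e. test_2(4, 13) */
        bpf_link__destroy(link);
out:
        testmod_ops__destroy(skel);
        return err;
}
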
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
index f32793efe095..23fa1872ee67 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
@@ -5,6 +5,8 @@
#include <linux/types.h>
+struct task_struct;
+
struct bpf_testmod_test_read_ctx {
char *buf;
loff_t off;
@@ -28,4 +30,67 @@ struct bpf_iter_testmod_seq {
int cnt;
};
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ /* Used to test nullable arguments. */
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+
+ /* The following fields are used to test shadow copies. */
+ char onebyte;
+ struct {
+ int a;
+ int b;
+ } unsupported;
+ int data;
+
+ /* The following pointers are used to test the maps having multiple
+ * pages of trampolines.
+ */
+ int (*tramp_1)(int value);
+ int (*tramp_2)(int value);
+ int (*tramp_3)(int value);
+ int (*tramp_4)(int value);
+ int (*tramp_5)(int value);
+ int (*tramp_6)(int value);
+ int (*tramp_7)(int value);
+ int (*tramp_8)(int value);
+ int (*tramp_9)(int value);
+ int (*tramp_10)(int value);
+ int (*tramp_11)(int value);
+ int (*tramp_12)(int value);
+ int (*tramp_13)(int value);
+ int (*tramp_14)(int value);
+ int (*tramp_15)(int value);
+ int (*tramp_16)(int value);
+ int (*tramp_17)(int value);
+ int (*tramp_18)(int value);
+ int (*tramp_19)(int value);
+ int (*tramp_20)(int value);
+ int (*tramp_21)(int value);
+ int (*tramp_22)(int value);
+ int (*tramp_23)(int value);
+ int (*tramp_24)(int value);
+ int (*tramp_25)(int value);
+ int (*tramp_26)(int value);
+ int (*tramp_27)(int value);
+ int (*tramp_28)(int value);
+ int (*tramp_29)(int value);
+ int (*tramp_30)(int value);
+ int (*tramp_31)(int value);
+ int (*tramp_32)(int value);
+ int (*tramp_33)(int value);
+ int (*tramp_34)(int value);
+ int (*tramp_35)(int value);
+ int (*tramp_36)(int value);
+ int (*tramp_37)(int value);
+ int (*tramp_38)(int value);
+ int (*tramp_39)(int value);
+ int (*tramp_40)(int value);
+};
+
+struct bpf_testmod_ops2 {
+ int (*test_1)(void);
+};
+
#endif /* _BPF_TESTMOD_H */
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index c125c441abc7..01f241ea2c67 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -81,6 +81,7 @@ CONFIG_NF_NAT=y
CONFIG_RC_CORE=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
+CONFIG_SYN_COOKIES=y
CONFIG_TEST_BPF=m
CONFIG_USERFAULTFD=y
CONFIG_VSOCKETS=y
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_htab.c b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
new file mode 100644
index 000000000000..0766702de846
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_htab.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <network_helpers.h>
+
+#include "arena_htab_asm.skel.h"
+#include "arena_htab.skel.h"
+
+#define PAGE_SIZE 4096
+
+#include "bpf_arena_htab.h"
+
+static void test_arena_htab_common(struct htab *htab)
+{
+ int i;
+
+ printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets);
+ ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL");
+ for (i = 0; htab->buckets && i < 16; i += 4) {
+ /*
+		 * Walk htab buckets and linked lists; all pointers are valid,
+		 * even though they were written by the bpf program.
+ */
+ int val = htab_lookup_elem(htab, i);
+
+ ASSERT_EQ(i, val, "key == value");
+ }
+}
+
+static void test_arena_htab_llvm(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_htab *skel;
+ struct htab *htab;
+ size_t arena_sz;
+ void *area;
+ int ret;
+
+ skel = arena_htab__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_htab__open_and_load"))
+ return;
+
+ area = bpf_map__initial_value(skel->maps.arena, &arena_sz);
+ /* fault-in a page with pgoff == 0 as sanity check */
+ *(volatile int *)area = 0x55aa;
+
+ /* bpf prog will allocate more pages */
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_llvm), &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+ if (skel->bss->skip) {
+ printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
+ test__skip();
+ goto out;
+ }
+ htab = skel->bss->htab_for_user;
+ test_arena_htab_common(htab);
+out:
+ arena_htab__destroy(skel);
+}
+
+static void test_arena_htab_asm(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_htab_asm *skel;
+ struct htab *htab;
+ int ret;
+
+ skel = arena_htab_asm__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_htab_asm__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_asm), &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+ htab = skel->bss->htab_for_user;
+ test_arena_htab_common(htab);
+ arena_htab_asm__destroy(skel);
+}
+
+void test_arena_htab(void)
+{
+ if (test__start_subtest("arena_htab_llvm"))
+ test_arena_htab_llvm();
+ if (test__start_subtest("arena_htab_asm"))
+ test_arena_htab_asm();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c
new file mode 100644
index 000000000000..e61886debab1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_list.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <network_helpers.h>
+
+#define PAGE_SIZE 4096
+
+#include "bpf_arena_list.h"
+#include "arena_list.skel.h"
+
+struct elem {
+ struct arena_list_node node;
+ __u64 value;
+};
+
+static int list_sum(struct arena_list_head *head)
+{
+ struct elem __arena *n;
+ int sum = 0;
+
+ list_for_each_entry(n, head, node)
+ sum += n->value;
+ return sum;
+}
+
+static void test_arena_list_add_del(int cnt)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_list *skel;
+ int expected_sum = (u64)cnt * (cnt - 1) / 2;
+ int ret, sum;
+
+ skel = arena_list__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_list__open_and_load"))
+ return;
+
+ skel->bss->cnt = cnt;
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_add), &opts);
+ ASSERT_OK(ret, "ret_add");
+ ASSERT_OK(opts.retval, "retval");
+ if (skel->bss->skip) {
+ printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
+ test__skip();
+ goto out;
+ }
+ sum = list_sum(skel->bss->list_head);
+ ASSERT_EQ(sum, expected_sum, "sum of elems");
+ ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");
+ ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_del), &opts);
+ ASSERT_OK(ret, "ret_del");
+ sum = list_sum(skel->bss->list_head);
+ ASSERT_EQ(sum, 0, "sum of list elems after del");
+ ASSERT_EQ(skel->bss->list_sum, expected_sum, "sum of list elems computed by prog");
+ ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");
+out:
+ arena_list__destroy(skel);
+}
+
+void test_arena_list(void)
+{
+ if (test__start_subtest("arena_list_1"))
+ test_arena_list_add_del(1);
+ if (test__start_subtest("arena_list_1000"))
+ test_arena_list_add_del(1000);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c
new file mode 100644
index 000000000000..6a707213e46b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "bad_struct_ops.skel.h"
+#include "bad_struct_ops2.skel.h"
+
+static void invalid_prog_reuse(void)
+{
+ struct bad_struct_ops *skel;
+ char *log = NULL;
+ int err;
+
+ skel = bad_struct_ops__open();
+ if (!ASSERT_OK_PTR(skel, "bad_struct_ops__open"))
+ return;
+
+ if (start_libbpf_log_capture())
+ goto cleanup;
+
+ err = bad_struct_ops__load(skel);
+ log = stop_libbpf_log_capture();
+ ASSERT_ERR(err, "bad_struct_ops__load should fail");
+ ASSERT_HAS_SUBSTR(log,
+ "struct_ops init_kern testmod_2 func ptr test_1: invalid reuse of prog test_1",
+ "expected init_kern message");
+
+cleanup:
+ free(log);
+ bad_struct_ops__destroy(skel);
+}
+
+static void unused_program(void)
+{
+ struct bad_struct_ops2 *skel;
+ char *log = NULL;
+ int err;
+
+ skel = bad_struct_ops2__open();
+ if (!ASSERT_OK_PTR(skel, "bad_struct_ops2__open"))
+ return;
+
+	/* struct_ops programs not referenced from any map are opened
+	 * with autoload set to true.
+ */
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo autoload == true");
+
+ if (start_libbpf_log_capture())
+ goto cleanup;
+
+ err = bad_struct_ops2__load(skel);
+ ASSERT_ERR(err, "bad_struct_ops2__load should fail");
+ log = stop_libbpf_log_capture();
+ ASSERT_HAS_SUBSTR(log, "prog 'foo': failed to load",
+ "message about 'foo' failing to load");
+
+cleanup:
+ free(log);
+ bad_struct_ops2__destroy(skel);
+}
+
+void test_bad_struct_ops(void)
+{
+ if (test__start_subtest("invalid_prog_reuse"))
+ invalid_prog_reuse();
+ if (test__start_subtest("unused_program"))
+ unused_program();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e770912fc1d2..4c6ada5b270b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type)
}
bpf_program__set_type(prog, type);
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
+ bpf_program__set_flags(prog, testing_prog_flags());
bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);
err = bpf_object__load(obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 816145bcb647..00965a6e83bb 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3535,6 +3535,32 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 1,
.max_entries = 1,
},
+{
+ .descr = "datasec: name '?.foo bar:buz' is ok",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC ?.data */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0?.foo bar:buz"),
+},
+{
+ .descr = "type name '?foo' is not ok",
+ .raw_types = {
+ /* union ?foo; */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0?foo"),
+ .err_str = "Invalid name",
+ .btf_load_err = true,
+},
{
.descr = "float test #1, well-formed",
@@ -4363,6 +4389,9 @@ static void do_test_raw(unsigned int test_num)
if (err || btf_fd < 0)
goto done;
+ if (!test->map_type)
+ goto done;
+
opts.btf_fd = btf_fd;
opts.btf_key_type_id = test->key_type_id;
opts.btf_value_type_id = test->value_type_id;
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index c2e886399e3c..ecf89df78109 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -27,7 +27,7 @@ static void verify_success(const char *prog_name)
struct bpf_program *prog;
struct bpf_link *link = NULL;
pid_t child_pid;
- int status;
+ int status, err;
skel = cpumask_success__open();
if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
@@ -36,8 +36,8 @@ static void verify_success(const char *prog_name)
skel->bss->pid = getpid();
skel->bss->nr_cpus = libbpf_num_possible_cpus();
- cpumask_success__load(skel);
- if (!ASSERT_OK_PTR(skel, "cpumask_success__load"))
+ err = cpumask_success__load(skel);
+ if (!ASSERT_OK(err, "cpumask_success__load"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
index 4951aa978f33..3b7c57fe55a5 100644
--- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
+++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
@@ -626,50 +626,6 @@ err:
return false;
}
-/* Request BPF program instructions after all rewrites are applied,
- * e.g. verifier.c:convert_ctx_access() is done.
- */
-static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
-{
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 xlated_prog_len;
- __u32 buf_element_size = sizeof(struct bpf_insn);
-
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("bpf_prog_get_info_by_fd failed");
- return -1;
- }
-
- xlated_prog_len = info.xlated_prog_len;
- if (xlated_prog_len % buf_element_size) {
- printf("Program length %d is not multiple of %d\n",
- xlated_prog_len, buf_element_size);
- return -1;
- }
-
- *cnt = xlated_prog_len / buf_element_size;
- *buf = calloc(*cnt, buf_element_size);
- if (!buf) {
- perror("can't allocate xlated program buffer");
- return -ENOMEM;
- }
-
- bzero(&info, sizeof(info));
- info.xlated_prog_len = xlated_prog_len;
- info.xlated_prog_insns = (__u64)(unsigned long)*buf;
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("second bpf_prog_get_info_by_fd failed");
- goto out_free_buf;
- }
-
- return 0;
-
-out_free_buf:
- free(*buf);
- return -1;
-}
-
static void print_insn(void *private_data, const char *fmt, ...)
{
va_list args;
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
index 5c0ebe6ba866..dcb9e5070cc3 100644
--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
@@ -72,6 +72,6 @@ fail:
bpf_tc_hook_destroy(&qdisc_hook);
close_netns(nstoken);
}
- SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ SYS_NOFAIL("ip netns del " NS_TEST);
decap_sanity__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
index 4ad4cd69152e..3379df2d4cf2 100644
--- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
@@ -298,6 +298,6 @@ void test_fib_lookup(void)
fail:
if (nstoken)
close_netns(nstoken);
- SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ SYS_NOFAIL("ip netns del " NS_TEST);
fib_lookup__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index d4b1901f7879..f3932941bbaa 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -19,6 +19,7 @@ static const char *kmulti_syms[] = {
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];
+static __u64 kmulti_cookies[] = { 3, 1, 2 };
#define KPROBE_FUNC "bpf_fentry_test1"
static __u64 kprobe_addr;
@@ -31,6 +32,8 @@ static noinline void uprobe_func(void)
asm volatile ("");
}
+#define PERF_EVENT_COOKIE 0xdeadbeef
+
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
ssize_t offset, ssize_t entry_offset)
{
@@ -62,6 +65,8 @@ again:
ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
"kprobe_addr");
+ ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
+
if (!info.perf_event.kprobe.func_name) {
ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
@@ -81,6 +86,8 @@ again:
goto again;
}
+ ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie");
+
err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
strlen(TP_NAME));
ASSERT_EQ(err, 0, "cmp_tp_name");
@@ -96,10 +103,17 @@ again:
goto again;
}
+ ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie");
+
err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
strlen(UPROBE_FILE));
ASSERT_EQ(err, 0, "cmp_file_name");
break;
+ case BPF_PERF_EVENT_EVENT:
+ ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type");
+ ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config");
+ ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie");
+ break;
default:
err = -1;
break;
@@ -139,6 +153,7 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
.attach_mode = PROBE_ATTACH_MODE_LINK,
.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
+ .bpf_cookie = PERF_EVENT_COOKIE,
);
ssize_t entry_offset = 0;
struct bpf_link *link;
@@ -163,10 +178,13 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
+ DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
struct bpf_link *link;
int link_fd, err;
- link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
+ link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts);
if (!ASSERT_OK_PTR(link, "attach_tp"))
return;
@@ -176,16 +194,53 @@ static void test_tp_fill_link_info(struct test_fill_link_info *skel)
bpf_link__destroy(link);
}
+static void test_event_fill_link_info(struct test_fill_link_info *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ struct bpf_link *link;
+ int link_fd, err, pfd;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_PAGE_FAULTS,
+ .freq = 1,
+ .sample_freq = 1,
+ .size = sizeof(struct perf_event_attr),
+ };
+
+ pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */,
+ -1 /* group id */, 0 /* flags */);
+ if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+ return;
+
+ link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_event"))
+ goto error;
+
+ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0);
+ ASSERT_OK(err, "verify_perf_link_info");
+ bpf_link__destroy(link);
+
+error:
+ close(pfd);
+}
+
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
enum bpf_perf_event_type type)
{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = type == BPF_PERF_EVENT_URETPROBE,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
struct bpf_link *link;
int link_fd, err;
- link = bpf_program__attach_uprobe(skel->progs.uprobe_run,
- type == BPF_PERF_EVENT_URETPROBE,
- 0, /* self pid */
- UPROBE_FILE, uprobe_offset);
+ link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run,
+ 0, /* self pid */
+ UPROBE_FILE, uprobe_offset,
+ &opts);
if (!ASSERT_OK_PTR(link, "attach_uprobe"))
return;
@@ -195,11 +250,11 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
bpf_link__destroy(link);
}
-static int verify_kmulti_link_info(int fd, bool retprobe)
+static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies)
{
+ __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
struct bpf_link_info info;
__u32 len = sizeof(info);
- __u64 addrs[KMULTI_CNT];
int flags, i, err;
memset(&info, 0, sizeof(info));
@@ -221,18 +276,22 @@ again:
if (!info.kprobe_multi.addrs) {
info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
goto again;
}
- for (i = 0; i < KMULTI_CNT; i++)
+ for (i = 0; i < KMULTI_CNT; i++) {
ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
+ ASSERT_EQ(cookies[i], has_cookies ? kmulti_cookies[i] : 0,
+ "kmulti_cookies_value");
+ }
return 0;
}
static void verify_kmulti_invalid_user_buffer(int fd)
{
+ __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
struct bpf_link_info info;
__u32 len = sizeof(info);
- __u64 addrs[KMULTI_CNT];
int err, i;
memset(&info, 0, sizeof(info));
@@ -266,7 +325,20 @@ static void verify_kmulti_invalid_user_buffer(int fd)
info.kprobe_multi.count = KMULTI_CNT;
info.kprobe_multi.addrs = 0x1; /* invalid addr */
err = bpf_link_get_info_by_fd(fd, &info, &len);
- ASSERT_EQ(err, -EFAULT, "invalid_buff");
+ ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs");
+
+ info.kprobe_multi.count = KMULTI_CNT;
+ info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.cookies = 0x1; /* invalid addr */
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies");
+
+ /* cookies && !count */
+ info.kprobe_multi.count = 0;
+ info.kprobe_multi.addrs = ptr_to_u64(NULL);
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "invalid_cookies_count");
}
static int symbols_cmp_r(const void *a, const void *b)
@@ -278,13 +350,15 @@ static int symbols_cmp_r(const void *a, const void *b)
}
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
- bool retprobe, bool invalid)
+ bool retprobe, bool cookies,
+ bool invalid)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct bpf_link *link;
int link_fd, err;
opts.syms = kmulti_syms;
+ opts.cookies = cookies ? kmulti_cookies : NULL;
opts.cnt = KMULTI_CNT;
opts.retprobe = retprobe;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
@@ -293,7 +367,7 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
link_fd = bpf_link__fd(link);
if (!invalid) {
- err = verify_kmulti_link_info(link_fd, retprobe);
+ err = verify_kmulti_link_info(link_fd, retprobe, cookies);
ASSERT_OK(err, "verify_kmulti_link_info");
} else {
verify_kmulti_invalid_user_buffer(link_fd);
@@ -513,6 +587,8 @@ void test_fill_link_info(void)
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
if (test__start_subtest("tracepoint_link_info"))
test_tp_fill_link_info(skel);
+ if (test__start_subtest("event_link_info"))
+ test_event_fill_link_info(skel);
uprobe_offset = get_uprobe_offset(&uprobe_func);
if (test__start_subtest("uprobe_link_info"))
@@ -523,12 +599,16 @@ void test_fill_link_info(void)
qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
for (i = 0; i < KMULTI_CNT; i++)
kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
- if (test__start_subtest("kprobe_multi_link_info"))
- test_kprobe_multi_fill_link_info(skel, false, false);
- if (test__start_subtest("kretprobe_multi_link_info"))
- test_kprobe_multi_fill_link_info(skel, true, false);
+ if (test__start_subtest("kprobe_multi_link_info")) {
+ test_kprobe_multi_fill_link_info(skel, false, false, false);
+ test_kprobe_multi_fill_link_info(skel, false, true, false);
+ }
+ if (test__start_subtest("kretprobe_multi_link_info")) {
+ test_kprobe_multi_fill_link_info(skel, true, false, false);
+ test_kprobe_multi_fill_link_info(skel, true, true, false);
+ }
if (test__start_subtest("kprobe_multi_invalid_ubuff"))
- test_kprobe_multi_fill_link_info(skel, true, true);
+ test_kprobe_multi_fill_link_info(skel, true, true, true);
if (test__start_subtest("uprobe_multi_link_info"))
test_uprobe_multi_fill_link_info(skel, false, false);
diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
index 57c814f5f6a7..8dd2af9081f4 100644
--- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
@@ -59,9 +59,9 @@ static int setup_topology(bool ipv6)
/* Wait for up to 5s for links to come up */
for (i = 0; i < 5; ++i) {
if (ipv6)
- up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null");
+ up = !SYS_NOFAIL("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6);
else
- up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null");
+ up = !SYS_NOFAIL("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR);
if (up)
break;
diff --git a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
new file mode 100644
index 000000000000..7def158da9eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+
+#include "linux/filter.h"
+#include "kptr_xchg_inline.skel.h"
+
+void test_kptr_xchg_inline(void)
+{
+ struct kptr_xchg_inline *skel;
+ struct bpf_insn *insn = NULL;
+ struct bpf_insn exp;
+ unsigned int cnt;
+ int err;
+
+#if !(defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64))
+ test__skip();
+ return;
+#endif
+
+ skel = kptr_xchg_inline__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
+ if (!ASSERT_OK(err, "prog insn"))
+ goto out;
+
+ /* The original instructions are:
+ * r1 = map[id:xxx][0]+0
+ * r2 = 0
+ * call bpf_kptr_xchg#yyy
+ *
+ * call bpf_kptr_xchg#yyy will be inlined as:
+ * r0 = r2
+ * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
+ */
+ if (!ASSERT_GT(cnt, 5, "insn cnt"))
+ goto out;
+
+ exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+ if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
+ goto out;
+
+ exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+ if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
+ goto out;
+out:
+ free(insn);
+ kptr_xchg_inline__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
index 9f766ddd946a..4ed46ed58a7b 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
@@ -30,6 +30,8 @@ void test_libbpf_probe_prog_types(void)
if (prog_type == BPF_PROG_TYPE_UNSPEC)
continue;
+ if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0)
+ continue;
if (!test__start_subtest(prog_type_name))
continue;
@@ -68,6 +70,8 @@ void test_libbpf_probe_map_types(void)
if (map_type == BPF_MAP_TYPE_UNSPEC)
continue;
+ if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0)
+ continue;
if (!test__start_subtest(map_type_name))
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
index eb34d612d6f8..62ea855ec4d0 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void)
const char *map_type_str;
char buf[256];
+ if (map_type == __MAX_BPF_MAP_TYPE)
+ continue;
+
map_type_name = btf__str_by_offset(btf, e->name_off);
map_type_str = libbpf_bpf_map_type_str(map_type);
ASSERT_OK_PTR(map_type_str, map_type_name);
@@ -186,6 +189,9 @@ static void test_libbpf_bpf_prog_type_str(void)
const char *prog_type_str;
char buf[256];
+ if (prog_type == __MAX_BPF_PROG_TYPE)
+ continue;
+
prog_type_name = btf__str_by_offset(btf, e->name_off);
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
ASSERT_OK_PTR(prog_type_str, prog_type_name);
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
index 7a3fa2ff567b..90a98e23be61 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -169,9 +169,9 @@ void test_log_fixup(void)
if (test__start_subtest("bad_core_relo_trunc_none"))
bad_core_relo(0, TRUNC_NONE /* full buf */);
if (test__start_subtest("bad_core_relo_trunc_partial"))
- bad_core_relo(280, TRUNC_PARTIAL /* truncate original log a bit */);
+ bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
if (test__start_subtest("bad_core_relo_trunc_full"))
- bad_core_relo(220, TRUNC_FULL /* truncate also libbpf's message patch */);
+ bad_core_relo(240, TRUNC_FULL /* truncate also libbpf's message patch */);
if (test__start_subtest("bad_core_relo_subprog"))
bad_core_relo_subprog();
if (test__start_subtest("missing_map"))
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
index e9190574e79f..fb1eb8c67361 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
@@ -27,8 +27,6 @@
} \
})
-#define NETNS "ns_lwt"
-
static inline int netns_create(void)
{
return system("ip netns add " NETNS);
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
index 59b38569f310..835a1d756c16 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
@@ -54,6 +54,7 @@
#include <stdbool.h>
#include <stdlib.h>
+#define NETNS "ns_lwt_redirect"
#include "lwt_helpers.h"
#include "test_progs.h"
#include "network_helpers.h"
@@ -85,7 +86,7 @@ static void ping_dev(const char *dev, bool is_ingress)
snprintf(ip, sizeof(ip), "20.0.0.%d", link_index);
/* We won't get a reply. Don't fail here */
- SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d",
ip, ICMP_PAYLOAD_SIZE);
}
@@ -203,6 +204,7 @@ static int setup_redirect_target(const char *target_dev, bool need_mac)
if (!ASSERT_GE(target_index, 0, "if_nametoindex"))
goto fail;
+ SYS(fail, "sysctl -w net.ipv6.conf.all.disable_ipv6=1");
SYS(fail, "ip link add link_err type dummy");
SYS(fail, "ip link set lo up");
SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
index f4bb2d5fcae0..03825d2b45a8 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
@@ -48,6 +48,7 @@
* For case 2, force UDP packets to overflow fq limit. As long as kernel
* is not crashed, it is considered successful.
*/
+#define NETNS "ns_lwt_reroute"
#include "lwt_helpers.h"
#include "network_helpers.h"
#include <linux/net_tstamp.h>
@@ -63,7 +64,7 @@
static void ping_once(const char *ip)
{
/* We won't get a reply. Don't fail here */
- SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d",
ip, ICMP_PAYLOAD_SIZE);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 7c0be7cf550b..8f8d792307c1 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -79,7 +79,7 @@ static void cleanup_netns(struct nstoken *nstoken)
if (nstoken)
close_netns(nstoken);
- SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST);
+ SYS_NOFAIL("ip netns del %s", NS_TEST);
}
static int verify_tsk(int map_fd, int client_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
index 3f1f58d3a729..a1f7e7378a64 100644
--- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
@@ -29,6 +29,10 @@ static void test_success(void)
bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_global_subprog, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_lock, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_unlock, true);
err = rcu_read_lock__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
@@ -75,6 +79,8 @@ static const char * const inproper_region_tests[] = {
"inproper_sleepable_helper",
"inproper_sleepable_kfunc",
"nested_rcu_region",
+ "rcu_read_lock_global_subprog_lock",
+ "rcu_read_lock_global_subprog_unlock",
};
static void test_inproper_region(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 820d0bcfc474..eb74363f9f70 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -840,7 +840,7 @@ static int load_range_cmp_prog(struct range x, struct range y, enum op op,
.log_level = 2,
.log_buf = log_buf,
.log_size = log_sz,
- .prog_flags = BPF_F_TEST_REG_INVARIANTS,
+ .prog_flags = testing_prog_flags(),
);
/* ; skip exit block below
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
index b0583309a94e..9c11938fe597 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
@@ -214,7 +214,7 @@ void test_sock_destroy(void)
cleanup:
if (nstoken)
close_netns(nstoken);
- SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ SYS_NOFAIL("ip netns del " TEST_NS);
if (cgroup_fd >= 0)
close(cgroup_fd);
sock_destroy_prog__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
index 0c365f36c73b..d56e18b25528 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
@@ -112,7 +112,7 @@ void test_sock_iter_batch(void)
{
struct nstoken *nstoken = NULL;
- SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ SYS_NOFAIL("ip netns del " TEST_NS);
SYS(done, "ip netns add %s", TEST_NS);
SYS(done, "ip -net %s link set dev lo up", TEST_NS);
@@ -131,5 +131,5 @@ void test_sock_iter_batch(void)
close_netns(nstoken);
done:
- SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ SYS_NOFAIL("ip netns del " TEST_NS);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
index 18d451be57c8..2b0068742ef9 100644
--- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -48,6 +48,8 @@ static struct {
{ "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" },
+ { "lock_global_subprog_call2", "global function calls are not allowed while holding a lock" },
};
static int match_regex(const char *pattern, const char *string)
diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c
new file mode 100644
index 000000000000..a5cc593c1e1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_autocreate.skel.h"
+#include "struct_ops_autocreate2.skel.h"
+
+static void cant_load_full_object(void)
+{
+ struct struct_ops_autocreate *skel;
+ char *log = NULL;
+ int err;
+
+ skel = struct_ops_autocreate__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+ return;
+
+ if (start_libbpf_log_capture())
+ goto cleanup;
+ /* The testmod_2 map BTF type (struct bpf_testmod_ops___v2) doesn't
+ * match the BTF of the actual struct bpf_testmod_ops defined in the
+ * kernel, so we should fail to load it if we don't disable autocreate
+ * for that map.
+ */
+ err = struct_ops_autocreate__load(skel);
+ log = stop_libbpf_log_capture();
+ if (!ASSERT_ERR(err, "struct_ops_autocreate__load"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log, "libbpf: struct_ops init_kern", "init_kern message");
+ ASSERT_EQ(err, -ENOTSUP, "errno should be ENOTSUP");
+
+cleanup:
+ free(log);
+ struct_ops_autocreate__destroy(skel);
+}
+
+static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map)
+{
+ struct bpf_link *link;
+ int err;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+ return -1;
+
+ /* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+ err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+ bpf_link__destroy(link);
+ return err;
+}
+
+static void can_load_partial_object(void)
+{
+ struct struct_ops_autocreate *skel;
+ int err;
+
+ skel = struct_ops_autocreate__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts"))
+ return;
+
+ err = bpf_map__set_autocreate(skel->maps.testmod_2, false);
+ if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 default autoload");
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.test_2), "test_2 default autoload");
+
+ err = struct_ops_autocreate__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 actual autoload");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.test_2), "test_2 actual autoload");
+
+ check_test_1_link(skel, skel->maps.testmod_1);
+
+cleanup:
+ struct_ops_autocreate__destroy(skel);
+}
+
+static void optional_maps(void)
+{
+ struct struct_ops_autocreate *skel;
+ int err;
+
+ skel = struct_ops_autocreate__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+ return;
+
+ ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate");
+ ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate");
+ ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate");
+ ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate");
+
+ err = bpf_map__set_autocreate(skel->maps.testmod_1, false);
+ err |= bpf_map__set_autocreate(skel->maps.testmod_2, false);
+ err |= bpf_map__set_autocreate(skel->maps.optional_map2, true);
+ if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+ goto cleanup;
+
+ err = struct_ops_autocreate__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+ goto cleanup;
+
+ check_test_1_link(skel, skel->maps.optional_map2);
+
+cleanup:
+ struct_ops_autocreate__destroy(skel);
+}
+
+/* Swap testmod_1->test_1 program from 'bar' to 'foo' using shadow vars.
+ * Loading testmod_1 should then enable autoload for 'foo'.
+ */
+static void autoload_and_shadow_vars(void)
+{
+ struct struct_ops_autocreate2 *skel = NULL;
+ struct bpf_link *link = NULL;
+ int err;
+
+ skel = struct_ops_autocreate2__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate2__open"))
+ return;
+
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.foo), "foo default autoload");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar default autoload");
+
+ /* loading map testmod_1 would switch foo's autoload to true */
+ skel->struct_ops.testmod_1->test_1 = skel->progs.foo;
+
+ err = struct_ops_autocreate2__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_autocreate2__load"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo actual autoload");
+ ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar actual autoload");
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+ goto cleanup;
+
+ /* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+ err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ struct_ops_autocreate2__destroy(skel);
+}
+
+void test_struct_ops_autocreate(void)
+{
+ if (test__start_subtest("cant_load_full_object"))
+ cant_load_full_object();
+ if (test__start_subtest("can_load_partial_object"))
+ can_load_partial_object();
+ if (test__start_subtest("autoload_and_shadow_vars"))
+ autoload_and_shadow_vars();
+ if (test__start_subtest("optional_maps"))
+ optional_maps();
+}
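
The autocreate/autoload knobs exercised above are plain libbpf APIs. A minimal
sketch of the usage pattern, assuming a hypothetical skeleton my_skel with an
optional struct_ops map opt_ops:

    struct my_skel *skel = my_skel__open();

    if (!skel)
        return -1;
    /* skip creating a map whose kernel-side type may be absent;
     * libbpf will likewise leave programs referenced only by this
     * map out of the load
     */
    bpf_map__set_autocreate(skel->maps.opt_ops, false);
    if (my_skel__load(skel)) {
        my_skel__destroy(skel);
        return -1;
    }

bpf_map__autocreate() and bpf_program__autoload() read the current settings
back, which is what the assertions above rely on.
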
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index ea8537c54413..c33c05161a9e 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -117,12 +117,6 @@ static void test_recursion(void)
ASSERT_OK(err, "lookup map_b");
ASSERT_EQ(value, 100, "map_b value");
- prog_fd = bpf_program__fd(skel->progs.on_lookup);
- memset(&info, 0, sizeof(info));
- err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
- ASSERT_OK(err, "get prog info");
- ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");
-
prog_fd = bpf_program__fd(skel->progs.on_update);
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 518f143c5b0f..dbe06aeaa2b2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -188,6 +188,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
{
struct nstoken *nstoken = NULL;
char src_fwd_addr[IFADDR_STR_LEN+1] = {};
+ char src_addr[IFADDR_STR_LEN + 1] = {};
int err;
if (result->dev_mode == MODE_VETH) {
@@ -208,6 +209,9 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
if (get_ifaddr("src_fwd", src_fwd_addr))
goto fail;
+ if (get_ifaddr("src", src_addr))
+ goto fail;
+
result->ifindex_src = if_nametoindex("src");
if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
goto fail;
@@ -270,6 +274,13 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST);
+ SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST);
+ }
+
close_netns(nstoken);
/** setup in 'dst' namespace */
@@ -280,6 +291,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
SYS(fail, "ip link set dev dst up");
+ SYS(fail, "ip link set dev lo up");
SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
@@ -457,7 +469,7 @@ static int set_forwarding(bool enable)
return 0;
}
-static void rcv_tstamp(int fd, const char *expected, size_t s)
+static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
{
struct __kernel_timespec pkt_ts = {};
char ctl[CMSG_SPACE(sizeof(pkt_ts))];
@@ -478,7 +490,7 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
ret = recvmsg(fd, &msg, 0);
if (!ASSERT_EQ(ret, s, "recvmsg"))
- return;
+ return -1;
ASSERT_STRNEQ(data, expected, s, "expected rcv data");
cmsg = CMSG_FIRSTHDR(&msg);
@@ -487,6 +499,12 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
+ if (tstamp) {
+ /* caller will check the tstamp itself */
+ *tstamp = pkt_ns;
+ return 0;
+ }
+
ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");
ret = clock_gettime(CLOCK_REALTIME, &now_ts);
@@ -496,6 +514,60 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))
ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,
"check rcv tstamp");
+ return 0;
+}
+
+static void rcv_tstamp(int fd, const char *expected, size_t s)
+{
+ __rcv_tstamp(fd, expected, s, NULL);
+}
+
+static int wait_netstamp_needed_key(void)
+{
+ int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n;
+ char buf[] = "testing testing";
+ struct nstoken *nstoken;
+ __u64 tstamp = 0;
+
+ nstoken = open_netns(NS_DST);
+ if (!nstoken)
+ return -1;
+
+ srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto done;
+
+ err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ &opt, sizeof(opt));
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
+ goto done;
+
+ cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(cli_fd, 0, "connect_to_fd"))
+ goto done;
+
+again:
+ n = write(cli_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+ err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp);
+ if (!ASSERT_OK(err, "__rcv_tstamp"))
+ goto done;
+ if (!tstamp && nretries++ < 5) {
+ sleep(1);
+ printf("netstamp_needed_key retry#%d\n", nretries);
+ goto again;
+ }
+
+done:
+ if (!tstamp && srv_fd != -1) {
+ close(srv_fd);
+ srv_fd = -1;
+ }
+ if (cli_fd != -1)
+ close(cli_fd);
+ close_netns(nstoken);
+ return srv_fd;
}
static void snd_tstamp(int fd, char *b, size_t s)
@@ -832,11 +904,20 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
{
struct test_tc_dtime *skel;
struct nstoken *nstoken;
- int err;
+ int hold_tstamp_fd, err;
+
+ /* Hold a sk with SOCK_TIMESTAMP set so that the kernel's
+ * net_enable_timestamp() has already taken effect. This
+ * guarantees that the following tests observe a non-zero rcv
+ * tstamp in recvmsg().
+ */
+ hold_tstamp_fd = wait_netstamp_needed_key();
+ if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key"))
+ return;
skel = test_tc_dtime__open();
if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
- return;
+ goto done;
skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
@@ -881,6 +962,7 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
done:
test_tc_dtime__destroy(skel);
+ close(hold_tstamp_fd);
}
static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
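
The __rcv_tstamp() helper above is the usual SO_TIMESTAMPNS_NEW control-message
dance. A standalone sketch of extracting the receive timestamp inside such a
helper (SCM_TIMESTAMPNS_NEW is defined as SO_TIMESTAMPNS_NEW):

    struct __kernel_timespec ts = {};
    char data[64], ctl[CMSG_SPACE(sizeof(ts))];
    struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = ctl, .msg_controllen = sizeof(ctl),
    };
    struct cmsghdr *cmsg;

    if (recvmsg(fd, &msg, 0) < 0)
        return -1;
    /* walk control messages looking for the rx timestamp */
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SO_TIMESTAMPNS_NEW)
            memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
    }
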
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
new file mode 100644
index 000000000000..eaf441dc7e79
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdlib.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "test_tcp_custom_syncookie.skel.h"
+
+static struct test_tcp_custom_syncookie_case {
+ int family, type;
+ char addr[16];
+ char name[10];
+} test_cases[] = {
+ {
+ .name = "IPv4 TCP",
+ .family = AF_INET,
+ .type = SOCK_STREAM,
+ .addr = "127.0.0.1",
+ },
+ {
+ .name = "IPv6 TCP",
+ .family = AF_INET6,
+ .type = SOCK_STREAM,
+ .addr = "::1",
+ },
+};
+
+static int setup_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return -1;
+
+ if (!ASSERT_OK(system("ip link set dev lo up"), "ip"))
+ goto err;
+
+ if (!ASSERT_OK(write_sysctl("/proc/sys/net/ipv4/tcp_ecn", "1"),
+ "write_sysctl"))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int setup_tc(struct test_tcp_custom_syncookie *skel)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach,
+ .prog_fd = bpf_program__fd(skel->progs.tcp_custom_syncookie));
+
+ qdisc_lo.ifindex = if_nametoindex("lo");
+ if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact"))
+ goto err;
+
+ if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach),
+ "filter add dev lo ingress"))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+#define msg "Hello World"
+#define msglen 11
+
+static void transfer_message(int sender, int receiver)
+{
+ char buf[msglen];
+ int ret;
+
+ ret = send(sender, msg, msglen, 0);
+ if (!ASSERT_EQ(ret, msglen, "send"))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ ret = recv(receiver, buf, msglen, 0);
+ if (!ASSERT_EQ(ret, msglen, "recv"))
+ return;
+
+ ret = strncmp(buf, msg, msglen);
+ if (!ASSERT_EQ(ret, 0, "strncmp"))
+ return;
+}
+
+static void create_connection(struct test_tcp_custom_syncookie_case *test_case)
+{
+ int server, client, child;
+
+ server = start_server(test_case->family, test_case->type, test_case->addr, 0, 0);
+ if (!ASSERT_NEQ(server, -1, "start_server"))
+ return;
+
+ client = connect_to_fd(server, 0);
+ if (!ASSERT_NEQ(client, -1, "connect_to_fd"))
+ goto close_server;
+
+ child = accept(server, NULL, 0);
+ if (!ASSERT_NEQ(child, -1, "accept"))
+ goto close_client;
+
+ transfer_message(client, child);
+ transfer_message(child, client);
+
+ close(child);
+close_client:
+ close(client);
+close_server:
+ close(server);
+}
+
+void test_tcp_custom_syncookie(void)
+{
+ struct test_tcp_custom_syncookie *skel;
+ int i;
+
+ if (setup_netns())
+ return;
+
+ skel = test_tcp_custom_syncookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ if (setup_tc(skel))
+ goto destroy_skel;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ if (!test__start_subtest(test_cases[i].name))
+ continue;
+
+ skel->bss->handled_syn = false;
+ skel->bss->handled_ack = false;
+
+ create_connection(&test_cases[i]);
+
+ ASSERT_EQ(skel->bss->handled_syn, true, "SYN is not handled at tc");
+ ASSERT_EQ(skel->bss->handled_ack, true, "ACK is not handled at tc");
+ }
+
+destroy_skel:
+ system("tc qdisc del dev lo clsact");
+
+ test_tcp_custom_syncookie__destroy(skel);
+}
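
The cleanup above shells out to tc(8); libbpf can perform the same teardown
natively. A sketch, assuming the same clsact setup on lo as in setup_tc():

    LIBBPF_OPTS(bpf_tc_hook, hook,
                .ifindex = if_nametoindex("lo"),
                .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);

    /* with both attach points set, bpf_tc_hook_destroy() removes the
     * whole clsact qdisc, i.e. "tc qdisc del dev lo clsact"
     */
    bpf_tc_hook_destroy(&hook);
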
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c
new file mode 100644
index 000000000000..01dc2613c8a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "struct_ops_maybe_null.skel.h"
+#include "struct_ops_maybe_null_fail.skel.h"
+
+/* Test that the verifier accepts a program that accesses a nullable pointer
+ * with a proper check.
+ */
+static void maybe_null(void)
+{
+ struct struct_ops_maybe_null *skel;
+
+ skel = struct_ops_maybe_null__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_and_load"))
+ return;
+
+ struct_ops_maybe_null__destroy(skel);
+}
+
+/* Test that the verifier rejects a program that accesses a nullable pointer
+ * without a check beforehand.
+ */
+static void maybe_null_fail(void)
+{
+ struct struct_ops_maybe_null_fail *skel;
+
+ skel = struct_ops_maybe_null_fail__open_and_load();
+ if (ASSERT_ERR_PTR(skel, "struct_ops_module_fail__open_and_load"))
+ return;
+
+ struct_ops_maybe_null_fail__destroy(skel);
+}
+
+void test_struct_ops_maybe_null(void)
+{
+ /* The verifier checks the programs at load time, so testing both
+ * programs in the same compilation unit is complicated. We run them
+ * in separate objects to simplify the testing.
+ */
+ if (test__start_subtest("maybe_null"))
+ maybe_null();
+ if (test__start_subtest("maybe_null_fail"))
+ maybe_null_fail();
+}
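
For context, the BPF side of the accepting case boils down to guarding the
nullable argument before the dereference. A sketch in the assumed shape of
progs/struct_ops_maybe_null.c (argument names hypothetical):

    SEC("struct_ops/test_maybe_null")
    int BPF_PROG(test_maybe_null, int dummy, struct task_struct *task)
    {
        /* 'task' is marked nullable in the struct_ops definition, so
         * the verifier only permits the access behind a NULL check
         */
        if (task)
            return task->pid;
        return 0;
    }

The failing counterpart is the same program minus the 'if (task)' guard.
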
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
new file mode 100644
index 000000000000..ee5372c7f2c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <time.h>
+
+#include "struct_ops_module.skel.h"
+
+static void check_map_info(struct bpf_map_info *info)
+{
+ struct bpf_btf_info btf_info;
+ char btf_name[256];
+ u32 btf_info_len = sizeof(btf_info);
+ int err, fd;
+
+ fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id);
+ if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd"))
+ return;
+
+ memset(&btf_info, 0, sizeof(btf_info));
+ btf_info.name = ptr_to_u64(btf_name);
+ btf_info.name_len = sizeof(btf_name);
+ err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len);
+ if (!ASSERT_OK(err, "get_value_type_btf_obj_info"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name"))
+ goto cleanup;
+
+cleanup:
+ close(fd);
+}
+
+static int attach_ops_and_check(struct struct_ops_module *skel,
+ struct bpf_map *map,
+ int expected_test_2_result)
+{
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(map);
+ ASSERT_OK_PTR(link, "attach_test_mod_1");
+ if (!link)
+ return -1;
+
+ /* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
+ ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
+ ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");
+
+ bpf_link__destroy(link);
+ return 0;
+}
+
+static void test_struct_ops_load(void)
+{
+ struct struct_ops_module *skel;
+ struct bpf_map_info info = {};
+ int err;
+ u32 len;
+
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ skel->struct_ops.testmod_1->data = 13;
+ skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;
+ /* Since test_2() is not used, disable it from auto-loading;
+ * otherwise the object will fail to load.
+ */
+ bpf_program__set_autoload(skel->progs.test_2, false);
+
+ err = struct_ops_module__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_module_load"))
+ goto cleanup;
+
+ len = sizeof(info);
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
+ &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto cleanup;
+
+ check_map_info(&info);
+ /* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
+ *
+ * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
+ * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
+ * 3).
+ */
+ if (attach_ops_and_check(skel, skel->maps.testmod_1, 20))
+ goto cleanup;
+ if (attach_ops_and_check(skel, skel->maps.testmod_2, 12))
+ goto cleanup;
+
+cleanup:
+ struct_ops_module__destroy(skel);
+}
+
+void serial_test_struct_ops_module(void)
+{
+ if (test__start_subtest("test_struct_ops_load"))
+ test_struct_ops_load();
+}
+
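
The expected value 20 decomposes as the two arguments passed by the testmod
registration path (4, plus 13 from the 'data' shadow var set above) plus a
constant 3 added by the program substituted into .test_2. A hedged sketch of
that program's shape; the exact body lives in progs/struct_ops_module.c:

    SEC("struct_ops/test_3")
    int BPF_PROG(test_3, int a, int b)
    {
        test_2_result = a + b + 3; /* 4 + 13 + 3 = 20 */
        return a + b + 3;
    }
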
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c
new file mode 100644
index 000000000000..645d32b5160c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "struct_ops_multi_pages.skel.h"
+
+static void do_struct_ops_multi_pages(void)
+{
+ struct struct_ops_multi_pages *skel;
+ struct bpf_link *link;
+
+ /* The combined size of all trampolines for skel->maps.multi_pages
+ * should exceed one page (at least on x86).
+ */
+ skel = struct_ops_multi_pages__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_multi_pages_open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.multi_pages);
+ ASSERT_OK_PTR(link, "attach_multi_pages");
+
+ bpf_link__destroy(link);
+ struct_ops_multi_pages__destroy(skel);
+}
+
+void test_struct_ops_multi_pages(void)
+{
+ if (test__start_subtest("multi_pages"))
+ do_struct_ops_multi_pages();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
new file mode 100644
index 000000000000..106ea447965a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <testing_helpers.h>
+
+static void load_bpf_test_no_cfi(void)
+{
+ int fd;
+ int err;
+
+ fd = open("bpf_test_no_cfi.ko", O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "open"))
+ return;
+
+ /* The module will try to register a struct_ops type both without
+ * and with cfi_stubs.
+ *
+ * The registration without cfi_stubs should fail. The module loads
+ * successfully only if both registration attempts produce the
+ * expected results; otherwise finit_module() fails.
+ */
+ err = finit_module(fd, "", 0);
+ close(fd);
+ if (!ASSERT_OK(err, "finit_module"))
+ return;
+
+ err = delete_module("bpf_test_no_cfi", 0);
+ ASSERT_OK(err, "delete_module");
+}
+
+void test_struct_ops_no_cfi(void)
+{
+ if (test__start_subtest("load_bpf_test_no_cfi"))
+ load_bpf_test_no_cfi();
+}
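
finit_module() and delete_module() here come from the selftests'
testing_helpers, since glibc does not wrap these syscalls. Presumably they
reduce to raw syscall(2) wrappers, along these lines:

    #include <unistd.h>
    #include <sys/syscall.h>

    /* load a module from an open fd, with an empty parameter string */
    static inline int finit_module(int fd, const char *param_values, int flags)
    {
        return syscall(__NR_finit_module, fd, param_values, flags);
    }

    /* unload a module by name */
    static inline int delete_module(const char *name, int flags)
    {
        return syscall(__NR_delete_module, name, flags);
    }
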
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
index 2b3c6dd66259..5f1fb0a2ea56 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -118,9 +118,9 @@ fail:
static void cleanup(void)
{
SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0");
- SYS_NOFAIL("ip link del veth1 2> /dev/null");
- SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1);
- SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1);
+ SYS_NOFAIL("ip link del veth1");
+ SYS_NOFAIL("ip link del %s", VXLAN_TUNL_DEV1);
+ SYS_NOFAIL("ip link del %s", IP6VXLAN_TUNL_DEV1);
}
static int add_vxlan_tunnel(void)
@@ -265,9 +265,9 @@ fail:
static void delete_ipip_tunnel(void)
{
SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0);
- SYS_NOFAIL("ip -n at_ns0 fou del port 5555 2> /dev/null");
+ SYS_NOFAIL("ip -n at_ns0 fou del port 5555");
SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1);
- SYS_NOFAIL("ip fou del port 5555 2> /dev/null");
+ SYS_NOFAIL("ip fou del port 5555");
}
static int add_xfrm_tunnel(void)
@@ -346,13 +346,13 @@ fail:
static void delete_xfrm_tunnel(void)
{
- SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32 2> /dev/null",
+ SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32",
IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0);
- SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32 2> /dev/null",
+ SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32",
IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1);
- SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null",
+ SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT);
- SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null",
+ SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
new file mode 100644
index 000000000000..fc4a175d8d76
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "cap_helpers.h"
+#include <fcntl.h>
+#include <sched.h>
+#include <signal.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/unistd.h>
+#include <linux/mount.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
+#include "token_lsm.skel.h"
+
+static inline int sys_mount(const char *dev_name, const char *dir_name,
+ const char *type, unsigned long flags,
+ const void *data)
+{
+ return syscall(__NR_mount, dev_name, dir_name, type, flags, data);
+}
+
+static inline int sys_fsopen(const char *fsname, unsigned flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int sys_fspick(int dfd, const char *path, unsigned flags)
+{
+ return syscall(__NR_fspick, dfd, path, flags);
+}
+
+static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
+}
+
+static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
+{
+ return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
+}
+
+static inline int sys_move_mount(int from_dfd, const char *from_path,
+ int to_dfd, const char *to_path,
+ unsigned flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags);
+}
+
+static int drop_priv_caps(__u64 *old_caps)
+{
+ return cap_disable_effective((1ULL << CAP_BPF) |
+ (1ULL << CAP_PERFMON) |
+ (1ULL << CAP_NET_ADMIN) |
+ (1ULL << CAP_SYS_ADMIN), old_caps);
+}
+
+static int restore_priv_caps(__u64 old_caps)
+{
+ return cap_enable_effective(old_caps, NULL);
+}
+
+static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str)
+{
+ char buf[32];
+ int err;
+
+ if (!mask_str) {
+ if (mask == ~0ULL) {
+ mask_str = "any";
+ } else {
+ snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask);
+ mask_str = buf;
+ }
+ }
+
+ err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key,
+ mask_str, 0);
+ if (err < 0)
+ err = -errno;
+ return err;
+}
+
+#define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0)
+
+struct bpffs_opts {
+ __u64 cmds;
+ __u64 maps;
+ __u64 progs;
+ __u64 attachs;
+ const char *cmds_str;
+ const char *maps_str;
+ const char *progs_str;
+ const char *attachs_str;
+};
+
+static int create_bpffs_fd(void)
+{
+ int fs_fd;
+
+ /* create VFS context */
+ fs_fd = sys_fsopen("bpf", 0);
+ ASSERT_GE(fs_fd, 0, "fs_fd");
+
+ return fs_fd;
+}
+
+static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
+{
+ int mnt_fd, err;
+
+ /* set up token delegation mount options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str);
+ if (!ASSERT_OK(err, "fs_cfg_cmds"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str);
+ if (!ASSERT_OK(err, "fs_cfg_maps"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str);
+ if (!ASSERT_OK(err, "fs_cfg_progs"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str);
+ if (!ASSERT_OK(err, "fs_cfg_attachs"))
+ return err;
+
+ /* instantiate FS object */
+ err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+ if (err < 0)
+ return -errno;
+
+ /* create O_PATH fd for detached mount */
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+ if (mnt_fd < 0)
+ return -errno;
+
+ return mnt_fd;
+}
+
+/* send FD over Unix domain (AF_UNIX) socket */
+static int sendfd(int sockfd, int fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ int fds[1] = { fd }, err;
+ char iobuf[1];
+ struct iovec io = {
+ .iov_base = iobuf,
+ .iov_len = sizeof(iobuf),
+ };
+ union {
+ char buf[CMSG_SPACE(sizeof(fds))];
+ struct cmsghdr align;
+ } u;
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
+ memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
+
+ err = sendmsg(sockfd, &msg, 0);
+ if (err < 0)
+ err = -errno;
+ if (!ASSERT_EQ(err, 1, "sendmsg"))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* receive FD over Unix domain (AF_UNIX) socket */
+static int recvfd(int sockfd, int *fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ int fds[1], err;
+ char iobuf[1];
+ struct iovec io = {
+ .iov_base = iobuf,
+ .iov_len = sizeof(iobuf),
+ };
+ union {
+ char buf[CMSG_SPACE(sizeof(fds))];
+ struct cmsghdr align;
+ } u;
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+
+ err = recvmsg(sockfd, &msg, 0);
+ if (err < 0)
+ err = -errno;
+ if (!ASSERT_EQ(err, 1, "recvmsg"))
+ return -EINVAL;
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (!ASSERT_OK_PTR(cmsg, "cmsg_null") ||
+ !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") ||
+ !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") ||
+ !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type"))
+ return -EINVAL;
+
+ memcpy(fds, CMSG_DATA(cmsg), sizeof(fds));
+ *fd = fds[0];
+
+ return 0;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+ int fd;
+ ssize_t ret;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+ if (fd < 0)
+ return -1;
+
+ ret = write_nointr(fd, buf, count);
+ close(fd);
+ if (ret < 0 || (size_t)ret != count)
+ return -1;
+
+ return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+ uid_t uid;
+ gid_t gid;
+ char map[100];
+
+ uid = getuid();
+ gid = getgid();
+
+ if (unshare(CLONE_NEWUSER))
+ return -1;
+
+ if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+ errno != ENOENT)
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", uid);
+ if (write_file("/proc/self/uid_map", map, strlen(map)))
+ return -1;
+
+
+ snprintf(map, sizeof(map), "0 %d 1", gid);
+ if (write_file("/proc/self/gid_map", map, strlen(map)))
+ return -1;
+
+ if (setgid(0))
+ return -1;
+
+ if (setuid(0))
+ return -1;
+
+ return 0;
+}
+
+typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel);
+
+static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback)
+{
+ int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
+ struct token_lsm *lsm_skel = NULL;
+
+ /* load and attach LSM "policy" before we go into unpriv userns */
+ lsm_skel = token_lsm__open_and_load();
+ if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ lsm_skel->bss->my_pid = getpid();
+ err = token_lsm__attach(lsm_skel);
+ if (!ASSERT_OK(err, "lsm_skel_attach"))
+ goto cleanup;
+
+ /* setup userns with root mappings */
+ err = create_and_enter_userns();
+ if (!ASSERT_OK(err, "create_and_enter_userns"))
+ goto cleanup;
+
+ /* setup mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */
+ err = unshare(CLONE_NEWNS);
+ if (!ASSERT_OK(err, "create_mountns"))
+ goto cleanup;
+
+ err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (!ASSERT_OK(err, "remount_root"))
+ goto cleanup;
+
+ fs_fd = create_bpffs_fd();
+ if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* ensure unprivileged child cannot set delegation options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_maps_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_progs_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm");
+
+ /* pass BPF FS context object to parent */
+ err = sendfd(sock_fd, fs_fd);
+ if (!ASSERT_OK(err, "send_fs_fd"))
+ goto cleanup;
+ zclose(fs_fd);
+
+ /* avoid mucking around with mount namespaces and mounting at a
+ * well-known path, just get the detached BPF FS mount fd back
+ * from the parent
+ */
+ err = recvfd(sock_fd, &mnt_fd);
+ if (!ASSERT_OK(err, "recv_mnt_fd"))
+ goto cleanup;
+
+ /* try to fspick() BPF FS and try to add some delegation options */
+ fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH);
+ if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* ensure unprivileged child cannot reconfigure to set delegation options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ zclose(fs_fd);
+
+ bpffs_fd = openat(mnt_fd, ".", O_RDWR);
+ if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* create BPF token FD and pass it to parent for some extra checks */
+ token_fd = bpf_token_create(bpffs_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "child_token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = sendfd(sock_fd, token_fd);
+ if (!ASSERT_OK(err, "send_token_fd"))
+ goto cleanup;
+ zclose(token_fd);
+
+ /* do custom test logic with the custom-configured BPF FS instance */
+ err = callback(bpffs_fd, lsm_skel);
+ if (!ASSERT_OK(err, "test_callback"))
+ goto cleanup;
+
+ err = 0;
+cleanup:
+ zclose(sock_fd);
+ zclose(mnt_fd);
+ zclose(fs_fd);
+ zclose(bpffs_fd);
+ zclose(token_fd);
+
+ if (lsm_skel)
+ lsm_skel->bss->my_pid = 0;
+ token_lsm__destroy(lsm_skel);
+
+ exit(-err);
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
+{
+ int fs_fd = -1, mnt_fd = -1, token_fd = -1, err;
+
+ err = recvfd(sock_fd, &fs_fd);
+ if (!ASSERT_OK(err, "recv_bpffs_fd"))
+ goto cleanup;
+
+ mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts);
+ if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ zclose(fs_fd);
+
+ /* pass BPF FS context object to parent */
+ err = sendfd(sock_fd, mnt_fd);
+ if (!ASSERT_OK(err, "send_mnt_fd"))
+ goto cleanup;
+ zclose(mnt_fd);
+
+ /* receive BPF token FD back from child for some extra tests */
+ err = recvfd(sock_fd, &token_fd);
+ if (!ASSERT_OK(err, "recv_token_fd"))
+ goto cleanup;
+
+ err = wait_for_pid(child_pid);
+ ASSERT_OK(err, "waitpid_child");
+
+cleanup:
+ zclose(sock_fd);
+ zclose(fs_fd);
+ zclose(mnt_fd);
+ zclose(token_fd);
+
+ if (child_pid > 0)
+ (void)kill(child_pid, SIGKILL);
+}
+
+static void subtest_userns(struct bpffs_opts *bpffs_opts,
+ child_callback_fn child_cb)
+{
+ int sock_fds[2] = { -1, -1 };
+ int child_pid = 0, err;
+
+ err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds);
+ if (!ASSERT_OK(err, "socketpair"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GE(child_pid, 0, "fork"))
+ goto cleanup;
+
+ if (child_pid == 0) {
+ zclose(sock_fds[0]);
+ return child(sock_fds[1], bpffs_opts, child_cb);
+
+ } else {
+ zclose(sock_fds[1]);
+ return parent(child_pid, bpffs_opts, sock_fds[0]);
+ }
+
+cleanup:
+ zclose(sock_fds[0]);
+ zclose(sock_fds[1]);
+ if (child_pid > 0)
+ (void)kill(child_pid, SIGKILL);
+}
+
+static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts);
+ int err, token_fd = -1, map_fd = -1;
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* while inside a non-init userns, we need both a BPF token *and*
+ * CAP_BPF inside the current userns to create a privileged map;
+ * let's test that neither the BPF token alone nor namespaced
+ * CAP_BPF is sufficient
+ */
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* no token, no CAP_BPF -> fail */
+ map_opts.map_flags = 0;
+ map_opts.token_fd = 0;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* token without CAP_BPF -> fail */
+ map_opts.map_flags = BPF_F_TOKEN_FD;
+ map_opts.token_fd = token_fd;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
+ err = restore_priv_caps(old_caps);
+ if (!ASSERT_OK(err, "restore_caps"))
+ goto cleanup;
+
+ /* CAP_BPF without token -> fail */
+ map_opts.map_flags = 0;
+ map_opts.token_fd = 0;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* finally, namespaced CAP_BPF + token -> success */
+ map_opts.map_flags = BPF_F_TOKEN_FD;
+ map_opts.token_fd = token_fd;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+ zclose(token_fd);
+ zclose(map_fd);
+ return err;
+}
+
+static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, btf_opts);
+ int err, token_fd = -1, btf_fd = -1;
+ const void *raw_btf_data;
+ struct btf *btf = NULL;
+ __u32 raw_btf_size;
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* while inside a non-init userns, we need both a BPF token *and*
+ * CAP_BPF inside the current userns to load BTF; let's test that
+ * neither the BPF token alone nor namespaced CAP_BPF is sufficient
+ */
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* set up trivial BTF data to load into the kernel */
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ goto cleanup;
+
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data"))
+ goto cleanup;
+
+ /* no token + no CAP_BPF -> failure */
+ btf_opts.btf_flags = 0;
+ btf_opts.token_fd = 0;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail"))
+ goto cleanup;
+
+ /* token + no CAP_BPF -> failure */
+ btf_opts.btf_flags = BPF_F_TOKEN_FD;
+ btf_opts.token_fd = token_fd;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail"))
+ goto cleanup;
+
+ /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
+ err = restore_priv_caps(old_caps);
+ if (!ASSERT_OK(err, "restore_caps"))
+ goto cleanup;
+
+ /* token + CAP_BPF -> success */
+ btf_opts.btf_flags = BPF_F_TOKEN_FD;
+ btf_opts.token_fd = token_fd;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success"))
+ goto cleanup;
+
+ err = 0;
+cleanup:
+ btf__free(btf);
+ zclose(btf_fd);
+ zclose(token_fd);
+ return err;
+}
+
+static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts);
+ int err, token_fd = -1, prog_fd = -1;
+ struct bpf_insn insns[] = {
+ /* bpf_jiffies64() requires CAP_BPF */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ /* bpf_get_current_task() requires CAP_PERFMON */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task),
+ /* r0 = 0; exit; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* validate we can successfully load a BPF program with a token;
+ * being an XDP program (CAP_NET_ADMIN) that uses bpf_jiffies64()
+ * (CAP_BPF) and bpf_get_current_task() (CAP_PERFMON), it validates
+ * that the BPF token is wired properly in a bunch of places in the
+ * kernel
+ */
+ prog_opts.prog_flags = BPF_F_TOKEN_FD;
+ prog_opts.token_fd = token_fd;
+ prog_opts.expected_attach_type = BPF_XDP;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_GT(prog_fd, 0, "prog_fd")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ /* no token + caps -> failure */
+ prog_opts.prog_flags = 0;
+ prog_opts.token_fd = 0;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* no caps + token -> failure */
+ prog_opts.prog_flags = BPF_F_TOKEN_FD;
+ prog_opts.token_fd = token_fd;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ /* no caps + no token -> definitely a failure */
+ prog_opts.prog_flags = 0;
+ prog_opts.token_fd = 0;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ err = 0;
+cleanup:
+ zclose(prog_fd);
+ zclose(token_fd);
+ return err;
+}
+
+static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct priv_map *skel;
+ int err;
+
+ skel = priv_map__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ priv_map__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = priv_map__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+
+ err = priv_map__load(skel);
+ priv_map__destroy(skel);
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct priv_prog *skel;
+ int err;
+
+ skel = priv_prog__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ priv_prog__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+
+ /* provide BPF token, but reject bpf_token_capable() with LSM */
+ lsm_skel->bss->reject_capable = true;
+ lsm_skel->bss->reject_cmd = false;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load"))
+ return -EINVAL;
+
+ /* provide BPF token, but reject bpf_token_cmd() with LSM */
+ lsm_skel->bss->reject_capable = false;
+ lsm_skel->bss->reject_cmd = true;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command,
+ * which should cause struct_ops application to fail, as BTF won't be uploaded
+ * into the kernel, even if STRUCT_OPS programs themselves are allowed
+ */
+static int validate_struct_ops_load(int mnt_fd, bool expect_success)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (expect_success) {
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+ } else /* expect failure */ {
+ if (!ASSERT_ERR(err, "obj_token_path_load"))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
+#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
+#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs"
+
+static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ /* before we mount BPF FS with token delegation, struct_ops skeleton
+ * should fail to load
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF
+ * token automatically and implicitly
+ */
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH);
+ if (!ASSERT_OK(err, "move_mount_bpffs"))
+ return -EINVAL;
+
+ /* disable implicit BPF token creation by setting
+ * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail
+ */
+ err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/);
+ if (!ASSERT_OK(err, "setenv_token_path"))
+ return -EINVAL;
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) {
+ unsetenv(TOKEN_ENVVAR);
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+ unsetenv(TOKEN_ENVVAR);
+
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
+ * creating BPF token from /sys/fs/bpf mount point
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+ return -EINVAL;
+
+ dummy_st_ops_success__destroy(skel);
+
+ /* now disable implicit token through empty bpf_token_path, should fail */
+ opts.bpf_token_path = "";
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+ return -EINVAL;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ /* before we mount BPF FS with token delegation, struct_ops skeleton
+ * should fail to load
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* mount custom BPF FS over custom location, so libbpf can't create
+ * BPF token implicitly, unless pointed to it through
+ * LIBBPF_BPF_TOKEN_PATH envvar
+ */
+ rmdir(TOKEN_BPFFS_CUSTOM);
+ if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom"))
+ goto err_out;
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH);
+ if (!ASSERT_OK(err, "move_mount_bpffs"))
+ goto err_out;
+
+ /* even though we have BPF FS with delegation, it's not at default
+ * /sys/fs/bpf location, so we still fail to load until envvar is set up
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) {
+ dummy_st_ops_success__destroy(skel);
+ goto err_out;
+ }
+
+ err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/);
+ if (!ASSERT_OK(err, "setenv_token_path"))
+ goto err_out;
+
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
+ * creating BPF token from custom mount point
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+ goto err_out;
+
+ dummy_st_ops_success__destroy(skel);
+
+ /* now disable implicit token through empty bpf_token_path, envvar
+ * will be ignored, should fail
+ */
+ opts.bpf_token_path = "";
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+ goto err_out;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ goto err_out;
+
+ rmdir(TOKEN_BPFFS_CUSTOM);
+ unsetenv(TOKEN_ENVVAR);
+ return 0;
+err_out:
+ rmdir(TOKEN_BPFFS_CUSTOM);
+ unsetenv(TOKEN_ENVVAR);
+ return -EINVAL;
+}
+
+#define bit(n) (1ULL << (n))
+
+void test_token(void)
+{
+ if (test__start_subtest("map_token")) {
+ struct bpffs_opts opts = {
+ .cmds_str = "map_create",
+ .maps_str = "stack",
+ };
+
+ subtest_userns(&opts, userns_map_create);
+ }
+ if (test__start_subtest("btf_token")) {
+ struct bpffs_opts opts = {
+ .cmds = 1ULL << BPF_BTF_LOAD,
+ };
+
+ subtest_userns(&opts, userns_btf_load);
+ }
+ if (test__start_subtest("prog_token")) {
+ struct bpffs_opts opts = {
+ .cmds_str = "PROG_LOAD",
+ .progs_str = "XDP",
+ .attachs_str = "xdp",
+ };
+
+ subtest_userns(&opts, userns_prog_load);
+ }
+ if (test__start_subtest("obj_priv_map")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_MAP_CREATE),
+ .maps = bit(BPF_MAP_TYPE_QUEUE),
+ };
+
+ subtest_userns(&opts, userns_obj_priv_map);
+ }
+ if (test__start_subtest("obj_priv_prog")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_PROG_LOAD),
+ .progs = bit(BPF_PROG_TYPE_KPROBE),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_prog);
+ }
+ if (test__start_subtest("obj_priv_btf_fail")) {
+ struct bpffs_opts opts = {
+ /* disallow BTF loading */
+ .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_btf_fail);
+ }
+ if (test__start_subtest("obj_priv_btf_success")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_btf_success);
+ }
+ if (test__start_subtest("obj_priv_implicit_token")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_implicit_token);
+ }
+ if (test__start_subtest("obj_priv_implicit_token_envvar")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_implicit_token_envvar);
+ }
+}
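
Stripped of the userns/LSM scaffolding, the token flow these subtests exercise
is small. A minimal sketch, assuming bpffs_fd refers to a BPF FS instance that
was mounted with the appropriate delegate_* options:

    LIBBPF_OPTS(bpf_map_create_opts, opts);
    int token_fd, map_fd;

    /* derive a token from the delegated BPF FS */
    token_fd = bpf_token_create(bpffs_fd, NULL);
    if (token_fd < 0)
        return token_fd;

    opts.map_flags = BPF_F_TOKEN_FD;
    opts.token_fd = token_fd;
    /* succeeds from inside a userns given namespaced CAP_BPF plus a
     * token whose BPF FS delegates BPF_MAP_CREATE for stack maps
     */
    map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "tok_map", 0, 8, 1, &opts);
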
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
new file mode 100644
index 000000000000..a222df765bc3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "tracing_failure.skel.h"
+
+static void test_bpf_spin_lock(bool is_spin_lock)
+{
+ struct tracing_failure *skel;
+ int err;
+
+ skel = tracing_failure__open();
+ if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+ return;
+
+ if (is_spin_lock)
+ bpf_program__set_autoload(skel->progs.test_spin_lock, true);
+ else
+ bpf_program__set_autoload(skel->progs.test_spin_unlock, true);
+
+ err = tracing_failure__load(skel);
+ if (!ASSERT_OK(err, "tracing_failure__load"))
+ goto out;
+
+ err = tracing_failure__attach(skel);
+ ASSERT_ERR(err, "tracing_failure__attach");
+
+out:
+ tracing_failure__destroy(skel);
+}
+
+void test_tracing_failure(void)
+{
+ if (test__start_subtest("bpf_spin_lock"))
+ test_bpf_spin_lock(true);
+ if (test__start_subtest("bpf_spin_unlock"))
+ test_bpf_spin_lock(false);
+}
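
The two programs are expected to be declared with the '?' SEC prefix (autoload
off by default) so that each subtest can opt exactly one of them in via
bpf_program__set_autoload(). A sketch of the BPF side, in the assumed shape of
progs/tracing_failure.c:

    SEC("?fentry/bpf_spin_lock")
    int BPF_PROG(test_spin_lock)
    {
        return 0;
    }

    SEC("?fentry/bpf_spin_unlock")
    int BPF_PROG(test_spin_unlock)
    {
        return 0;
    }

Either program loads fine but must fail to attach: tracing programs are not
allowed on the spin lock helpers, which is exactly what ASSERT_ERR() checks.
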
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index d62c5bf00e71..985273832f89 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -4,6 +4,7 @@
#include "cap_helpers.h"
#include "verifier_and.skel.h"
+#include "verifier_arena.skel.h"
#include "verifier_array_access.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bitfield_write.skel.h"
@@ -28,6 +29,7 @@
#include "verifier_div0.skel.h"
#include "verifier_div_overflow.skel.h"
#include "verifier_global_subprogs.skel.h"
+#include "verifier_global_ptr_args.skel.h"
#include "verifier_gotol.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
@@ -117,6 +119,7 @@ static void run_tests_aux(const char *skel_name,
#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL)
void test_verifier_and(void) { RUN(verifier_and); }
+void test_verifier_arena(void) { RUN(verifier_arena); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bitfield_write(void) { RUN(verifier_bitfield_write); }
void test_verifier_bounds(void) { RUN(verifier_bounds); }
@@ -140,6 +143,7 @@ void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_st
void test_verifier_div0(void) { RUN(verifier_div0); }
void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
void test_verifier_global_subprogs(void) { RUN(verifier_global_subprogs); }
+void test_verifier_global_ptr_args(void) { RUN(verifier_global_ptr_args); }
void test_verifier_gotol(void) { RUN(verifier_gotol); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
index f3927829a55a..4599154c8e9b 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdpwall.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
@@ -9,7 +9,7 @@ void test_xdpwall(void)
struct xdpwall *skel;
skel = xdpwall__open_and_load();
- ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?");
+ ASSERT_OK_PTR(skel, "Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5?");
xdpwall__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c
new file mode 100644
index 000000000000..b7bb712cacfd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_htab.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+} arena SEC(".maps");
+
+#include "bpf_arena_htab.h"
+
+void __arena *htab_for_user;
+bool skip = false;
+
+int zero = 0;
+
+SEC("syscall")
+int arena_htab_llvm(void *ctx)
+{
+#if defined(__BPF_FEATURE_ARENA_CAST) || defined(BPF_ARENA_FORCE_ASM)
+ struct htab __arena *htab;
+ __u64 i;
+
+ htab = bpf_alloc(sizeof(*htab));
+ cast_kern(htab);
+ htab_init(htab);
+
+ /* first run. No old elems in the table */
+ for (i = zero; i < 1000; i++)
+ htab_update_elem(htab, i, i);
+
+ /* should replace all elems with new ones */
+ for (i = zero; i < 1000; i++)
+ htab_update_elem(htab, i, i);
+ cast_user(htab);
+ htab_for_user = htab;
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
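
Because htab_for_user is converted with cast_user() before being stored, user
space can walk the table directly through the mmap()ed arena once the program
has run. A hedged sketch of the consumer side (helper from bpf_arena_htab.h;
harness details assumed):

    struct htab *htab;
    __u64 i;

    /* after running arena_htab_llvm() via bpf_prog_test_run_opts() */
    htab = skel->bss->htab_for_user;
    for (i = 0; i < 1000; i++)
        /* the bpf_arena_htab.h helpers are written to work from both
         * BPF and user space over the shared arena mapping
         */
        ASSERT_EQ(htab_lookup_elem(htab, i), i, "arena lookup");
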
diff --git a/tools/testing/selftests/bpf/progs/arena_htab_asm.c b/tools/testing/selftests/bpf/progs/arena_htab_asm.c
new file mode 100644
index 000000000000..6cd70ea12f0d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_htab_asm.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#define BPF_ARENA_FORCE_ASM
+#define arena_htab_llvm arena_htab_asm
+#include "arena_htab.c"
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
new file mode 100644
index 000000000000..cd35b8448435
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_list.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+ __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+ __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+#include "bpf_arena_alloc.h"
+#include "bpf_arena_list.h"
+
+struct elem {
+ struct arena_list_node node;
+ __u64 value;
+};
+
+struct arena_list_head __arena *list_head;
+int list_sum;
+int cnt;
+bool skip = false;
+
+#ifdef __BPF_FEATURE_ARENA_CAST
+long __arena arena_sum;
+int __arena test_val = 1;
+struct arena_list_head __arena global_head;
+#else
+long arena_sum SEC(".arena.1");
+int test_val SEC(".arena.1");
+#endif
+
+int zero;
+
+SEC("syscall")
+int arena_list_add(void *ctx)
+{
+#ifdef __BPF_FEATURE_ARENA_CAST
+ __u64 i;
+
+ list_head = &global_head;
+
+ for (i = zero; i < cnt; cond_break, i++) {
+ struct elem __arena *n = bpf_alloc(sizeof(*n));
+
+ test_val++;
+ n->value = i;
+ arena_sum += i;
+ list_add_head(&n->node, list_head);
+ }
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+int arena_list_del(void *ctx)
+{
+#ifdef __BPF_FEATURE_ARENA_CAST
+ struct elem __arena *n;
+ int sum = 0;
+
+ arena_sum = 0;
+ list_for_each_entry(n, list_head, node) {
+ sum += n->value;
+ arena_sum += n->value;
+ list_del(&n->node);
+ bpf_free(n);
+ }
+ list_sum = sum;
+#else
+ skip = true;
+#endif
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c
index 3517c0e01206..36734683acbd 100644
--- a/tools/testing/selftests/bpf/progs/async_stack_depth.c
+++ b/tools/testing/selftests/bpf/progs/async_stack_depth.c
@@ -30,7 +30,7 @@ static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
}
SEC("tc")
-__failure __msg("combined stack size of 2 calls is 576. Too large")
+__failure __msg("combined stack size of 2 calls is")
int pseudo_call_check(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
@@ -45,7 +45,7 @@ int pseudo_call_check(struct __sk_buff *ctx)
}
SEC("tc")
-__failure __msg("combined stack size of 2 calls is 608. Too large")
+__failure __msg("combined stack size of 2 calls is")
int async_call_root_check(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
new file mode 100644
index 000000000000..b7e175cd0af0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1) { return 0; }
+
+SEC("struct_ops/test_2")
+int BPF_PROG(test_2) { return 0; }
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops2 testmod_2 = {
+ .test_1 = (void *)test_1
+};
diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops2.c b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c
new file mode 100644
index 000000000000..64a95f6be86d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* This is an unused struct_ops program; it lacks a corresponding
+ * struct_ops map, which provides attachment information.
+ * Without additional configuration, an attempt to load such
+ * a BPF object file would fail.
+ */
+SEC("struct_ops/foo")
+void foo(void) {}
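As the comment says, such an object needs help to load. A sketch of the user-space workaround using libbpf's autoload knob (error handling elided):

	struct bpf_object *obj = bpf_object__open("bad_struct_ops2.bpf.o");
	struct bpf_program *prog;
	int err;

	prog = bpf_object__find_program_by_name(obj, "foo");
	/* The map-less struct_ops program cannot be loaded; skip it. */
	bpf_program__set_autoload(prog, false);
	err = bpf_object__load(obj);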
diff --git a/tools/testing/selftests/bpf/progs/bpf_compiler.h b/tools/testing/selftests/bpf/progs/bpf_compiler.h
new file mode 100644
index 000000000000..a7c343dc82e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_compiler.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_COMPILER_H__
+#define __BPF_COMPILER_H__
+
+#define DO_PRAGMA_(X) _Pragma(#X)
+
+#if __clang__
+#define __pragma_loop_unroll DO_PRAGMA_(clang loop unroll(enable))
+#else
+/* In GCC, -funroll-loops, which is enabled with -O2, should have the
+   same impact as the loop-unroll-enable pragma above. */
+#define __pragma_loop_unroll
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(clang loop unroll_count(N))
+#else
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(GCC unroll N)
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_full DO_PRAGMA_(clang loop unroll(full))
+#else
+#define __pragma_loop_unroll_full DO_PRAGMA_(GCC unroll 65534)
+#endif
+
+#if __clang__
+#define __pragma_loop_no_unroll DO_PRAGMA_(clang loop unroll(disable))
+#else
+#define __pragma_loop_no_unroll DO_PRAGMA_(GCC unroll 1)
+#endif
+
+#endif
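Usage mirrors the conversions in the files below: the wrapper goes where the raw pragma used to sit and expands to the right spelling for either compiler. A small sketch:

	#include "bpf_compiler.h"

	static int sum16(const int *arr)
	{
		int i, s = 0;

		__pragma_loop_unroll_full	/* was: #pragma clang loop unroll(full) */
		for (i = 0; i < 16; i++)
			s += arr[i];
		return s;
	}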
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 2fd59970c43a..fb2f5513e29e 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -80,7 +80,7 @@
#define __imm(name) [name]"i"(name)
#define __imm_const(name, expr) [name]"i"(expr)
#define __imm_addr(name) [name]"i"(&name)
-#define __imm_ptr(name) [name]"p"(&name)
+#define __imm_ptr(name) [name]"r"(&name)
#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
/* Magic constants used with __retval() */
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index e8bd4b7b5ef7..7001965d1cc3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -51,9 +51,25 @@
#define ICSK_TIME_LOSS_PROBE 5
#define ICSK_TIME_REO_TIMEOUT 6
+#define ETH_ALEN 6
#define ETH_HLEN 14
+#define ETH_P_IP 0x0800
#define ETH_P_IPV6 0x86DD
+#define NEXTHDR_TCP 6
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+#define TCPOPT_WINDOW 3
+#define TCPOPT_TIMESTAMP 8
+#define TCPOPT_SACK_PERM 4
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_SACK_PERM 2
+
#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 3
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
index 610c2427fd93..3500e4b69ebe 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
@@ -27,32 +27,6 @@ bool is_cgroup1 = 0;
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
-static void __on_lookup(struct cgroup *cgrp)
-{
- bpf_cgrp_storage_delete(&map_a, cgrp);
- bpf_cgrp_storage_delete(&map_b, cgrp);
-}
-
-SEC("fentry/bpf_local_storage_lookup")
-int BPF_PROG(on_lookup)
-{
- struct task_struct *task = bpf_get_current_task_btf();
- struct cgroup *cgrp;
-
- if (is_cgroup1) {
- cgrp = bpf_task_get_cgroup1(task, target_hid);
- if (!cgrp)
- return 0;
-
- __on_lookup(cgrp);
- bpf_cgroup_release(cgrp);
- return 0;
- }
-
- __on_lookup(task->cgroups->dfl_cgrp);
- return 0;
-}
-
static void __on_update(struct cgroup *cgrp)
{
long *ptr;
diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
index ca8aa2f116b3..2ef0e0c46d17 100644
--- a/tools/testing/selftests/bpf/progs/connect_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
@@ -28,8 +28,7 @@ int connect_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 0;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
index 0cd4aebb97cf..c705d8112a35 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_common.h
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -23,41 +23,42 @@ struct array_map {
__uint(max_entries, 1);
} __cpumask_map SEC(".maps");
-struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
-void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
-struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
-u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
-u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak;
+void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
+struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak;
u32 bpf_cpumask_first_and(const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
-void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
-void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym __weak;
bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
+ const struct cpumask *src2) __ksym __weak;
void bpf_cpumask_or(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
+ const struct cpumask *src2) __ksym __weak;
void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
-bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
-void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
-u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym;
-u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
-
-void bpf_rcu_read_lock(void) __ksym;
-void bpf_rcu_read_unlock(void) __ksym;
+ const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak;
+
+void bpf_rcu_read_lock(void) __ksym __weak;
+void bpf_rcu_read_unlock(void) __ksym __weak;
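+
+/* Marking the kfuncs __weak lets a program probe for their presence
+ * at runtime instead of failing outright, e.g. (a sketch, using the
+ * bpf_ksym_exists() macro from bpf_helpers.h):
+ *
+ *	if (!bpf_ksym_exists(bpf_cpumask_create))
+ *		return 0;
+ */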
static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
{
diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
index 9c078f34bbb2..5a76754f846b 100644
--- a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
@@ -27,8 +27,7 @@ int getpeername_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 1;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
index ac7145111497..7867113c696f 100644
--- a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
@@ -27,8 +27,7 @@ int getsockname_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 1;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index fe971992e635..3db416606f2f 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -5,6 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_compiler.h"
#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
@@ -78,8 +79,8 @@ int iter_err_unsafe_asm_loop(const void *ctx)
"*(u32 *)(r1 + 0) = r6;" /* invalid */
:
: [it]"r"(&it),
- [small_arr]"p"(small_arr),
- [zero]"p"(zero),
+ [small_arr]"r"(small_arr),
+ [zero]"r"(zero),
__imm(bpf_iter_num_new),
__imm(bpf_iter_num_next),
__imm(bpf_iter_num_destroy)
@@ -183,7 +184,7 @@ int iter_pragma_unroll_loop(const void *ctx)
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 2);
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
@@ -238,7 +239,7 @@ int iter_multiple_sequential_loops(const void *ctx)
bpf_iter_num_destroy(&it);
bpf_iter_num_new(&it, 0, 2);
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
new file mode 100644
index 000000000000..2414ac20b6d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bin_data {
+ char blob[32];
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(kptr) struct bin_data __kptr * ptr;
+
+SEC("tc")
+__naked int kptr_xchg_inline(void)
+{
+ asm volatile (
+ "r1 = %[ptr] ll;"
+ "r2 = 0;"
+ "call %[bpf_kptr_xchg];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r0;"
+ "r2 = 0;"
+ "call %[bpf_obj_drop_impl];"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(ptr),
+ __imm(bpf_kptr_xchg),
+ __imm(bpf_obj_drop_impl)
+ : __clobber_all
+ );
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void __btf_root(void)
+{
+ bpf_obj_drop(NULL);
+}
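For readability, the inline assembly corresponds to roughly the C below; a sketch only, since the point of the test is precisely to exercise the raw call sequence that the verifier inlines:

	struct bin_data *old;

	/* Swap NULL into the kptr slot; drop the old object, if any. */
	old = bpf_kptr_xchg(&ptr, NULL);
	if (old)
		bpf_obj_drop(old);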
diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c
index b35337926d66..0de0357f57cc 100644
--- a/tools/testing/selftests/bpf/progs/loop4.c
+++ b/tools/testing/selftests/bpf/progs/loop4.c
@@ -3,6 +3,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
SEC("socket")
@@ -10,7 +12,7 @@ int combinations(volatile struct __sk_buff* skb)
{
int ret = 0, i;
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 20; i++)
if (skb->len)
ret |= 1 << i;
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index 3325da17ec81..efaf622c28dd 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -316,7 +316,7 @@ struct lpm_trie {
} __attribute__((preserve_access_index));
struct lpm_key {
- struct bpf_lpm_trie_key trie_key;
+ struct bpf_lpm_trie_key_hdr trie_key;
__u32 data;
};
diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c
new file mode 100644
index 000000000000..9085be50f03b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_map.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 1);
+ __type(value, __u32);
+} priv_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c
new file mode 100644
index 000000000000..3c7b2b618c8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe")
+int kprobe_prog(void *ctx)
+{
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index de3b6e4e4d0a..6957d9f2805e 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -8,6 +8,7 @@
#include "profiler.h"
#include "err.h"
#include "bpf_experimental.h"
+#include "bpf_compiler.h"
#ifndef NULL
#define NULL 0
@@ -169,7 +170,7 @@ static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
int spid)
{
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == spid)
@@ -185,7 +186,7 @@ static INLINE void populate_ancestors(struct task_struct* task,
ancestors_data->num_ancestors = 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
parent = BPF_CORE_READ(parent, real_parent);
@@ -212,7 +213,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
size_t filepart_length;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
filepart_length =
@@ -261,7 +262,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local,
pids_cgrp_id___local);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys_state* subsys =
@@ -402,7 +403,7 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (kill_data == NULL)
return 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == 0) {
@@ -482,7 +483,7 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
filepart_length =
@@ -508,7 +509,7 @@ is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
{
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
@@ -629,7 +630,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 026d573ce179..86484f07e1d1 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -8,6 +8,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_compiler.h"
#define FUNCTION_NAME_LEN 64
#define FILE_NAME_LEN 128
@@ -298,11 +299,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
#if defined(USE_ITER)
/* no for loop, no unrolling */
#elif defined(NO_UNROLL)
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#elif defined(UNROLL_COUNT)
-#pragma clang loop unroll_count(UNROLL_COUNT)
+ __pragma_loop_unroll_count(UNROLL_COUNT)
#else
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
#endif /* NO_UNROLL */
/* Unwind python stack */
#ifdef USE_ITER
diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
index 14fb01437fb8..ab3a532b7dd6 100644
--- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
@@ -319,3 +319,123 @@ int cross_rcu_region(void *ctx)
bpf_rcu_read_unlock();
return 0;
}
+
+__noinline
+static int static_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog(NULL);
+}
+
+__noinline
+static int static_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_lock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_lock(NULL);
+}
+
+__noinline
+static int static_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_unlock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_unlock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_unlock(NULL);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += static_subprog(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += global_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += static_subprog_lock(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += global_subprog_lock(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += static_subprog_unlock(ctx);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_subprog_unlock(ret);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
index 4dfbc8552558..1c7ab44bccfa 100644
--- a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
@@ -27,8 +27,7 @@ int recvmsg_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 1;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS,
sizeof(SERVUN_ADDRESS) - 1) != 0)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
index 1f67e832666e..d8869b03dda9 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
@@ -28,8 +28,7 @@ int sendmsg_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 0;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
index 3e745793b27a..46d6eb2a3b17 100644
--- a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
+++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
@@ -12,8 +12,6 @@ int cookie_found = 0;
__u64 cookie = 0;
__u32 omem = 0;
-void *bpf_rdonly_cast(void *, __u32) __ksym;
-
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
@@ -29,7 +27,7 @@ int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
if (local_storage_ptr != local_storage)
return 0;
- sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock));
+ sk = bpf_core_cast(sk_ptr, struct sock);
if (sk->sk_cookie.counter != cookie)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
index ffbbfe1fa1c1..96531b0d9d55 100644
--- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
@@ -32,7 +32,7 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
if (!sk)
return 0;
- sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock));
+ sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != AF_INET6 ||
sk->sk_state != TCP_LISTEN ||
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr))
@@ -68,7 +68,7 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx)
if (!sk)
return 0;
- sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock));
+ sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != AF_INET6 ||
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr))
return 0;
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index 40df2cc26eaf..f74459eead26 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
typedef uint32_t pid_t;
struct task_struct {};
@@ -419,9 +421,9 @@ static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) {
if (i >= map.cnt)
@@ -560,25 +562,25 @@ static void *read_strobe_meta(struct task_struct *task,
payload_off = sizeof(data->payload);
#else
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_INTS; ++i) {
read_int_var(cfg, i, tls_base, &value, data);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_STRS; ++i) {
payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c
new file mode 100644
index 000000000000..ba10c3896213
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ test_1_result = 42;
+ return 0;
+}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_2)
+{
+ return 0;
+}
+
+struct bpf_testmod_ops___v1 {
+ int (*test_1)(void);
+};
+
+struct bpf_testmod_ops___v2 {
+ int (*test_1)(void);
+ int (*does_not_exist)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v1 testmod_1 = {
+ .test_1 = (void *)test_1
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+ .test_1 = (void *)test_1,
+ .does_not_exist = (void *)test_2
+};
+
+SEC("?.struct_ops")
+struct bpf_testmod_ops___v1 optional_map = {
+ .test_1 = (void *)test_1,
+};
+
+SEC("?.struct_ops.link")
+struct bpf_testmod_ops___v1 optional_map2 = {
+ .test_1 = (void *)test_1,
+};
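The ___v1/___v2 suffixes are CO-RE "flavors": libbpf ignores everything from the triple underscore onward when resolving the kernel type, so both maps target struct bpf_testmod_ops. Since testmod_2 references a member that does not exist, a test presumably has to keep that map from being created; a sketch with assumed skeleton names:

	struct struct_ops_autocreate *skel = struct_ops_autocreate__open();
	int err;

	/* testmod_2 refers to does_not_exist; don't create it. */
	bpf_map__set_autocreate(skel->maps.testmod_2, false);
	err = struct_ops_autocreate__load(skel);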
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c
new file mode 100644
index 000000000000..6049d9c902d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(foo)
+{
+ test_1_result = 42;
+ return 0;
+}
+
+SEC("?struct_ops/test_1")
+int BPF_PROG(bar)
+{
+ test_1_result = 24;
+ return 0;
+}
+
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)bar
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
new file mode 100644
index 000000000000..b450f72e744a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+/* This is a test BPF program that uses struct_ops to access an argument
+ * that may be NULL. It checks that the verifier tracks the
+ * PTR_MAYBE_NULL flag correctly and only permits the access after a
+ * NULL check.
+ */
+SEC("struct_ops/test_maybe_null")
+int BPF_PROG(test_maybe_null, int dummy,
+ struct task_struct *task)
+{
+ if (task)
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_maybe_null = (void *)test_maybe_null,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
new file mode 100644
index 000000000000..6283099ec383
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+SEC("struct_ops/test_maybe_null_struct_ptr")
+int BPF_PROG(test_maybe_null_struct_ptr, int dummy,
+ struct task_struct *task)
+{
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_struct_ptr = {
+ .test_maybe_null = (void *)test_maybe_null_struct_ptr,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c
new file mode 100644
index 000000000000..026cabfa7f1f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+int test_1_result = 0;
+int test_2_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ test_1_result = 0xdeadbeef;
+ return 0;
+}
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2, int a, int b)
+{
+ test_2_result = a + b;
+}
+
+SEC("struct_ops/test_3")
+int BPF_PROG(test_3, int a, int b)
+{
+ test_2_result = a + b + 3;
+ return a + b + 3;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .data = 0x1,
+};
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2_v2, int a, int b)
+{
+ test_2_result = a * b;
+}
+
+struct bpf_testmod_ops___v2 {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops___v2 testmod_2 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2_v2,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
new file mode 100644
index 000000000000..9efcc6e4d356
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define TRAMP(x) \
+ SEC("struct_ops/tramp_" #x) \
+ int BPF_PROG(tramp_ ## x, int a) \
+ { \
+ return a; \
+ }
+
+TRAMP(1)
+TRAMP(2)
+TRAMP(3)
+TRAMP(4)
+TRAMP(5)
+TRAMP(6)
+TRAMP(7)
+TRAMP(8)
+TRAMP(9)
+TRAMP(10)
+TRAMP(11)
+TRAMP(12)
+TRAMP(13)
+TRAMP(14)
+TRAMP(15)
+TRAMP(16)
+TRAMP(17)
+TRAMP(18)
+TRAMP(19)
+TRAMP(20)
+TRAMP(21)
+TRAMP(22)
+TRAMP(23)
+TRAMP(24)
+TRAMP(25)
+TRAMP(26)
+TRAMP(27)
+TRAMP(28)
+TRAMP(29)
+TRAMP(30)
+TRAMP(31)
+TRAMP(32)
+TRAMP(33)
+TRAMP(34)
+TRAMP(35)
+TRAMP(36)
+TRAMP(37)
+TRAMP(38)
+TRAMP(39)
+TRAMP(40)
+
+#define F_TRAMP(x) .tramp_ ## x = (void *)tramp_ ## x
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops multi_pages = {
+ F_TRAMP(1),
+ F_TRAMP(2),
+ F_TRAMP(3),
+ F_TRAMP(4),
+ F_TRAMP(5),
+ F_TRAMP(6),
+ F_TRAMP(7),
+ F_TRAMP(8),
+ F_TRAMP(9),
+ F_TRAMP(10),
+ F_TRAMP(11),
+ F_TRAMP(12),
+ F_TRAMP(13),
+ F_TRAMP(14),
+ F_TRAMP(15),
+ F_TRAMP(16),
+ F_TRAMP(17),
+ F_TRAMP(18),
+ F_TRAMP(19),
+ F_TRAMP(20),
+ F_TRAMP(21),
+ F_TRAMP(22),
+ F_TRAMP(23),
+ F_TRAMP(24),
+ F_TRAMP(25),
+ F_TRAMP(26),
+ F_TRAMP(27),
+ F_TRAMP(28),
+ F_TRAMP(29),
+ F_TRAMP(30),
+ F_TRAMP(31),
+ F_TRAMP(32),
+ F_TRAMP(33),
+ F_TRAMP(34),
+ F_TRAMP(35),
+ F_TRAMP(36),
+ F_TRAMP(37),
+ F_TRAMP(38),
+ F_TRAMP(39),
+ F_TRAMP(40),
+};
diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
index 4542dc683b44..f1853c38aada 100644
--- a/tools/testing/selftests/bpf/progs/task_ls_recursion.c
+++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
@@ -27,23 +27,6 @@ struct {
__type(value, long);
} map_b SEC(".maps");
-SEC("fentry/bpf_local_storage_lookup")
-int BPF_PROG(on_lookup)
-{
- struct task_struct *task = bpf_get_current_task_btf();
-
- if (!test_pid || task->pid != test_pid)
- return 0;
-
- /* The bpf_task_storage_delete will call
- * bpf_local_storage_lookup. The prog->active will
- * stop the recursion.
- */
- bpf_task_storage_delete(&map_a, task);
- bpf_task_storage_delete(&map_b, task);
- return 0;
-}
-
SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index 66b304982245..683c8aaa63da 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -20,8 +20,11 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "test_cls_redirect.h"
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
#ifdef SUBPROGS
#define INLINING __noinline
#else
@@ -267,7 +270,7 @@ static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
uint32_t acc = 0;
uint16_t *ipw = (uint16_t *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) {
acc += ipw[i];
}
@@ -294,7 +297,7 @@ bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
};
*is_fragment = false;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 6; i++) {
switch (exthdr.next) {
case IPPROTO_FRAGMENT:
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index f41c81212ee9..da54c09e9a15 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -23,6 +23,8 @@
#include "test_cls_redirect.h"
#include "bpf_kfuncs.h"
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
index 22aba3f6e344..6fc8b9d66e34 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
@@ -80,7 +80,7 @@ int test_core_type_id(void *ctx)
* to detect whether this test has to be executed, however strange
* that might look like.
*
- * [0] https://reviews.llvm.org/D85174
+ * [0] https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
*/
#if __has_builtin(__builtin_preserve_type_info)
struct core_reloc_type_id_output *out = (void *)&data.out;
diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
index 69509f8bb680..6afa834756e9 100644
--- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
@@ -33,6 +33,12 @@ int BPF_PROG(tp_run)
return 0;
}
+SEC("perf_event")
+int event_run(void *ctx)
+{
+ return 0;
+}
+
SEC("kprobe.multi")
int BPF_PROG(kmulti_run)
{
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 17a9f59bf5f3..fc69ff18880d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#define MAX_STACK (512 - 3 * 32 + 8)
+#define MAX_STACK 260
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
@@ -30,6 +30,10 @@ int f3(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
return f1(skb) + f3(val, skb, 1);
}
@@ -44,7 +48,7 @@ int f3(int val, struct __sk_buff *skb, int var)
}
SEC("tc")
-__failure __msg("combined stack size of 4 calls is 544")
+__failure __msg("combined stack size of 3 calls is")
int global_func1(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
index 9a06e5eb1fbe..143c8a4852bf 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
@@ -26,6 +26,23 @@ int kprobe_typedef_ctx(void *ctx)
return kprobe_typedef_ctx_subprog(ctx);
}
+/* s390x defines:
+ *
+ * typedef user_pt_regs bpf_user_pt_regs_t;
+ * typedef struct { ... } user_pt_regs;
+ *
+ * And so the "canonical" underlying struct type is anonymous.
+ * So on s390x the only valid ways to have a PTR_TO_CTX argument in
+ * global subprogs are:
+ * - bpf_user_pt_regs_t *ctx (typedef);
+ * - struct bpf_user_pt_regs_t *ctx (backwards-compatible struct hack);
+ * - void *ctx __arg_ctx (arg:ctx tag).
+ *
+ * Other architectures also allow using underlying struct types (e.g.,
+ * `struct pt_regs *ctx` for x86-64).
+ */
+#ifndef bpf_target_s390
+
#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))
__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
@@ -40,6 +57,8 @@ int kprobe_resolved_ctx(void *ctx)
return kprobe_struct_ctx_subprog(ctx);
}
+#endif
+
/* this is current hack to make this work on old kernels */
struct bpf_user_pt_regs_t {};
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 48ff2b2ad5e7..fed66f36adb6 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -131,7 +133,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 10; i++) {
struct sr6_tlv_t tlv;
@@ -302,7 +304,7 @@ int __encap_srh(struct __sk_buff *skb)
seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh));
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (unsigned long long lo = 0; lo < 4; lo++) {
seg->lo = bpf_cpu_to_be64(4 - lo);
seg->hi = bpf_cpu_to_be64(hi);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index f416032ba858..b295f9b721bf 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -21,6 +21,32 @@ struct {
__type(value, __u32);
} mim_hash SEC(".maps");
+/* The following three maps are used to test that a
+ * perf_event_array map can be an inner map of a
+ * hash/array of maps.
+ */
+struct perf_event_array {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} inner_map0 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_array_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_hash_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
index 4bdd65b5aa2d..2fdc44e76624 100644
--- a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
+++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
@@ -6,13 +6,13 @@
char tp_name[128];
-SEC("lsm/bpf")
+SEC("lsm.s/bpf")
int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
{
switch (cmd) {
case BPF_RAW_TRACEPOINT_OPEN:
- bpf_probe_read_user_str(tp_name, sizeof(tp_name) - 1,
- (void *)attr->raw_tracepoint.name);
+ bpf_copy_from_user(tp_name, sizeof(tp_name) - 1,
+ (void *)attr->raw_tracepoint.name);
break;
default:
break;
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index a7278f064368..5059050f74f6 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -134,7 +136,7 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
// we can only go as far as ~10 TLVs due to the BPF max stack size
// workaround: define induction variable "i" as "long" instead
// of "int" to prevent alu32 sub-register spilling.
- #pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
diff --git a/tools/testing/selftests/bpf/progs/test_siphash.h b/tools/testing/selftests/bpf/progs/test_siphash.h
new file mode 100644
index 000000000000..5d3a7ec36780
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_siphash.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_SIPHASH_H
+#define _TEST_SIPHASH_H
+
+/* include/linux/bitops.h */
+static inline u64 rol64(u64 word, unsigned int shift)
+{
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/* include/linux/siphash.h */
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+/* lib/siphash.c */
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define PREAMBLE(len) \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+static inline u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+#endif
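A quick usage sketch of the helper above, with the same key as test_key_siphash (siphash_key_t is assumed to come from vmlinux.h, as in the syncookie program later in this series):

	siphash_key_t key = {
		{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
	};
	u64 hash;

	/* Hash two 64-bit words, e.g. packed address/port tuples. */
	hash = siphash_2u64(0x1122334455667788ULL, 0x99aabbccddeeff00ULL, &key);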
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index c482110cfc95..a724a70c6700 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -3,12 +3,14 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
SEC("tc")
int process(struct __sk_buff *skb)
{
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 5; i++) {
if (skb->cb[i] != i + 1)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index b2440a0ff422..d8d77bdffd3d 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -101,4 +101,69 @@ int bpf_spin_lock_test(struct __sk_buff *skb)
err:
return err;
}
+
+struct bpf_spin_lock lockA __hidden SEC(".data.A");
+
+__noinline
+static int static_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_lock(&lockA);
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_unlock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret + ctx->len;
+}
+
+SEC("tc")
+int lock_static_subprog_call(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_lock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ ret = static_subprog_lock(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_unlock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ ret = static_subprog_unlock(ctx);
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
index 86cd183ef6dc..43f40c4fe241 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -201,4 +201,48 @@ CHECK(innermapval_mapval, &iv->lock, &v->lock);
#undef CHECK
+__noinline
+int global_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ ret += ctx->protocol;
+ return ret + ctx->mark;
+}
+
+__noinline
+static int static_subprog_call_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len + global_subprog(ctx);
+}
+
+SEC("?tc")
+int lock_global_subprog_call1(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?tc")
+int lock_global_subprog_call2(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog_call_global(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 553a282d816a..7f74077d6622 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -9,6 +9,8 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
@@ -30,7 +32,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -59,7 +61,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index 2b64bc563a12..68a75436e8af 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -9,6 +9,8 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
@@ -30,7 +32,7 @@ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index 5489823c83fc..efc3c61f7852 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -9,6 +9,8 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
@@ -31,7 +33,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index e6e678aa9874..404124a93892 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -19,6 +19,9 @@
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
static const int cfg_port = 8000;
@@ -81,7 +84,7 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
iph->check = 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
csum += *iph16++;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
new file mode 100644
index 000000000000..c8e4553648bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+#include "test_siphash.h"
+#include "test_tcp_custom_syncookie.h"
+
+#define MAX_PACKET_OFF 0xffff
+
+/* Hash is calculated for each client and split into ISN and TS.
+ *
+ * MSB LSB
+ * ISN: | 31 ... 8 | 7 6 | 5 | 4 | 3 2 1 0 |
+ * | Hash_1 | MSS | ECN | SACK | WScale |
+ *
+ * TS: | 31 ... 8 | 7 ... 0 |
+ * | Random | Hash_2 |
+ */
+#define COOKIE_BITS 8
+#define COOKIE_MASK (((__u32)1 << COOKIE_BITS) - 1)
+
+enum {
+	/* 0xf is invalid and thus means that the SYN did not have WScale. */
+ BPF_SYNCOOKIE_WSCALE_MASK = (1 << 4) - 1,
+ BPF_SYNCOOKIE_SACK = (1 << 4),
+ BPF_SYNCOOKIE_ECN = (1 << 5),
+};
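+
+/* Worked example (a sketch): with mssind 2, wscale 7, and both SACK
+ * and ECN negotiated, the low byte of the ISN is
+ * (2 << 6) | BPF_SYNCOOKIE_ECN | BPF_SYNCOOKIE_SACK | 7
+ * = 0x80 | 0x20 | 0x10 | 0x07 = 0xb7, and Hash_1 fills bits 31..8.
+ */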
+
+#define MSS_LOCAL_IPV4 65495
+#define MSS_LOCAL_IPV6 65476
+
+const __u16 msstab4[] = {
+ 536,
+ 1300,
+ 1460,
+ MSS_LOCAL_IPV4,
+};
+
+const __u16 msstab6[] = {
+ 1280 - 60, /* IPV6_MIN_MTU - 60 */
+ 1480 - 60,
+ 9000 - 60,
+ MSS_LOCAL_IPV6,
+};
+
+static siphash_key_t test_key_siphash = {
+ { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
+};
+
+struct tcp_syncookie {
+ struct __sk_buff *skb;
+ void *data;
+ void *data_end;
+ struct ethhdr *eth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ __be32 *ptr32;
+ struct bpf_tcp_req_attrs attrs;
+ u32 off;
+ u32 cookie;
+ u64 first;
+};
+
+bool handled_syn, handled_ack;
+
+static int tcp_load_headers(struct tcp_syncookie *ctx)
+{
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+
+ if (ctx->eth + 1 > ctx->data_end)
+ goto err;
+
+ switch (bpf_ntohs(ctx->eth->h_proto)) {
+ case ETH_P_IP:
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+
+ if (ctx->ipv4 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv4->ihl != sizeof(*ctx->ipv4) / 4)
+ goto err;
+
+ if (ctx->ipv4->version != 4)
+ goto err;
+
+ if (ctx->ipv4->protocol != IPPROTO_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ break;
+ case ETH_P_IPV6:
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+
+ if (ctx->ipv6 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv6->version != 6)
+ goto err;
+
+ if (ctx->ipv6->nexthdr != NEXTHDR_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ break;
+ default:
+ goto err;
+ }
+
+ if (ctx->tcp + 1 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_reload_headers(struct tcp_syncookie *ctx)
+{
+	/* Without volatile, the verifier rejects this with
+	 * "R3 32-bit pointer arithmetic prohibited".
+	 */
+ volatile u64 data_len = ctx->skb->data_end - ctx->skb->data;
+
+ if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4)
+ goto err;
+
+ /* Needed to calculate csum and parse TCP options. */
+ if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0))
+ goto err;
+
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+ if (ctx->ipv4) {
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+ ctx->ipv6 = NULL;
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ } else {
+ ctx->ipv4 = NULL;
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ }
+
+ if ((void *)ctx->tcp + 60 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static __sum16 tcp_v4_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_tcpudp_magic(ctx->ipv4->saddr, ctx->ipv4->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static __sum16 tcp_v6_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_ipv6_magic(&ctx->ipv6->saddr, &ctx->ipv6->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static int tcp_validate_header(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_reload_headers(ctx))
+ goto err;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+		/* Off loopback, we would verify that tcp_v4_csum(csum) is 0 here. */
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, ctx->ipv4->ihl * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (csum_fold(csum) != 0)
+ goto err;
+ } else if (ctx->ipv6) {
+		/* Off loopback, we would verify that tcp_v6_csum(csum) is 0 here. */
+ }
+
+ return 0;
+err:
+ return -1;
+}
+
+static __always_inline void *next(struct tcp_syncookie *ctx, __u32 sz)
+{
+ __u64 off = ctx->off;
+ __u8 *data;
+
+	/* The verifier forbids packet access once the offset exceeds MAX_PACKET_OFF. */
+ if (off > MAX_PACKET_OFF - sz)
+ return NULL;
+
+ data = ctx->data + off;
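+	/* barrier_var() keeps the compiler from folding the pointer
+	 * arithmetic into the bounds check below, which could produce a
+	 * pattern the verifier rejects.
+	 */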
+ barrier_var(data);
+ if (data + sz >= ctx->data_end)
+ return NULL;
+
+ ctx->off += sz;
+ return data;
+}
+
+static int tcp_parse_option(__u32 index, struct tcp_syncookie *ctx)
+{
+ __u8 *opcode, *opsize, *wscale;
+ __u32 *tsval, *tsecr;
+ __u16 *mss;
+ __u32 off;
+
+ off = ctx->off;
+ opcode = next(ctx, 1);
+ if (!opcode)
+ goto stop;
+
+ if (*opcode == TCPOPT_EOL)
+ goto stop;
+
+ if (*opcode == TCPOPT_NOP)
+ goto next;
+
+ opsize = next(ctx, 1);
+ if (!opsize)
+ goto stop;
+
+ if (*opsize < 2)
+ goto stop;
+
+ switch (*opcode) {
+ case TCPOPT_MSS:
+ mss = next(ctx, 2);
+ if (*opsize == TCPOLEN_MSS && ctx->tcp->syn && mss)
+ ctx->attrs.mss = get_unaligned_be16(mss);
+ break;
+ case TCPOPT_WINDOW:
+ wscale = next(ctx, 1);
+ if (*opsize == TCPOLEN_WINDOW && ctx->tcp->syn && wscale) {
+ ctx->attrs.wscale_ok = 1;
+ ctx->attrs.snd_wscale = *wscale;
+ }
+ break;
+ case TCPOPT_TIMESTAMP:
+ tsval = next(ctx, 4);
+ tsecr = next(ctx, 4);
+ if (*opsize == TCPOLEN_TIMESTAMP && tsval && tsecr) {
+ ctx->attrs.rcv_tsval = get_unaligned_be32(tsval);
+ ctx->attrs.rcv_tsecr = get_unaligned_be32(tsecr);
+
+ if (ctx->tcp->syn && ctx->attrs.rcv_tsecr)
+ ctx->attrs.tstamp_ok = 0;
+ else
+ ctx->attrs.tstamp_ok = 1;
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (*opsize == TCPOLEN_SACK_PERM && ctx->tcp->syn)
+ ctx->attrs.sack_ok = 1;
+ break;
+ }
+
+ ctx->off = off + *opsize;
+next:
+ return 0;
+stop:
+ return 1;
+}
+
+static void tcp_parse_options(struct tcp_syncookie *ctx)
+{
+	ctx->off = (__u8 *)(ctx->tcp + 1) - (__u8 *)ctx->data;
+
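+	/* 40 iterations cover the maximum TCP option space (60-byte max
+	 * header minus the 20-byte fixed part); each iteration consumes at
+	 * least one option byte.
+	 */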
+ bpf_loop(40, tcp_parse_option, ctx, 0);
+}
+
+static int tcp_validate_sysctl(struct tcp_syncookie *ctx)
+{
+ if ((ctx->ipv4 && ctx->attrs.mss != MSS_LOCAL_IPV4) ||
+ (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6))
+ goto err;
+
+ if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7)
+ goto err;
+
+ if (!ctx->attrs.tstamp_ok)
+ goto err;
+
+ if (!ctx->attrs.sack_ok)
+ goto err;
+
+ if (!ctx->tcp->ece || !ctx->tcp->cwr)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static void tcp_prepare_cookie(struct tcp_syncookie *ctx)
+{
+ u32 seq = bpf_ntohl(ctx->tcp->seq);
+ u64 first = 0, second;
+ int mssind = 0;
+ u32 hash;
+
+ if (ctx->ipv4) {
+ for (mssind = ARRAY_SIZE(msstab4) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab4[mssind])
+ break;
+
+ ctx->attrs.mss = msstab4[mssind];
+
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ } else if (ctx->ipv6) {
+ for (mssind = ARRAY_SIZE(msstab6) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab6[mssind])
+ break;
+
+ ctx->attrs.mss = msstab6[mssind];
+
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+ }
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok) {
+ ctx->attrs.rcv_tsecr = bpf_get_prandom_u32();
+ ctx->attrs.rcv_tsecr &= ~COOKIE_MASK;
+ ctx->attrs.rcv_tsecr |= hash & COOKIE_MASK;
+ }
+
+ hash &= ~COOKIE_MASK;
+ hash |= mssind << 6;
+
+ if (ctx->attrs.wscale_ok)
+ hash |= ctx->attrs.snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK;
+
+ if (ctx->attrs.sack_ok)
+ hash |= BPF_SYNCOOKIE_SACK;
+
+ if (ctx->attrs.tstamp_ok && ctx->tcp->ece && ctx->tcp->cwr)
+ hash |= BPF_SYNCOOKIE_ECN;
+
+ ctx->cookie = hash;
+}
+
+static void tcp_write_options(struct tcp_syncookie *ctx)
+{
+ ctx->ptr32 = (__be32 *)(ctx->tcp + 1);
+
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_MSS << 24 | TCPOLEN_MSS << 16 |
+ ctx->attrs.mss);
+
+ if (ctx->attrs.wscale_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_WINDOW << 16 |
+ TCPOLEN_WINDOW << 8 |
+ ctx->attrs.snd_wscale);
+
+ if (ctx->attrs.tstamp_ok) {
+ if (ctx->attrs.sack_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_SACK_PERM << 24 |
+ TCPOLEN_SACK_PERM << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+ else
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsecr);
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsval);
+ } else if (ctx->attrs.sack_ok) {
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_SACK_PERM << 8 |
+ TCPOLEN_SACK_PERM);
+ }
+}
+
+static int tcp_handle_syn(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_sysctl(ctx))
+ goto err;
+
+ tcp_prepare_cookie(ctx);
+ tcp_write_options(ctx);
+
+ swap(ctx->tcp->source, ctx->tcp->dest);
+ ctx->tcp->check = 0;
+ ctx->tcp->ack_seq = bpf_htonl(bpf_ntohl(ctx->tcp->seq) + 1);
+ ctx->tcp->seq = bpf_htonl(ctx->cookie);
+ ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2;
+ ctx->tcp->ack = 1;
+ if (!ctx->attrs.tstamp_ok || !ctx->tcp->ece || !ctx->tcp->cwr)
+ ctx->tcp->ece = 0;
+ ctx->tcp->cwr = 0;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+ swap(ctx->ipv4->saddr, ctx->ipv4->daddr);
+ ctx->tcp->check = tcp_v4_csum(ctx, csum);
+
+ ctx->ipv4->check = 0;
+ ctx->ipv4->tos = 0;
+ ctx->ipv4->tot_len = bpf_htons((long)ctx->ptr32 - (long)ctx->ipv4);
+ ctx->ipv4->id = 0;
+ ctx->ipv4->ttl = 64;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, sizeof(*ctx->ipv4), 0);
+ if (csum < 0)
+ goto err;
+
+ ctx->ipv4->check = csum_fold(csum);
+ } else if (ctx->ipv6) {
+ swap(ctx->ipv6->saddr, ctx->ipv6->daddr);
+ ctx->tcp->check = tcp_v6_csum(ctx, csum);
+
+ *(__be32 *)ctx->ipv6 = bpf_htonl(0x60000000);
+ ctx->ipv6->payload_len = bpf_htons((long)ctx->ptr32 - (long)ctx->tcp);
+ ctx->ipv6->hop_limit = 64;
+ }
+
+ swap_array(ctx->eth->h_source, ctx->eth->h_dest);
+
+ if (bpf_skb_change_tail(ctx->skb, (long)ctx->ptr32 - (long)ctx->eth, 0))
+ goto err;
+
+ return bpf_redirect(ctx->skb->ifindex, 0);
+err:
+ return TC_ACT_SHOT;
+}
+
+static int tcp_validate_cookie(struct tcp_syncookie *ctx)
+{
+ u32 cookie = bpf_ntohl(ctx->tcp->ack_seq) - 1;
+ u32 seq = bpf_ntohl(ctx->tcp->seq) - 1;
+ u64 first = 0, second;
+ int mssind;
+ u32 hash;
+
+ if (ctx->ipv4)
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ else if (ctx->ipv6)
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok)
+ hash -= ctx->attrs.rcv_tsecr & COOKIE_MASK;
+ else
+ hash &= ~COOKIE_MASK;
+
+ hash -= cookie & ~COOKIE_MASK;
+ if (hash)
+ goto err;
+
+ mssind = (cookie & (3 << 6)) >> 6;
+ if (ctx->ipv4) {
+ if (mssind > ARRAY_SIZE(msstab4))
+ goto err;
+
+ ctx->attrs.mss = msstab4[mssind];
+ } else {
+ if (mssind > ARRAY_SIZE(msstab6))
+ goto err;
+
+ ctx->attrs.mss = msstab6[mssind];
+ }
+
+ ctx->attrs.snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.rcv_wscale = ctx->attrs.snd_wscale;
+ ctx->attrs.wscale_ok = ctx->attrs.snd_wscale == BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.sack_ok = cookie & BPF_SYNCOOKIE_SACK;
+ ctx->attrs.ecn_ok = cookie & BPF_SYNCOOKIE_ECN;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_handle_ack(struct tcp_syncookie *ctx)
+{
+ struct bpf_sock_tuple tuple;
+ struct bpf_sock *skc;
+ int ret = TC_ACT_OK;
+ struct sock *sk;
+ u32 tuple_size;
+
+ if (ctx->ipv4) {
+ tuple.ipv4.saddr = ctx->ipv4->saddr;
+ tuple.ipv4.daddr = ctx->ipv4->daddr;
+ tuple.ipv4.sport = ctx->tcp->source;
+ tuple.ipv4.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv4);
+ } else if (ctx->ipv6) {
+ __builtin_memcpy(tuple.ipv6.saddr, &ctx->ipv6->saddr, sizeof(tuple.ipv6.saddr));
+ __builtin_memcpy(tuple.ipv6.daddr, &ctx->ipv6->daddr, sizeof(tuple.ipv6.daddr));
+ tuple.ipv6.sport = ctx->tcp->source;
+ tuple.ipv6.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv6);
+ } else {
+ goto out;
+ }
+
+ skc = bpf_skc_lookup_tcp(ctx->skb, &tuple, tuple_size, -1, 0);
+ if (!skc)
+ goto out;
+
+ if (skc->state != TCP_LISTEN)
+ goto release;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(skc);
+ if (!sk)
+ goto err;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_cookie(ctx))
+ goto err;
+
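+	/* Hand the validated attributes to the kernel: the kfunc creates a
+	 * request socket for listener sk based on ctx->attrs, so the regular
+	 * kernel SYN cookie check is skipped for this ACK.
+	 */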
+ ret = bpf_sk_assign_tcp_reqsk(ctx->skb, sk, &ctx->attrs, sizeof(ctx->attrs));
+ if (ret < 0)
+ goto err;
+
+release:
+ bpf_sk_release(skc);
+out:
+ return ret;
+
+err:
+ ret = TC_ACT_SHOT;
+ goto release;
+}
+
+SEC("tc")
+int tcp_custom_syncookie(struct __sk_buff *skb)
+{
+ struct tcp_syncookie ctx = {
+ .skb = skb,
+ };
+
+ if (tcp_load_headers(&ctx))
+ return TC_ACT_OK;
+
+ if (ctx.tcp->rst)
+ return TC_ACT_OK;
+
+ if (ctx.tcp->syn) {
+ if (ctx.tcp->ack)
+ return TC_ACT_OK;
+
+ handled_syn = true;
+
+ return tcp_handle_syn(&ctx);
+ }
+
+ handled_ack = true;
+
+ return tcp_handle_ack(&ctx);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
new file mode 100644
index 000000000000..29a6a53cf229
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_TCP_SYNCOOKIE_H
+#define _TEST_TCP_SYNCOOKIE_H
+
+#define __packed __attribute__((__packed__))
+#define __force
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define swap(a, b) \
+ do { \
+ typeof(a) __tmp = (a); \
+ (a) = (b); \
+ (b) = __tmp; \
+ } while (0)
+
+#define swap_array(a, b) \
+ do { \
+ typeof(a) __tmp[sizeof(a)]; \
+ __builtin_memcpy(__tmp, a, sizeof(a)); \
+ __builtin_memcpy(a, b, sizeof(a)); \
+ __builtin_memcpy(b, __tmp, sizeof(a)); \
+ } while (0)
+
+/* asm-generic/unaligned.h */
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return bpf_ntohs(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return bpf_ntohl(__get_unaligned_t(__be32, p));
+}
+
+/* lib/checksum.c */
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ unsigned long long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ return (__force __wsum)from64to32(s);
+}
+
+/* asm-generic/checksum.h */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
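+
+/* Worked example for csum_fold() above: csum_fold(0x12345678) folds
+ * 0x5678 + 0x1234 = 0x68ac (no carry on the second round) and returns
+ * ~0x68ac = (__sum16)0x9753.
+ */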
+
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/* net/ipv6/ip6_checksum.c */
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum)
+{
+ int carry;
+ __u32 ulen;
+ __u32 uproto;
+ __u32 sum = (__force u32)csum;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ ulen = (__force u32)bpf_htonl((__u32)len);
+ sum += ulen;
+ carry = (sum < ulen);
+ sum += carry;
+
+ uproto = (__force u32)bpf_htonl(proto);
+ sum += uproto;
+ carry = (sum < uproto);
+ sum += carry;
+
+ return csum_fold((__force __wsum)sum);
+}
+#endif
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index cf7ed8cbb1fe..a3f3f43fc195 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -59,7 +59,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
- : [op] "+r"(op)
+ : [op] "=r"(op)
: [skops] "r"(skops)
:);
diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index d7a9a74b7245..8caf58be5818 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
@@ -19,6 +19,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
+#include "bpf_compiler.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -137,7 +138,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
iph->ttl = 8;
next_iph = (__u16 *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
index 78c368e71797..67a77944ef29 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
@@ -18,11 +18,11 @@
#include "test_iptunnel_common.h"
#include "bpf_kfuncs.h"
-const size_t tcphdr_sz = sizeof(struct tcphdr);
-const size_t udphdr_sz = sizeof(struct udphdr);
-const size_t ethhdr_sz = sizeof(struct ethhdr);
-const size_t iphdr_sz = sizeof(struct iphdr);
-const size_t ipv6hdr_sz = sizeof(struct ipv6hdr);
+#define tcphdr_sz sizeof(struct tcphdr)
+#define udphdr_sz sizeof(struct udphdr)
+#define ethhdr_sz sizeof(struct ethhdr)
+#define iphdr_sz sizeof(struct iphdr)
+#define ipv6hdr_sz sizeof(struct ipv6hdr)
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
index c98fb44156f0..93267a68825b 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
@@ -15,6 +15,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
+#include "bpf_compiler.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -133,7 +134,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
iph->ttl = 8;
next_iph = (__u16 *)iph;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 42c8f6ded0e4..5c7e4758a0ca 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -15,6 +15,7 @@
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
@@ -362,7 +363,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
iph->ttl = 4;
next_iph_u16 = (__u16 *) iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
@@ -409,7 +410,7 @@ int send_icmp_reply(void *data, void *data_end)
iph->saddr = tmp_addr;
iph->check = 0;
next_iph_u16 = (__u16 *) iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c
new file mode 100644
index 000000000000..e4d59b6ba743
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/token_lsm.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int my_pid;
+bool reject_capable;
+bool reject_cmd;
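+
+/* All three globals are meant to be set from userspace before the test
+ * runs: my_pid scopes the policy to the test process, and the reject_*
+ * knobs make the hooks below return -1, denying the token operation.
+ */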
+
+SEC("lsm/bpf_token_capable")
+int BPF_PROG(token_capable, struct bpf_token *token, int cap)
+{
+ if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+ if (reject_capable)
+ return -1;
+ return 0;
+}
+
+SEC("lsm/bpf_token_cmd")
+int BPF_PROG(token_cmd, struct bpf_token *token, enum bpf_cmd cmd)
+{
+ if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+ if (reject_cmd)
+ return -1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c
new file mode 100644
index 000000000000..d41665d2ec8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_failure.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?fentry/bpf_spin_lock")
+int BPF_PROG(test_spin_lock, struct bpf_spin_lock *lock)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_spin_unlock")
+int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 694e7cec1823..5fda43901033 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -33,6 +33,27 @@ int bench_trigger_kprobe(void *ctx)
return 0;
}
+SEC("kretprobe/" SYS_PREFIX "sys_getpgid")
+int bench_trigger_kretprobe(void *ctx)
+{
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("kprobe.multi/" SYS_PREFIX "sys_getpgid")
+int bench_trigger_kprobe_multi(void *ctx)
+{
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("kretprobe.multi/" SYS_PREFIX "sys_getpgid")
+int bench_trigger_kretprobe_multi(void *ctx)
+{
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fentry(void *ctx)
{
@@ -40,6 +61,13 @@ int bench_trigger_fentry(void *ctx)
return 0;
}
+SEC("fexit/" SYS_PREFIX "sys_getpgid")
+int bench_trigger_fexit(void *ctx)
+{
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
SEC("fentry.s/" SYS_PREFIX "sys_getpgid")
int bench_trigger_fentry_sleep(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c
index a9629ac230fd..9d808b8f4ab0 100644
--- a/tools/testing/selftests/bpf/progs/type_cast.c
+++ b/tools/testing/selftests/bpf/progs/type_cast.c
@@ -4,6 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
@@ -19,9 +20,6 @@ char name[IFNAMSIZ];
unsigned int inum;
unsigned int meta_len, frag0_len, kskb_len, kskb2_len;
-void *bpf_cast_to_kern_ctx(void *) __ksym;
-void *bpf_rdonly_cast(void *, __u32) __ksym;
-
SEC("?xdp")
int md_xdp(struct xdp_md *ctx)
{
@@ -48,13 +46,12 @@ int md_skb(struct __sk_buff *skb)
/* Simulate the following kernel macro:
* #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
*/
- shared_info = bpf_rdonly_cast(kskb->head + kskb->end,
- bpf_core_type_id_kernel(struct skb_shared_info));
+ shared_info = bpf_core_cast(kskb->head + kskb->end, struct skb_shared_info);
meta_len = shared_info->meta_len;
frag0_len = shared_info->frag_list->len;
/* kskb2 should be equal to kskb */
- kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
+ kskb2 = bpf_core_cast(kskb, typeof(*kskb2));
kskb2_len = kskb2->len;
return 0;
}
@@ -65,7 +62,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
struct task_struct *task, *task_dup;
task = bpf_get_current_task_btf();
- task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
+ task_dup = bpf_core_cast(task, struct task_struct);
(void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
return 0;
}
@@ -73,7 +70,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
SEC("?tracepoint/syscalls/sys_enter_nanosleep")
int kctx_u64(void *ctx)
{
- u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64));
+ u64 *kctx = bpf_core_cast(ctx, u64);
(void)kctx;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
new file mode 100644
index 000000000000..5540b05ff9ee
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+	__uint(max_entries, 2); /* arena of two pages close to 32-bit boundary */
+ __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+} arena SEC(".maps");
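+
+/* With 4 KiB pages, map_extra above works out to (1ull << 44) | 0xffffe000:
+ * the two arena pages occupy the last 8 KiB below a 32-bit boundary of the
+ * arena's user VA range, which lets the tests below exercise pointer
+ * arithmetic right at that boundary.
+ */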
+
+SEC("syscall")
+__success __retval(0)
+int basic_alloc1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ARENA_CAST)
+ volatile int __arena *page1, *page2, *no_page, *page3;
+
+ page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page1)
+ return 1;
+ *page1 = 1;
+ page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page2)
+ return 2;
+ *page2 = 2;
+ no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (no_page)
+ return 3;
+ if (*page1 != 1)
+ return 4;
+ if (*page2 != 2)
+ return 5;
+ bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
+ if (*page1 != 1)
+ return 6;
+ if (*page2 != 0) /* use-after-free should return 0 */
+ return 7;
+ page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page3)
+ return 8;
+ *page3 = 3;
+ if (page2 != page3)
+ return 9;
+ if (*page1 != 1)
+ return 10;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int basic_alloc2(void *ctx)
+{
+#if defined(__BPF_FEATURE_ARENA_CAST)
+ volatile char __arena *page1, *page2, *page3, *page4;
+
+ page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
+ if (!page1)
+ return 1;
+ page2 = page1 + __PAGE_SIZE;
+ page3 = page1 + __PAGE_SIZE * 2;
+ page4 = page1 - __PAGE_SIZE;
+ *page1 = 1;
+ *page2 = 2;
+ *page3 = 3;
+ *page4 = 4;
+ if (*page1 != 1)
+ return 1;
+ if (*page2 != 2)
+ return 2;
+ if (*page3 != 0)
+ return 3;
+ if (*page4 != 0)
+ return 4;
+ bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
+ if (*page1 != 0)
+ return 5;
+ if (*page2 != 0)
+ return 6;
+ if (*page3 != 0)
+ return 7;
+ if (*page4 != 0)
+ return 8;
+#endif
+ return 0;
+}
+
+struct bpf_arena___l {
+ struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+SEC("syscall")
+__success __retval(0) __log_level(2)
+int basic_alloc3(void *ctx)
+{
+ struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;
+ volatile char __arena *pages;
+
+ pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
+ if (!pages)
+ return 1;
+ return 0;
+}
+
+SEC("iter.s/bpf_map")
+__success __log_level(2)
+int iter_maps1(struct bpf_iter__bpf_map *ctx)
+{
+ struct bpf_map *map = ctx->map;
+
+ if (!map)
+ return 0;
+ bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0);
+ return 0;
+}
+
+SEC("iter.s/bpf_map")
+__failure __msg("expected pointer to STRUCT bpf_map")
+int iter_maps2(struct bpf_iter__bpf_map *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+
+ bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0);
+ return 0;
+}
+
+SEC("iter.s/bpf_map")
+__failure __msg("untrusted_ptr_bpf_map")
+int iter_maps3(struct bpf_iter__bpf_map *ctx)
+{
+ struct bpf_map *map = ctx->map;
+
+ if (!map)
+ return 0;
+ bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
index be95570ab382..28b602ac9cbe 100644
--- a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
@@ -568,7 +568,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
-__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)")
+__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
new file mode 100644
index 000000000000..4ab0ef18d7eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "xdp_metadata.h"
+#include "bpf_kfuncs.h"
+
+extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+extern void bpf_task_release(struct task_struct *p) __ksym __weak;
+
+__weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable)
+{
+ if (!task)
+ return 0;
+ return task->pid + task->tgid;
+}
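+
+/* __arg_trusted lets the verifier validate the global subprog once against
+ * a trusted PTR_TO_BTF_ID argument; __arg_nullable additionally allows a
+ * NULL to be passed, which is why the subprog must check for it first.
+ */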
+
+__weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_nullable)
+{
+ return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL);
+}
+
+SEC("?tp_btf/task_newtask")
+__success __log_level(2)
+__msg("Validating subprog_trusted_task_nullable() func#1...")
+__msg(": R1=trusted_ptr_or_null_task_struct(")
+int trusted_task_arg_nullable(void *ctx)
+{
+ struct task_struct *t1 = bpf_get_current_task_btf();
+ struct task_struct *t2 = bpf_task_acquire(t1);
+ int res = 0;
+
+ /* known NULL */
+ res += subprog_trusted_task_nullable(NULL);
+
+ /* known non-NULL */
+ res += subprog_trusted_task_nullable(t1);
+ res += subprog_trusted_task_nullable_extra_layer(t1);
+
+ /* unknown if NULL or not */
+ res += subprog_trusted_task_nullable(t2);
+ res += subprog_trusted_task_nullable_extra_layer(t2);
+
+ if (t2) {
+ /* known non-NULL after explicit NULL check, just in case */
+ res += subprog_trusted_task_nullable(t2);
+ res += subprog_trusted_task_nullable_extra_layer(t2);
+
+ bpf_task_release(t2);
+ }
+
+ return res;
+}
+
+__weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted)
+{
+ return task->pid + task->tgid;
+}
+
+SEC("?kprobe")
+__failure __log_level(2)
+__msg("R1 type=scalar expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')")
+int trusted_task_arg_nonnull_fail1(void *ctx)
+{
+ return subprog_trusted_task_nonnull(NULL);
+}
+
+SEC("?tp_btf/task_newtask")
+__failure __log_level(2)
+__msg("R1 type=ptr_or_null_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')")
+int trusted_task_arg_nonnull_fail2(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+ struct task_struct *nullable;
+ int res;
+
+ nullable = bpf_task_acquire(t);
+
+ /* should fail, PTR_TO_BTF_ID_OR_NULL */
+ res = subprog_trusted_task_nonnull(nullable);
+
+ if (nullable)
+ bpf_task_release(nullable);
+
+ return res;
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+__msg("Validating subprog_trusted_task_nonnull() func#1...")
+__msg(": R1=trusted_ptr_task_struct(")
+int trusted_task_arg_nonnull(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+
+ return subprog_trusted_task_nonnull(t);
+}
+
+struct task_struct___local {} __attribute__((preserve_access_index));
+
+__weak int subprog_nullable_task_flavor(
+ struct task_struct___local *task __arg_trusted __arg_nullable)
+{
+ char buf[16];
+
+ if (!task)
+ return 0;
+
+ return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
+}
+
+SEC("?uprobe.s")
+__success __log_level(2)
+__msg("Validating subprog_nullable_task_flavor() func#1...")
+__msg(": R1=trusted_ptr_or_null_task_struct(")
+int flavor_ptr_nullable(void *ctx)
+{
+ struct task_struct___local *t = (void *)bpf_get_current_task_btf();
+
+ return subprog_nullable_task_flavor(t);
+}
+
+__weak int subprog_nonnull_task_flavor(struct task_struct___local *task __arg_trusted)
+{
+ char buf[16];
+
+ return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
+}
+
+SEC("?uprobe.s")
+__success __log_level(2)
+__msg("Validating subprog_nonnull_task_flavor() func#1...")
+__msg(": R1=trusted_ptr_task_struct(")
+int flavor_ptr_nonnull(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+
+ return subprog_nonnull_task_flavor((void *)t);
+}
+
+__weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted)
+{
+ bpf_task_release(task); /* should be rejected */
+
+ return 0;
+}
+
+SEC("?tp_btf/task_newtask")
+__failure __log_level(2)
+__msg("release kernel function bpf_task_release expects refcounted PTR_TO_BTF_ID")
+int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags)
+{
+ return subprog_trusted_destroy(task);
+}
+
+__weak int subprog_trusted_acq_rel(struct task_struct *task __arg_trusted)
+{
+ struct task_struct *owned;
+
+ owned = bpf_task_acquire(task);
+ if (!owned)
+ return 0;
+
+ bpf_task_release(owned); /* this one is OK, we acquired it locally */
+
+ return 0;
+}
+
+SEC("?tp_btf/task_newtask")
+__success __log_level(2)
+int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags)
+{
+ return subprog_trusted_acq_rel(task);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index 67dddd941891..baff5ffe9405 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -115,6 +115,35 @@ int arg_tag_nullable_ptr_fail(void *ctx)
return subprog_nullable_ptr_bad(&x);
}
+typedef struct {
+ int x;
+} user_struct_t;
+
+__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
+{
+ return t ? t->x : 0;
+}
+
+SEC("?tracepoint")
+__failure __log_level(2)
+__msg("invalid bpf_context access")
+__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
+int anon_user_mem_invalid(void *ctx)
+{
+ /* can't pass PTR_TO_CTX as user memory */
+ return subprog_user_anon_mem(ctx);
+}
+
+SEC("?tracepoint")
+__success __log_level(2)
+__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
+int anon_user_mem_valid(void *ctx)
+{
+ user_struct_t t = { .x = 42 };
+
+ return subprog_user_anon_mem(&t);
+}
+
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
return (*p1) * (*p2); /* good, no need for NULL checks */
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
index a955a6358206..99e561f18f9b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_experimental.h"
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@@ -309,4 +307,103 @@ int iter_limit_bug(struct __sk_buff *skb)
return 0;
}
+#define ARR_SZ 1000000
+int zero;
+char arr[ARR_SZ];
+
+SEC("socket")
+__success __retval(0xd495cdc0)
+int cond_break1(const void *ctx)
+{
+ unsigned long i;
+ unsigned int sum = 0;
+
+ for (i = zero; i < ARR_SZ; cond_break, i++)
+ sum += i;
+ for (i = zero; i < ARR_SZ; i++) {
+ barrier_var(i);
+ sum += i + arr[i];
+ cond_break;
+ }
+
+ return sum;
+}
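+
+/* Why 0xd495cdc0: arr[] is all zeroes and neither loop exhausts the
+ * cond_break budget, so sum = 2 * (0 + 1 + ... + 999999)
+ * = 999999000000 = 0xe8d495cdc0, which truncates to 0xd495cdc0 in the
+ * 32-bit retval.
+ */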
+
+SEC("socket")
+__success __retval(999000000)
+int cond_break2(const void *ctx)
+{
+ int i, j;
+ int sum = 0;
+
+ for (i = zero; i < 1000; cond_break, i++)
+ for (j = zero; j < 1000; j++) {
+ sum += i + j;
+ cond_break;
+ }
+
+ return sum;
+}
+
+static __noinline int loop(void)
+{
+ int i, sum = 0;
+
+ for (i = zero; i <= 1000000; i++, cond_break)
+ sum += i;
+
+ return sum;
+}
+
+SEC("socket")
+__success __retval(0x6a5a2920)
+int cond_break3(const void *ctx)
+{
+ return loop();
+}
+
+SEC("socket")
+__success __retval(1)
+int cond_break4(const void *ctx)
+{
+ int cnt = zero;
+
+ for (;;) {
+ /* should eventually break out of the loop */
+ cond_break;
+ cnt++;
+ }
+ /* if we looped a bit, it's a success */
+ return cnt > 1 ? 1 : 0;
+}
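+
+/* cond_break lowers to the may_goto instruction: at run time it falls
+ * through until a kernel-provided iteration budget is exhausted, then
+ * jumps past the loop, so cnt above ends up large but finite.
+ */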
+
+static __noinline int static_subprog(void)
+{
+ int cnt = zero;
+
+ for (;;) {
+ cond_break;
+ cnt++;
+ }
+
+ return cnt;
+}
+
+SEC("socket")
+__success __retval(1)
+int cond_break5(const void *ctx)
+{
+ int cnt1 = zero, cnt2;
+
+ for (;;) {
+ cond_break;
+ cnt1++;
+ }
+
+ cnt2 = static_subprog();
+
+ /* main and subprog have to loop a bit */
+ return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
index 71735dbf33d4..e07b43b78fd2 100644
--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -259,4 +259,28 @@ l0_%=: r2 += r1; \
" ::: __clobber_all);
}
+SEC("xdp")
+__success
+__naked void not_an_infinite_loop(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0xff; \
+ *(u64 *)(r10 - 8) = r0; \
+ r0 = 0; \
+loop_%=: \
+ r0 = *(u64 *)(r10 - 8); \
+ if r0 > 10 goto exit_%=; \
+ r0 += 1; \
+ *(u64 *)(r10 - 8) = r0; \
+ r0 = 0; \
+ goto loop_%=; \
+exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 39fe3372e0e0..85e48069c9e6 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void)
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
-__failure __msg("invalid access to packet")
+__success __retval(0)
__naked void u16_offset_to_skb_data(void)
{
asm volatile (" \
@@ -225,13 +225,19 @@ __naked void u16_offset_to_skb_data(void)
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
- r4 = *(u16*)(r10 - 8); \
+ "
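+/* A 32-bit spill to fp-8 occupies bytes fp-8..fp-5; its low 16 bits sit
+ * at fp-8 on little-endian but at fp-6 on big-endian.
+ */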
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r4 = *(u16*)(r10 - 8);"
+#else
+ "r4 = *(u16*)(r10 - 6);"
+#endif
+ " \
r0 = r2; \
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\
r0 += r4; \
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
if r0 > r3 goto l0_%=; \
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
+ /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
@@ -243,7 +249,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
-__failure __msg("invalid access to packet")
+__failure __msg("math between pkt pointer and register with unbounded min value is not allowed")
__naked void u64_offset_to_skb_data(void)
{
asm volatile (" \
@@ -253,13 +259,11 @@ __naked void u64_offset_to_skb_data(void)
w7 = 20; \
*(u32*)(r10 - 4) = r6; \
*(u32*)(r10 - 8) = r7; \
- r4 = *(u16*)(r10 - 8); \
+ r4 = *(u64*)(r10 - 8); \
r0 = r2; \
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */ \
r0 += r4; \
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
@@ -270,7 +274,7 @@ l0_%=: r0 = 0; \
}
SEC("tc")
-__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
+__description("Spill a u32 const scalar. Refill as u16 from MSB. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
@@ -279,7 +283,13 @@ __naked void _6_offset_to_skb_data(void)
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
- r4 = *(u16*)(r10 - 6); \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r4 = *(u16*)(r10 - 6);"
+#else
+ "r4 = *(u16*)(r10 - 8);"
+#endif
+ " \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
@@ -454,9 +464,9 @@ l0_%=: r1 >>= 16; \
SEC("raw_tp")
__log_level(2)
__success
-__msg("fp-8=0m??mmmm")
-__msg("fp-16=00mm??mm")
-__msg("fp-24=00mm???m")
+__msg("fp-8=0m??scalar()")
+__msg("fp-16=00mm??scalar()")
+__msg("fp-24=00mm???scalar()")
__naked void spill_subregs_preserve_stack_zero(void)
{
asm volatile (
@@ -495,14 +505,14 @@ char single_byte_buf[1] SEC(".data.single_byte_buf");
SEC("raw_tp")
__log_level(2)
__success
-/* make sure fp-8 is all STACK_ZERO */
-__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=00000000")
+/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */
+__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0")
/* but fp-16 is spilled IMPRECISE zero const reg */
__msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0")
-/* validate that assigning R2 from STACK_ZERO doesn't mark register
+/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register
* precise immediately; if necessary, it will be marked precise later
*/
-__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=00000000")
+__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0")
/* similarly, when R2 is assigned from spilled register, it is initially
* imprecise, but will be marked precise later once it is used in precise context
*/
@@ -520,14 +530,14 @@ __msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
__naked void partial_stack_load_preserves_zeros(void)
{
asm volatile (
- /* fp-8 is all STACK_ZERO */
+ /* fp-8 is value zero (represented by a zero value fake reg) */
".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */
/* fp-16 is const zero register */
"r0 = 0;"
"*(u64 *)(r10 -16) = r0;"
- /* load single U8 from non-aligned STACK_ZERO slot */
+ /* load single U8 from non-aligned spilled value zero slot */
"r1 = %[single_byte_buf];"
"r2 = *(u8 *)(r10 -1);"
"r1 += r2;"
@@ -539,7 +549,7 @@ __naked void partial_stack_load_preserves_zeros(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
- /* load single U16 from non-aligned STACK_ZERO slot */
+ /* load single U16 from non-aligned spilled value zero slot */
"r1 = %[single_byte_buf];"
"r2 = *(u16 *)(r10 -2);"
"r1 += r2;"
@@ -551,7 +561,7 @@ __naked void partial_stack_load_preserves_zeros(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
- /* load single U32 from non-aligned STACK_ZERO slot */
+ /* load single U32 from non-aligned spilled value zero slot */
"r1 = %[single_byte_buf];"
"r2 = *(u32 *)(r10 -4);"
"r1 += r2;"
@@ -583,6 +593,47 @@ __naked void partial_stack_load_preserves_zeros(void)
: __clobber_common);
}
+SEC("raw_tp")
+__log_level(2)
+__success
+/* fp-4 is STACK_ZERO */
+__msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????")
+__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????")
+__msg("5: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)")
+__naked void partial_stack_load_preserves_partial_zeros(void)
+{
+ asm volatile (
+ /* fp-4 is value zero */
+ ".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */
+
+ /* load single U8 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u8 *)(r10 -1);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U16 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u16 *)(r10 -2);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U32 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u32 *)(r10 -4);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(single_byte_buf),
+ __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0))
+ : __clobber_common);
+}
+
char two_byte_buf[2] SEC(".data.two_byte_buf");
SEC("raw_tp")
@@ -737,4 +788,460 @@ __naked void stack_load_preserves_const_precision_subreg(void)
: __clobber_common);
}
+SEC("xdp")
+__description("32-bit spilled reg range should be tracked")
+__success __retval(0)
+__naked void spill_32bit_range_track(void)
+{
+ asm volatile(" \
+ call %[bpf_ktime_get_ns]; \
+ /* Make r0 bounded. */ \
+ r0 &= 65535; \
+ /* Assign an ID to r0. */ \
+ r1 = r0; \
+ /* 32-bit spill r0 to stack. */ \
+ *(u32*)(r10 - 8) = r0; \
+ /* Boundary check on r0. */ \
+ if r0 < 1 goto l0_%=; \
+ /* 32-bit fill r1 from stack. */ \
+ r1 = *(u32*)(r10 - 8); \
+ /* r1 == r0 => r1 >= 1 always. */ \
+ if r1 >= 1 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. \
+ * Do an invalid memory access if the verifier \
+ * follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("64-bit spill of 64-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_64bit_of_64bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x80000000; \
+ r0 <<= 32; \
+ /* 64-bit spill r0 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r0; \
+ /* 64-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u64*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit spill of 32-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_32bit_of_32bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0x80000000; \
+ /* 32-bit spill r0 to stack - should assign an ID. */\
+ *(u32*)(r10 - 8) = r0; \
+ /* 32-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u32*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("16-bit spill of 16-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_16bit_of_16bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8000; \
+ /* 16-bit spill r0 to stack - should assign an ID. */\
+ *(u16*)(r10 - 8) = r0; \
+ /* 16-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u16*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("8-bit spill of 8-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_8bit_of_8bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x80; \
+ /* 8-bit spill r0 to stack - should assign an ID. */\
+ *(u8*)(r10 - 8) = r0; \
+ /* 8-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u8*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners. \
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("spill unbounded reg, then range check src")
+__success __retval(0)
+__naked void spill_unbounded(void)
+{
+ asm volatile (" \
+ /* Produce an unbounded scalar. */ \
+ call %[bpf_get_prandom_u32]; \
+ /* Spill r0 to stack. */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* Boundary check on r0. */ \
+ if r0 > 16 goto l0_%=; \
+ /* Fill r0 from stack. */ \
+ r0 = *(u64*)(r10 - 8); \
+ /* Boundary check on r0 with predetermined result. */\
+ if r0 <= 16 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill")
+__success __retval(0)
+__naked void fill_32bit_after_spill_64bit(void)
+{
+ asm volatile(" \
+ /* Randomize the upper 32 bits. */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 <<= 32; \
+ /* 64-bit spill r0 to stack. */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* 32-bit fill r0 from stack. */ \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(u32*)(r10 - 8);"
+#else
+ "r0 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Boundary check on r0 with predetermined result. */\
+ if r0 == 0 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill of 32-bit value should preserve ID")
+__success __retval(0)
+__naked void fill_32bit_after_spill_64bit_preserve_id(void)
+{
+ asm volatile (" \
+ /* Randomize the lower 32 bits. */ \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0xffffffff; \
+ /* 64-bit spill r0 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r0; \
+ /* 32-bit fill r1 from stack - should preserve the ID. */\
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(u32*)(r10 - 8);"
+#else
+ "r1 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Compare r1 with another register to trigger find_equal_scalars. */\
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill should clear ID")
+__failure __msg("math between ctx pointer and 4294967295 is not allowed")
+__naked void fill_32bit_after_spill_64bit_clear_id(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ /* Roll one bit to force the verifier to track both branches. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8; \
+ /* Put a large number into r1. */ \
+ r1 = 0xffffffff; \
+ r1 <<= 32; \
+ r1 += r0; \
+ /* 64-bit spill r1 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r1; \
+ /* 32-bit fill r2 from stack - should clear the ID. */\
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r2 = *(u32*)(r10 - 8);"
+#else
+ "r2 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Compare r2 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners. If the ID were mistakenly preserved on fill, the\
+	 * verifier would think that r1 is also equal to zero in one of the\
+	 * branches, and equal to eight in the other.\
+ */ \
+ r3 = 0; \
+ if r2 != r3 goto l0_%=; \
+l0_%=: r1 >>= 32; \
+ /* The verifier shouldn't propagate r2's range to r1, so it should\
+ * still remember r1 = 0xffffffff and reject the below.\
+ */ \
+ r6 += r1; \
+ r0 = *(u32*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if stack spill of an imprecise scalar in old state
+ * is considered equivalent to STACK_{MISC,INVALID} in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+/* STACK_INVALID should prevent the verifier in unpriv mode from
+ * considering states equivalent and force an error on the second
+ * verification path (entry - label 1 - label 2).
+ */
+__failure_unpriv
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("9: (95) exit")
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("invalid read from stack off -8+2 size 8")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_imprecise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that stack spill of a precise scalar in old state
+ * is not considered equivalent to STACK_MISC in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* verifier should visit 'if r1 == 0x2a ...' two times:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("if r1 == 0x2a goto pc+0")
+__msg("if r1 == 0x2a goto pc+0")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_precise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ /* use r1 in precise context */
+ "if r1 == 42 goto +0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if STACK_MISC in old state is considered
+ * equivalent to stack spill of a scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_scalar(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r0 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that STACK_MISC in old state is not considered
+ * equivalent to stack spill of a non-scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* verifier should process exit instructions twice:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("r1 = *(u64 *)(r10 -8)")
+__msg("exit")
+__msg("r1 = *(u64 *)(r10 -8)")
+__msg("exit")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_ctx_ptr(void)
+{
+ asm volatile(
+ /* remember context pointer in r9 */
+ "r9 = r1;"
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure context pointer in fp-8 */
+ "*(u64*)(r10 - 8) = r9;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
index 9c1aa69650f8..fb316c080c84 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
@@ -330,7 +330,7 @@ l1_%=: r7 = r0; \
SEC("cgroup/skb")
__description("spin_lock: test10 lock in subprog without unlock")
-__failure __msg("unlock is missing")
+__success
__failure_unpriv __msg_unpriv("")
__naked void lock_in_subprog_without_unlock(void)
{
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 518329c666e9..7ea9785738b5 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -7,6 +7,8 @@
#include <bpf/bpf_endian.h>
#include <asm/errno.h>
+#include "bpf_compiler.h"
+
#define TC_ACT_OK 0
#define TC_ACT_SHOT 2
@@ -151,11 +153,11 @@ static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
__u64 sum = csum;
int i;
-#pragma unroll
+ __pragma_loop_unroll
for (i = 0; i < 4; i++)
sum += (__u32)saddr->in6_u.u6_addr32[i];
-#pragma unroll
+ __pragma_loop_unroll
for (i = 0; i < 4; i++)
sum += (__u32)daddr->in6_u.u6_addr32[i];
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 54cf1765118b..44e2b0ef23ae 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -15,6 +15,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "xdping.h"
struct {
@@ -116,7 +117,7 @@ int xdping_client(struct xdp_md *ctx)
return XDP_PASS;
if (pinginfo->start) {
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < XDPING_MAX_COUNT; i++) {
if (pinginfo->times[i] == 0)
break;
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index f01391021218..524c38e9cde4 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -181,7 +181,7 @@ static int parse_test_spec(struct test_loader *tester,
memset(spec, 0, sizeof(*spec));
spec->prog_name = bpf_program__name(prog);
- spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */
+ spec->prog_flags = testing_prog_flags();
btf = bpf_object__btf(obj);
if (!btf) {
@@ -501,7 +501,7 @@ static bool is_unpriv_capable_map(struct bpf_map *map)
}
}
-static int do_prog_test_run(int fd_prog, int *retval)
+static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts)
{
__u8 tmp_out[TEST_DATA_LEN << 2] = {};
__u8 tmp_in[TEST_DATA_LEN] = {};
@@ -514,6 +514,10 @@ static int do_prog_test_run(int fd_prog, int *retval)
.repeat = 1,
);
+ if (empty_opts) {
+ memset(&topts, 0, sizeof(struct bpf_test_run_opts));
+ topts.sz = sizeof(struct bpf_test_run_opts);
+ }
err = bpf_prog_test_run_opts(fd_prog, &topts);
saved_errno = errno;
@@ -649,7 +653,8 @@ void run_subtest(struct test_loader *tester,
}
}
- do_prog_test_run(bpf_program__fd(tprog), &retval);
+ do_prog_test_run(bpf_program__fd(tprog), &retval,
+ bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL);
if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
goto tobj_cleanup;
@@ -688,7 +693,7 @@ static void process_subtest(struct test_loader *tester,
++nr_progs;
specs = calloc(nr_progs, sizeof(struct test_spec));
- if (!ASSERT_OK_PTR(specs, "Can't alloc specs array"))
+ if (!ASSERT_OK_PTR(specs, "specs_alloc"))
return;
i = 0;
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index c028d621c744..d98c72dc563e 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -211,7 +211,7 @@ static void test_lpm_map(int keysize)
volatile size_t n_matches, n_matches_after_delete;
size_t i, j, n_nodes, n_lookups;
struct tlpm_node *t, *list = NULL;
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
uint8_t *data, *value;
int r, map;
@@ -331,8 +331,8 @@ static void test_lpm_map(int keysize)
static void test_lpm_ipaddr(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
- struct bpf_lpm_trie_key *key_ipv4;
- struct bpf_lpm_trie_key *key_ipv6;
+ struct bpf_lpm_trie_key_u8 *key_ipv4;
+ struct bpf_lpm_trie_key_u8 *key_ipv6;
size_t key_size_ipv4;
size_t key_size_ipv6;
int map_fd_ipv4;
@@ -423,7 +423,7 @@ static void test_lpm_ipaddr(void)
static void test_lpm_delete(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
size_t key_size;
int map_fd;
__u64 value;
@@ -532,7 +532,7 @@ static void test_lpm_delete(void)
static void test_lpm_get_next_key(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
- struct bpf_lpm_trie_key *key_p, *next_key_p;
+ struct bpf_lpm_trie_key_u8 *key_p, *next_key_p;
size_t key_size;
__u32 value = 0;
int map_fd;
@@ -693,9 +693,9 @@ static void *lpm_test_command(void *arg)
{
int i, j, ret, iter, key_size;
struct lpm_mt_test_info *info = arg;
- struct bpf_lpm_trie_key *key_p;
+ struct bpf_lpm_trie_key_u8 *key_p;
- key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
+ key_size = sizeof(*key_p) + sizeof(__u32);
key_p = alloca(key_size);
for (iter = 0; iter < info->iter; iter++)
for (i = 0; i < MAX_TEST_KEYS; i++) {
@@ -717,7 +717,7 @@ static void *lpm_test_command(void *arg)
ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
assert(ret == 0 || errno == ENOENT);
} else {
- struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
+ struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size);
ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
}
@@ -752,7 +752,7 @@ static void test_lpm_multi_thread(void)
/* create a trie */
value_size = sizeof(__u32);
- key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
+ key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size;
map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts);
/* create 4 threads to test update, delete, lookup and get_next_key */
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 767e0693df10..dfbab214f4d1 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1190,7 +1190,11 @@ static void test_map_in_map(void)
goto out_map_in_map;
}
- bpf_object__load(obj);
+ err = bpf_object__load(obj);
+ if (err) {
+ printf("Failed to load test prog\n");
+ goto out_map_in_map;
+ }
map = bpf_object__find_map_by_name(obj, "mim_array");
if (!map) {
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 1b9387890148..89ff704e9dad 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -547,24 +547,6 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
return bpf_map__fd(map);
}
-static bool is_jit_enabled(void)
-{
- const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
- bool enabled = false;
- int sysctl_fd;
-
- sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
- if (sysctl_fd != -1) {
- char tmpc;
-
- if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
- enabled = (tmpc != '0');
- close(sysctl_fd);
- }
-
- return enabled;
-}
-
int compare_map_keys(int map1_fd, int map2_fd)
{
__u32 key, next_key;
@@ -701,11 +683,69 @@ static const struct argp_option opts[] = {
{},
};
+static FILE *libbpf_capture_stream;
+
+static struct {
+ char *buf;
+ size_t buf_sz;
+} libbpf_output_capture;
+
+/* Creates a global memstream capturing INFO and WARN level output
+ * passed to libbpf_print_fn.
+ * Returns 0 on success, negative value on failure.
+ * On failure the description is printed using PRINT_FAIL and the
+ * current test case is marked as failed.
+ */
+int start_libbpf_log_capture(void)
+{
+ if (libbpf_capture_stream) {
+ PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ libbpf_capture_stream = open_memstream(&libbpf_output_capture.buf,
+ &libbpf_output_capture.buf_sz);
+ if (!libbpf_capture_stream) {
+ PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Destroys global memstream created by start_libbpf_log_capture().
+ * Returns a pointer to captured data which has to be freed.
+ * Returned buffer is null terminated.
+ */
+char *stop_libbpf_log_capture(void)
+{
+ char *buf;
+
+ if (!libbpf_capture_stream)
+ return NULL;
+
+ fputc(0, libbpf_capture_stream);
+ fclose(libbpf_capture_stream);
+ libbpf_capture_stream = NULL;
+ /* get 'buf' after fclose(), see open_memstream() documentation */
+ buf = libbpf_output_capture.buf;
+ memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture));
+ return buf;
+}
+
static int libbpf_print_fn(enum libbpf_print_level level,
const char *format, va_list args)
{
+ if (libbpf_capture_stream && level != LIBBPF_DEBUG) {
+ va_list args2;
+
+ va_copy(args2, args);
+ vfprintf(libbpf_capture_stream, format, args2);
+ }
+
if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
return 0;
+
vfprintf(stdout, format, args);
return 0;
}
@@ -1099,6 +1139,7 @@ static void run_one_test(int test_num)
cleanup_cgroup_environment();
stdio_restore();
+ free(stop_libbpf_log_capture());
dump_test_log(test, state, false, false, NULL);
}
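
The capture helpers above lean on a subtlety of open_memstream(3): the user-visible buffer pointer and size are only guaranteed to be current after fflush() or fclose(), which is why stop_libbpf_log_capture() reads the buffer strictly after fclose() and NUL-terminates the stream by hand. The same lifecycle in a self-contained sketch (plain stdio, no libbpf):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *buf = NULL;
        size_t sz = 0;
        FILE *f = open_memstream(&buf, &sz);

        if (!f)
            return 1;
        fprintf(f, "libbpf: simulated warning\n");
        fputc(0, f);  /* NUL-terminate, as stop_libbpf_log_capture() does */
        fclose(f);    /* buf/sz are only stable after fclose()/fflush() */
        printf("captured %zu bytes: %s", sz, buf);
        free(buf);    /* caller owns the buffer */
        return 0;
    }
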
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 2f9f6f250f17..0ba5a20b19ba 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -385,13 +385,21 @@ int test__join_cgroup(const char *path);
goto goto_label; \
})
+#define ALL_TO_DEV_NULL " >/dev/null 2>&1"
+
#define SYS_NOFAIL(fmt, ...) \
({ \
char cmd[1024]; \
- snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ int n; \
+ n = snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (n < sizeof(cmd) && sizeof(cmd) - n >= sizeof(ALL_TO_DEV_NULL)) \
+ strcat(cmd, ALL_TO_DEV_NULL); \
system(cmd); \
})
+int start_libbpf_log_capture(void);
+char *stop_libbpf_log_capture(void);
+
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
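
The reworked SYS_NOFAIL above appends the redirection only when snprintf() reports that the command fit and enough room remains for the suffix; sizeof() on the string literal counts its terminating NUL, so the strcat() stays in bounds. The same guard in isolation, with an explicit cast that also rejects a negative snprintf() return (hypothetical command, not the macro itself):

    #include <stdio.h>
    #include <string.h>

    #define SUFFIX " >/dev/null 2>&1"

    int main(void)
    {
        char cmd[64];
        int n = snprintf(cmd, sizeof(cmd), "ping -c1 %s", "192.0.2.1");

        /* n >= sizeof(cmd) means snprintf truncated; also require room
         * for SUFFIX including its NUL (counted by sizeof).
         */
        if (n >= 0 && (size_t)n < sizeof(cmd) &&
            sizeof(cmd) - (size_t)n >= sizeof(SUFFIX))
            strcat(cmd, SUFFIX);
        puts(cmd);
        return 0;
    }
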
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index b0068a9d2cfe..80c42583f597 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -19,6 +19,7 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
+#include "testing_helpers.h"
#include "bpf_util.h"
#ifndef ENOTSUPP
@@ -679,7 +680,7 @@ static int load_path(const struct sock_addr_test *test, const char *path)
bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
bpf_program__set_expected_attach_type(prog, test->expected_attach_type);
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
+ bpf_program__set_flags(prog, testing_prog_flags());
err = bpf_object__load(obj);
if (err) {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index f36e41435be7..df04bda1c927 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -67,6 +67,7 @@
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
+#define F_NEEDS_JIT_ENABLED (1 << 2)
/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
@@ -74,6 +75,7 @@
1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
+static bool jit_disabled;
static int skips;
static bool verbose = false;
static int verif_log_level = 0;
@@ -1341,48 +1343,6 @@ static bool cmp_str_seq(const char *log, const char *exp)
return true;
}
-static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
-{
- __u32 buf_element_size = sizeof(struct bpf_insn);
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 xlated_prog_len;
- struct bpf_insn *buf;
-
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("bpf_prog_get_info_by_fd failed");
- return NULL;
- }
-
- xlated_prog_len = info.xlated_prog_len;
- if (xlated_prog_len % buf_element_size) {
- printf("Program length %d is not multiple of %d\n",
- xlated_prog_len, buf_element_size);
- return NULL;
- }
-
- *cnt = xlated_prog_len / buf_element_size;
- buf = calloc(*cnt, buf_element_size);
- if (!buf) {
- perror("can't allocate xlated program buffer");
- return NULL;
- }
-
- bzero(&info, sizeof(info));
- info.xlated_prog_len = xlated_prog_len;
- info.xlated_prog_insns = (__u64)(unsigned long)buf;
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("second bpf_prog_get_info_by_fd failed");
- goto out_free_buf;
- }
-
- return buf;
-
-out_free_buf:
- free(buf);
- return NULL;
-}
-
static bool is_null_insn(struct bpf_insn *insn)
{
struct bpf_insn null_insn = {};
@@ -1505,7 +1465,7 @@ static void print_insn(struct bpf_insn *buf, int cnt)
static bool check_xlated_program(struct bpf_test *test, int fd_prog)
{
struct bpf_insn *buf;
- int cnt;
+ unsigned int cnt;
bool result = true;
bool check_expected = !is_null_insn(test->expected_insns);
bool check_unexpected = !is_null_insn(test->unexpected_insns);
@@ -1513,8 +1473,7 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog)
if (!check_expected && !check_unexpected)
goto out;
- buf = get_xlated_program(fd_prog, &cnt);
- if (!buf) {
+ if (get_xlated_program(fd_prog, &buf, &cnt)) {
printf("FAIL: can't get xlated program\n");
result = false;
goto out;
@@ -1567,6 +1526,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
__u32 pflags;
int i, err;
+ if ((test->flags & F_NEEDS_JIT_ENABLED) && jit_disabled) {
+ printf("SKIP (requires BPF JIT)\n");
+ skips++;
+ sched_yield();
+ return;
+ }
+
fd_prog = -1;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
@@ -1588,7 +1554,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (fixup_skips != skips)
return;
- pflags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
+ pflags = testing_prog_flags();
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
pflags |= BPF_F_STRICT_ALIGNMENT;
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
@@ -1887,6 +1853,8 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
+ jit_disabled = !is_jit_enabled();
+
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index d2458c1b1671..28b6646662af 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -252,6 +252,34 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
int extra_prog_load_log_flags = 0;
+int testing_prog_flags(void)
+{
+ static int cached_flags = -1;
+ static int prog_flags[] = { BPF_F_TEST_RND_HI32, BPF_F_TEST_REG_INVARIANTS };
+ static struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int insn_cnt = ARRAY_SIZE(insns), i, fd, flags = 0;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ if (cached_flags >= 0)
+ return cached_flags;
+
+ for (i = 0; i < ARRAY_SIZE(prog_flags); i++) {
+ opts.prog_flags = prog_flags[i];
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "flag-test", "GPL",
+ insns, insn_cnt, &opts);
+ if (fd >= 0) {
+ flags |= prog_flags[i];
+ close(fd);
+ }
+ }
+
+ cached_flags = flags;
+ return cached_flags;
+}
+
int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
@@ -276,7 +304,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type)
bpf_program__set_type(prog, type);
- flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
+ flags = bpf_program__flags(prog) | testing_prog_flags();
bpf_program__set_flags(prog, flags);
err = bpf_object__load(obj);
@@ -299,7 +327,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.kern_version = kern_version,
- .prog_flags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS,
+ .prog_flags = testing_prog_flags(),
.log_level = extra_prog_load_log_flags,
.log_buf = log_buf,
.log_size = log_buf_sz,
@@ -328,12 +356,12 @@ __u64 read_perf_max_sample_freq(void)
return sample_freq;
}
-static int finit_module(int fd, const char *param_values, int flags)
+int finit_module(int fd, const char *param_values, int flags)
{
return syscall(__NR_finit_module, fd, param_values, flags);
}
-static int delete_module(const char *name, int flags)
+int delete_module(const char *name, int flags)
{
return syscall(__NR_delete_module, name, flags);
}
@@ -387,3 +415,63 @@ int kern_sync_rcu(void)
{
return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}
+
+int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
+{
+ __u32 buf_element_size = sizeof(struct bpf_insn);
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 xlated_prog_len;
+
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("bpf_prog_get_info_by_fd failed");
+ return -1;
+ }
+
+ xlated_prog_len = info.xlated_prog_len;
+ if (xlated_prog_len % buf_element_size) {
+ printf("Program length %u is not multiple of %u\n",
+ xlated_prog_len, buf_element_size);
+ return -1;
+ }
+
+ *cnt = xlated_prog_len / buf_element_size;
+ *buf = calloc(*cnt, buf_element_size);
+ if (!*buf) {
+ perror("can't allocate xlated program buffer");
+ return -ENOMEM;
+ }
+
+ bzero(&info, sizeof(info));
+ info.xlated_prog_len = xlated_prog_len;
+ info.xlated_prog_insns = (__u64)(unsigned long)*buf;
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("second bpf_prog_get_info_by_fd failed");
+ goto out_free_buf;
+ }
+
+ return 0;
+
+out_free_buf:
+ free(*buf);
+ *buf = NULL;
+ return -1;
+}
+
+bool is_jit_enabled(void)
+{
+ const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
+ bool enabled = false;
+ int sysctl_fd;
+
+ sysctl_fd = open(jit_sysctl, O_RDONLY);
+ if (sysctl_fd != -1) {
+ char tmpc;
+
+ if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
+ enabled = (tmpc != '0');
+ close(sysctl_fd);
+ }
+
+ return enabled;
+}
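
get_xlated_program() above follows the usual two-pass bpf_prog_get_info_by_fd() protocol: a first call with an empty info struct to learn xlated_prog_len, then a second call with a caller-allocated buffer to fetch the post-rewrite instructions. A hedged usage sketch; it assumes a valid fd for a loaded program and sufficient privileges (e.g. CAP_BPF) to read the xlated image, and the caller frees the buffer:

    #include <stdio.h>
    #include <stdlib.h>
    #include <linux/bpf.h>
    #include "testing_helpers.h"

    static void dump_xlated(int prog_fd)
    {
        struct bpf_insn *insns = NULL;
        __u32 i, cnt = 0;

        if (get_xlated_program(prog_fd, &insns, &cnt))
            return;
        for (i = 0; i < cnt; i++)
            printf("insn %u: code 0x%02x\n", i, insns[i].code);
        free(insns);  /* buffer is calloc()ed by the helper */
    }
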
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index 35284faff4f2..d55f6ab12433 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -36,6 +36,8 @@ __u64 read_perf_max_sample_freq(void);
int load_bpf_testmod(bool verbose);
int unload_bpf_testmod(bool verbose);
int kern_sync_rcu(void);
+int finit_module(int fd, const char *param_values, int flags);
+int delete_module(const char *name, int flags);
static inline __u64 get_time_ns(void)
{
@@ -46,4 +48,12 @@ static inline __u64 get_time_ns(void)
return (u64)t.tv_sec * 1000000000 + t.tv_nsec;
}
+struct bpf_insn;
+/* Request BPF program instructions after all rewrites are applied,
+ * e.g. verifier.c:convert_ctx_access() is done.
+ */
+int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt);
+int testing_prog_flags(void);
+bool is_jit_enabled(void);
+
#endif /* __TESTING_HELPERS_H */
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 4faa898ff7fc..27fd7ed3e4b0 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -271,7 +271,7 @@ ssize_t get_uprobe_offset(const void *addr)
* addi r2,r2,XXXX
*/
{
- const u32 *insn = (const u32 *)(uintptr_t)addr;
+ const __u32 *insn = (const __u32 *)(uintptr_t)addr;
if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
diff --git a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
index a535d41dc20d..59125b22ae39 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
@@ -57,6 +57,7 @@
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } },
@@ -90,6 +91,7 @@
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
@@ -127,6 +129,7 @@
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = {
@@ -165,6 +168,7 @@
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = {
@@ -235,6 +239,7 @@
},
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.func_info = {
{ 0, MAIN_TYPE },
@@ -252,6 +257,7 @@
.unexpected_insns = { HELPER_CALL_INSN() },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 8a2ff81d8350..0a9293a57211 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -183,10 +183,10 @@
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
- mark_precise: frame0: parent state regs=r4 stack=:\
+ mark_precise: frame0: parent state regs=r4 stack=-8:\
mark_precise: frame0: last_idx 6 first_idx 4\
- mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
- mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
+ mark_precise: frame0: regs=r4 stack=-8 before 6: (b7) r0 = -1\
+ mark_precise: frame0: regs=r4 stack=-8 before 5: (79) r4 = *(u64 *)(r10 -8)\
mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
mark_precise: frame0: parent state regs=r0 stack=:\
mark_precise: frame0: last_idx 3 first_idx 3\
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 878d68db0325..bdf5d8180067 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -480,7 +480,7 @@ peek:
for (int j = 0; j < 500; j++) {
if (complete_tx(xsk, clock_id))
break;
- usleep(10*1000);
+ usleep(10);
}
}
}
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index 8a72bb7de70f..03a089165d3f 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -15,7 +15,10 @@ TEST_PROGS := \
TEST_FILES := \
lag_lib.sh \
bond_topo_2d1c.sh \
- bond_topo_3d1c.sh \
- net_forwarding_lib.sh
+ bond_topo_3d1c.sh
+
+TEST_INCLUDES := \
+ ../../../net/forwarding/lib.sh \
+ ../../../net/lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
index 6358df5752f9..1ec7f59db7f4 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
@@ -20,21 +20,21 @@
# +------+ +------+
#
# We use veths instead of physical interfaces
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
set -e
-tmp=$(mktemp -q dump.XXXXXX)
cleanup() {
ip link del fab-br0 >/dev/null 2>&1 || :
ip link del fbond >/dev/null 2>&1 || :
ip link del veth1-bond >/dev/null 2>&1 || :
ip link del veth2-bond >/dev/null 2>&1 || :
- modprobe -r bonding >/dev/null 2>&1 || :
- rm -f -- ${tmp}
}
trap cleanup 0 1 2
cleanup
-sleep 1
# create the bridge
ip link add fab-br0 address 52:54:00:3B:7C:A6 mtu 1500 type bridge \
@@ -67,13 +67,12 @@ ip link set fab-br0 up
ip link set fbond up
ip addr add dev fab-br0 10.0.0.3
-tcpdump -n -i veth1-end -e ether proto 0x8809 >${tmp} 2>&1 &
-sleep 15
-pkill tcpdump >/dev/null 2>&1
rc=0
-num=$(grep "packets captured" ${tmp} | awk '{print $1}')
-if test "$num" -gt 0; then
- echo "PASS, captured ${num}"
+tc qdisc add dev veth1-end clsact
+tc filter add dev veth1-end ingress protocol 0x8809 pref 1 handle 101 flower skip_hw action pass
+if slowwait_for_counter 15 2 \
+ tc_rule_handle_stats_get "dev veth1-end ingress" 101 ".packets" "" &> /dev/null; then
+ echo "PASS, captured 2"
else
echo "FAIL"
rc=1
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
index 862e947e17c7..8293dbc7c18f 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
@@ -11,7 +11,7 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
bond_check_flags()
{
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh b/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh
index 89af402fabbe..78d3e0fe6604 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh
@@ -17,6 +17,11 @@
# +----------------+
#
# We use veths instead of physical interfaces
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
sw="sw-$(mktemp -u XXXXXX)"
host="ns-$(mktemp -u XXXXXX)"
@@ -26,6 +31,16 @@ cleanup()
ip netns del $host
}
+wait_lladdr_dad()
+{
+ $@ | grep fe80 | grep -qv tentative
+}
+
+wait_bond_up()
+{
+ $@ | grep -q 'state UP'
+}
+
trap cleanup 0 1 2
ip netns add $sw
@@ -37,8 +52,8 @@ ip -n $host link add veth1 type veth peer name veth1 netns $sw
ip -n $sw link add br0 type bridge
ip -n $sw link set br0 up
sw_lladdr=$(ip -n $sw addr show br0 | awk '/fe80/{print $2}' | cut -d'/' -f1)
-# sleep some time to make sure bridge lladdr pass DAD
-sleep 2
+# wait some time to make sure the bridge lladdr passes DAD
+slowwait 2 wait_lladdr_dad ip -n $sw addr show br0
ip -n $host link add bond0 type bond mode 1 ns_ip6_target ${sw_lladdr} \
arp_validate 3 arp_interval 1000
@@ -53,7 +68,7 @@ ip -n $sw link set veth1 master br0
ip -n $sw link set veth0 up
ip -n $sw link set veth1 up
-sleep 5
+slowwait 5 wait_bond_up ip -n $host link show bond0
rc=0
if ip -n $host link show bond0 | grep -q LOWER_UP; then
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
index 9a3d3c389dad..41d0859feb7d 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
@@ -45,15 +45,23 @@ skip_ns()
}
active_slave=""
+active_slave_changed()
+{
+ local old_active_slave=$1
+ local new_active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" \
+ ".[].linkinfo.info_data.active_slave")
+ [ "$new_active_slave" != "$old_active_slave" -a "$new_active_slave" != "null" ]
+}
+
check_active_slave()
{
local target_active_slave=$1
+ slowwait 5 active_slave_changed $active_slave
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
test "$active_slave" = "$target_active_slave"
check_err $? "Current active slave is $active_slave but not $target_active_slave"
}
-
# Test bonding prio option
prio_test()
{
@@ -86,13 +94,13 @@ prio_test()
# active slave should be the higher prio slave
ip -n ${s_ns} link set $active_slave down
- bond_check_connection "fail over"
check_active_slave eth2
+ bond_check_connection "fail over"
# when only 1 slave is up
ip -n ${s_ns} link set $active_slave down
- bond_check_connection "only 1 slave up"
check_active_slave eth0
+ bond_check_connection "only 1 slave up"
# when a higher prio slave change to up
ip -n ${s_ns} link set eth2 up
@@ -142,8 +150,8 @@ prio_test()
check_active_slave "eth1"
ip -n ${s_ns} link set $active_slave down
- bond_check_connection "change slave prio"
check_active_slave "eth0"
+ bond_check_connection "change slave prio"
fi
}
@@ -201,6 +209,15 @@ prio()
prio_ns "active-backup"
}
+wait_mii_up()
+{
+ for i in $(seq 0 2); do
+ mii_status=$(cmd_jq "ip -n ${s_ns} -j -d link show eth$i" ".[].linkinfo.info_slave_data.mii_status")
+ [ ${mii_status} != "UP" ] && return 1
+ done
+ return 0
+}
+
arp_validate_test()
{
local param="$1"
@@ -213,7 +230,7 @@ arp_validate_test()
[ $RET -ne 0 ] && log_test "arp_validate" "$retmsg"
# wait for a while to make sure the mii status stable
- sleep 5
+ slowwait 5 wait_mii_up
for i in $(seq 0 2); do
mii_status=$(cmd_jq "ip -n ${s_ns} -j -d link show eth$i" ".[].linkinfo.info_slave_data.mii_status")
if [ ${mii_status} != "UP" ]; then
@@ -278,10 +295,13 @@ garp_test()
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
ip -n ${s_ns} link set ${active_slave} down
- exp_num=$(echo "${param}" | cut -f6 -d ' ')
- sleep $((exp_num + 2))
+ # wait for active link change
+ slowwait 2 active_slave_changed $active_slave
+ exp_num=$(echo "${param}" | cut -f6 -d ' ')
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+ slowwait_for_counter $((exp_num + 5)) $exp_num \
+ tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}"
# check result
real_num=$(tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}")
@@ -298,8 +318,8 @@ garp_test()
num_grat_arp()
{
local val
- for val in 10 20 30 50; do
- garp_test "mode active-backup miimon 100 num_grat_arp $val peer_notify_delay 1000"
+ for val in 10 20 30; do
+ garp_test "mode active-backup miimon 10 num_grat_arp $val peer_notify_delay 100"
log_test "num_grat_arp" "active-backup miimon num_grat_arp $val"
done
}
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
index a509ef949dcf..195ef83cfbf1 100644
--- a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
@@ -28,7 +28,7 @@
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source ${lib_dir}/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
s_ns="s-$(mktemp -u XXXXXX)"
c_ns="c-$(mktemp -u XXXXXX)"
@@ -73,7 +73,6 @@ server_create()
ip -n ${s_ns} link set bond0 up
ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
- sleep 2
}
# Reset bond with new mode and options
@@ -96,7 +95,8 @@ bond_reset()
ip -n ${s_ns} link set bond0 up
ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
- sleep 2
+ # Wait for the IPv6 address to be ready, as it needs DAD
+ slowwait 2 ip netns exec ${s_ns} ping6 ${c_ip6} -c 1 -W 0.1 &> /dev/null
}
server_destroy()
@@ -150,7 +150,7 @@ bond_check_connection()
{
local msg=${1:-"check connection"}
- sleep 2
+ slowwait 2 ip netns exec ${s_ns} ping ${c_ip4} -c 1 -W 0.1 &> /dev/null
ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null
check_err $? "${msg}: ping failed"
ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null
diff --git a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
index 5cfe7d8ebc25..e6fa24eded5b 100755
--- a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
@@ -14,7 +14,7 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
source "$lib_dir"/lag_lib.sh
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
index dbdd736a41d3..bf9bcd1b5ec0 100644
--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
@@ -107,13 +107,12 @@ lag_setup2x2()
NAMESPACES="${namespaces}"
}
-# cleanup all lag related namespaces and remove the bonding module
+# cleanup all lag related namespaces
lag_cleanup()
{
for n in ${NAMESPACES}; do
ip netns delete ${n} >/dev/null 2>&1 || true
done
- modprobe -r bonding
}
SWITCH="lag_node1"
@@ -159,7 +158,7 @@ test_bond_recovery()
create_bond $@
# verify connectivity
- ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+ slowwait 2 ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 -W 0.1 &> /dev/null
check_err $? "No connectivity"
# force the links of the bond down
@@ -169,7 +168,7 @@ test_bond_recovery()
ip netns exec ${SWITCH} ip link set eth1 down
# re-verify connectivity
- ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+ slowwait 2 ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 -W 0.1 &> /dev/null
local rc=$?
check_err $rc "Bond failed to recover"
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
index b76bf5030952..9d26ab4cad0b 100755
--- a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
+++ b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
@@ -23,7 +23,7 @@ REQUIRE_MZ=no
REQUIRE_JQ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
source "$lib_dir"/lag_lib.sh
cleanup()
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
index 8c2619002147..2d275b3e47dd 100755
--- a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
+++ b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
@@ -23,7 +23,7 @@ REQUIRE_MZ=no
REQUIRE_JQ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
source "$lib_dir"/lag_lib.sh
cleanup()
diff --git a/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
deleted file mode 120000
index 39c96828c5ef..000000000000
--- a/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/Makefile b/tools/testing/selftests/drivers/net/dsa/Makefile
index c393e7b73805..cd6817fe5be6 100644
--- a/tools/testing/selftests/drivers/net/dsa/Makefile
+++ b/tools/testing/selftests/drivers/net/dsa/Makefile
@@ -11,8 +11,22 @@ TEST_PROGS = bridge_locked_port.sh \
tc_actions.sh \
test_bridge_fdb_stress.sh
-TEST_PROGS_EXTENDED := lib.sh tc_common.sh
+TEST_FILES := \
+ run_net_forwarding_test.sh \
+ forwarding.config
-TEST_FILES := forwarding.config
+TEST_INCLUDES := \
+ ../../../net/forwarding/bridge_locked_port.sh \
+ ../../../net/forwarding/bridge_mdb.sh \
+ ../../../net/forwarding/bridge_mld.sh \
+ ../../../net/forwarding/bridge_vlan_aware.sh \
+ ../../../net/forwarding/bridge_vlan_mcast.sh \
+ ../../../net/forwarding/bridge_vlan_unaware.sh \
+ ../../../net/forwarding/lib.sh \
+ ../../../net/forwarding/local_termination.sh \
+ ../../../net/forwarding/no_forwarding.sh \
+ ../../../net/forwarding/tc_actions.sh \
+ ../../../net/forwarding/tc_common.sh \
+ ../../../net/lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
index f5eb940c4c7c..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_locked_port.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
index 76492da525f7..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_mdb.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
index 81a7e0df0474..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_mld.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
index 9831ed74376a..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_vlan_aware.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
index 7f3c3f0bf719..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_vlan_mcast.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
index bf1a57e6bde1..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_vlan_unaware.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/lib.sh b/tools/testing/selftests/drivers/net/dsa/lib.sh
deleted file mode 120000
index 39c96828c5ef..000000000000
--- a/tools/testing/selftests/drivers/net/dsa/lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/local_termination.sh b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
index c08166f84501..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/local_termination.sh
+++ b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
@@ -1 +1 @@
-../../../net/forwarding/local_termination.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
index b9757466bc97..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
+++ b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
@@ -1 +1 @@
-../../../net/forwarding/no_forwarding.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh b/tools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh
new file mode 100755
index 000000000000..4106c0a102ea
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+libdir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
+testname=$(basename "${BASH_SOURCE[0]}")
+
+source "$libdir"/forwarding.config
+cd "$libdir"/../../../net/forwarding/ || exit 1
+source "./$testname" "$@"
diff --git a/tools/testing/selftests/drivers/net/dsa/tc_actions.sh b/tools/testing/selftests/drivers/net/dsa/tc_actions.sh
index 306213d9430e..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/tc_actions.sh
+++ b/tools/testing/selftests/drivers/net/dsa/tc_actions.sh
@@ -1 +1 @@
-../../../net/forwarding/tc_actions.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/tc_common.sh b/tools/testing/selftests/drivers/net/dsa/tc_common.sh
deleted file mode 120000
index bc3465bdc36b..000000000000
--- a/tools/testing/selftests/drivers/net/dsa/tc_common.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/tc_common.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
index 92acab83fbe2..74682151d04d 100755
--- a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
+++ b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
@@ -19,7 +19,7 @@ REQUIRE_JQ="no"
REQUIRE_MZ="no"
NETIF_CREATE="no"
lib_dir=$(dirname "$0")
-source "$lib_dir"/lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
cleanup() {
echo "Cleaning up"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index 616d3581419c..31252bc8775e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -869,7 +869,7 @@ bloom_simple_test()
bloom_complex_test()
{
# Bloom filter index computation is affected from region ID, eRP
- # ID and from the region key size. In order to excercise those parts
+ # ID and from the region key size. In order to exercise those parts
# of the Bloom filter code, use a series of regions, each with a
# different key size and send packet that should hit all of them.
local index
diff --git a/tools/testing/selftests/drivers/net/netdevsim/Makefile b/tools/testing/selftests/drivers/net/netdevsim/Makefile
new file mode 100644
index 000000000000..5bace0b7fb57
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = devlink.sh \
+ devlink_in_netns.sh \
+ devlink_trap.sh \
+ ethtool-coalesce.sh \
+ ethtool-fec.sh \
+ ethtool-pause.sh \
+ ethtool-ring.sh \
+ fib.sh \
+ hw_stats_l3.sh \
+ nexthop.sh \
+ peer.sh \
+ psample.sh \
+ tc-mq-visibility.sh \
+ udp_tunnel_nic.sh \
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index 46e20b13473c..b5ea2526f23c 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -31,7 +31,7 @@ devlink_wait()
fw_flash_test()
{
- DUMMYFILE=$(find /lib/firmware -maxdepth 1 -type f -printf '%f\n' |head -1)
+ DUMMYFILE=$(find /lib/firmware -type f -printf '%P\n' | head -1)
RET=0
if [ -z "$DUMMYFILE" ]
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
index 7d7829f57550..6c52ce1b0450 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
@@ -49,7 +49,7 @@ for o in llrs rs; do
Active FEC encoding: ${o^^}"
done
-# Test mutliple bits
+# Test multiple bits
$ETHTOOL --set-fec $NSIM_NETDEV encoding rs llrs
check $?
s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
diff --git a/tools/testing/selftests/drivers/net/netdevsim/peer.sh b/tools/testing/selftests/drivers/net/netdevsim/peer.sh
new file mode 100755
index 000000000000..aed62d9e6c0a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/peer.sh
@@ -0,0 +1,143 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+source ../../../net/net_helper.sh
+
+NSIM_DEV_1_ID=$((256 + RANDOM % 256))
+NSIM_DEV_1_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_DEV_1_ID
+NSIM_DEV_2_ID=$((512 + RANDOM % 256))
+NSIM_DEV_2_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_DEV_2_ID
+
+NSIM_DEV_SYS_NEW=/sys/bus/netdevsim/new_device
+NSIM_DEV_SYS_DEL=/sys/bus/netdevsim/del_device
+NSIM_DEV_SYS_LINK=/sys/bus/netdevsim/link_device
+NSIM_DEV_SYS_UNLINK=/sys/bus/netdevsim/unlink_device
+
+socat_check()
+{
+ if [ ! -x "$(command -v socat)" ]; then
+ echo "socat command not found. Skipping test"
+ return 1
+ fi
+
+ return 0
+}
+
+setup_ns()
+{
+ set -e
+ ip netns add nssv
+ ip netns add nscl
+
+ NSIM_DEV_1_NAME=$(find $NSIM_DEV_1_SYS/net -maxdepth 1 -type d ! \
+ -path $NSIM_DEV_1_SYS/net -exec basename {} \;)
+ NSIM_DEV_2_NAME=$(find $NSIM_DEV_2_SYS/net -maxdepth 1 -type d ! \
+ -path $NSIM_DEV_2_SYS/net -exec basename {} \;)
+
+ ip link set $NSIM_DEV_1_NAME netns nssv
+ ip link set $NSIM_DEV_2_NAME netns nscl
+
+ ip netns exec nssv ip addr add '192.168.1.1/24' dev $NSIM_DEV_1_NAME
+ ip netns exec nscl ip addr add '192.168.1.2/24' dev $NSIM_DEV_2_NAME
+
+ ip netns exec nssv ip link set dev $NSIM_DEV_1_NAME up
+ ip netns exec nscl ip link set dev $NSIM_DEV_2_NAME up
+ set +e
+}
+
+cleanup_ns()
+{
+ ip netns del nscl
+ ip netns del nssv
+}
+
+###
+### Code start
+###
+
+socat_check || exit 4
+
+modprobe netdevsim
+
+# linking
+
+echo $NSIM_DEV_1_ID > $NSIM_DEV_SYS_NEW
+echo $NSIM_DEV_2_ID > $NSIM_DEV_SYS_NEW
+udevadm settle
+
+setup_ns
+
+NSIM_DEV_1_FD=$((256 + RANDOM % 256))
+exec {NSIM_DEV_1_FD}</var/run/netns/nssv
+NSIM_DEV_1_IFIDX=$(ip netns exec nssv cat /sys/class/net/$NSIM_DEV_1_NAME/ifindex)
+
+NSIM_DEV_2_FD=$((256 + RANDOM % 256))
+exec {NSIM_DEV_2_FD}</var/run/netns/nscl
+NSIM_DEV_2_IFIDX=$(ip netns exec nscl cat /sys/class/net/$NSIM_DEV_2_NAME/ifindex)
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_2_FD:2000" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "linking with non-existent netdevsim should fail"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX 2000:$NSIM_DEV_2_IFIDX" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "linking with non-existent netnsid should fail"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "linking with self should fail"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_2_FD:$NSIM_DEV_2_IFIDX" > $NSIM_DEV_SYS_LINK
+if [ $? -ne 0 ]; then
+ echo "linking netdevsim1 with netdevsim2 should succeed"
+ cleanup_ns
+ exit 1
+fi
+
+# argument error checking
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_2_FD:a" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "invalid arg should fail"
+ cleanup_ns
+ exit 1
+fi
+
+# send/recv packets
+
+tmp_file=$(mktemp)
+ip netns exec nssv socat TCP-LISTEN:1234,fork $tmp_file &
+pid=$!
+res=0
+
+wait_local_port_listen nssv 1234 tcp
+
+echo "HI" | ip netns exec nscl socat STDIN TCP:192.168.1.1:1234
+
+count=$(cat $tmp_file | wc -c)
+if [[ $count -ne 3 ]]; then
+ echo "expected 3 bytes, got $count"
+ res=1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX" > $NSIM_DEV_SYS_UNLINK
+
+echo $NSIM_DEV_2_ID > $NSIM_DEV_SYS_DEL
+
+kill $pid
+echo $NSIM_DEV_1_ID > $NSIM_DEV_SYS_DEL
+
+cleanup_ns
+
+modprobe -r netdevsim
+
+exit $res
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
index f98435c502f6..384cfa3d38a6 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
@@ -270,7 +270,7 @@ for port in 0 1; do
echo 1 > $NSIM_DEV_SYS/new_port
fi
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
msg="new NIC device created"
exp0=( 0 0 0 0 )
@@ -284,8 +284,8 @@ for port in 0 1; do
msg="VxLAN v4 devices go down"
exp0=( 0 0 0 0 )
- ifconfig vxlan1 down
- ifconfig vxlan0 down
+ ip link set dev vxlan1 down
+ ip link set dev vxlan0 down
check_tables
msg="VxLAN v6 devices"
@@ -293,7 +293,7 @@ for port in 0 1; do
new_vxlan vxlanA 4789 $NSIM_NETDEV 6
for ifc in vxlan0 vxlan1; do
- ifconfig $ifc up
+ ip link set dev $ifc up
done
new_vxlan vxlanB 4789 $NSIM_NETDEV 6
@@ -307,14 +307,14 @@ for port in 0 1; do
new_geneve gnv0 6081
msg="NIC device goes down"
- ifconfig $NSIM_NETDEV down
+ ip link set dev $NSIM_NETDEV down
if [ $port -eq 1 ]; then
exp0=( 0 0 0 0 )
exp1=( 0 0 0 0 )
fi
check_tables
msg="NIC device goes up again"
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
exp0=( `mke 4789 1` `mke 4790 1` 0 0 )
exp1=( `mke 6081 2` 0 0 0 )
check_tables
@@ -433,7 +433,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
overflow_table0 "overflow NIC table"
overflow_table1 "overflow NIC table"
@@ -491,7 +491,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
overflow_table0 "overflow NIC table"
overflow_table1 "overflow NIC table"
@@ -548,7 +548,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
overflow_table0 "destroy NIC"
overflow_table1 "destroy NIC"
@@ -578,7 +578,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
msg="create VxLANs v6"
new_vxlan vxlanA0 10000 $NSIM_NETDEV 6
@@ -639,7 +639,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
@@ -695,7 +695,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
msg="create VxLANs v6"
exp0=( `mke 10000 1` 0 0 0 )
@@ -755,7 +755,7 @@ for port in 0 1; do
echo $port > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
msg="create VxLANs v6"
exp0=( `mke 10000 1` 0 0 0 )
@@ -768,7 +768,7 @@ for port in 0 1; do
check_tables
msg="NIC device goes down"
- ifconfig $NSIM_NETDEV down
+ ip link set dev $NSIM_NETDEV down
if [ $port -eq 1 ]; then
exp0=( 0 0 0 0 )
exp1=( 0 0 0 0 )
@@ -779,7 +779,7 @@ for port in 0 1; do
check_tables
msg="NIC device goes up again"
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
exp0=( `mke 10000 1` 0 0 0 )
check_tables
@@ -827,12 +827,12 @@ new_vxlan vxlan1 4789 $NSIM_NETDEV2
msg="VxLAN v4 devices go down"
exp0=( 0 0 0 0 )
-ifconfig vxlan1 down
-ifconfig vxlan0 down
+ip link set dev vxlan1 down
+ip link set dev vxlan0 down
check_tables
for ifc in vxlan0 vxlan1; do
- ifconfig $ifc up
+ ip link set dev $ifc up
done
msg="VxLAN v6 device"
@@ -844,11 +844,11 @@ exp1=( `mke 6081 2` 0 0 0 )
new_geneve gnv0 6081
msg="NIC device goes down"
-ifconfig $NSIM_NETDEV down
+ip link set dev $NSIM_NETDEV down
check_tables
msg="NIC device goes up again"
-ifconfig $NSIM_NETDEV up
+ip link set dev $NSIM_NETDEV up
check_tables
for i in `seq 2`; do
diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile
index 6a86e61e8bfe..2d5a76d99181 100644
--- a/tools/testing/selftests/drivers/net/team/Makefile
+++ b/tools/testing/selftests/drivers/net/team/Makefile
@@ -3,8 +3,9 @@
TEST_PROGS := dev_addr_lists.sh
-TEST_FILES := \
- lag_lib.sh \
- net_forwarding_lib.sh
+TEST_INCLUDES := \
+ ../bonding/lag_lib.sh \
+ ../../../net/forwarding/lib.sh \
+ ../../../net/lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
index 33913112d5ca..b1ec7755b783 100755
--- a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
@@ -11,9 +11,9 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
-source "$lib_dir"/lag_lib.sh
+source "$lib_dir"/../bonding/lag_lib.sh
destroy()
diff --git a/tools/testing/selftests/drivers/net/team/lag_lib.sh b/tools/testing/selftests/drivers/net/team/lag_lib.sh
deleted file mode 120000
index e1347a10afde..000000000000
--- a/tools/testing/selftests/drivers/net/team/lag_lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../bonding/lag_lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
deleted file mode 120000
index 39c96828c5ef..000000000000
--- a/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index a781e6311810..541bf192e30e 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -25,6 +25,7 @@
* ksft_test_result_skip(fmt, ...);
* ksft_test_result_xfail(fmt, ...);
* ksft_test_result_error(fmt, ...);
+ * ksft_test_result_code(exit_code, test_name, fmt, ...);
*
* When all tests are finished, clean up and exit the program with one of:
*
@@ -254,6 +255,50 @@ static inline __printf(1, 2) void ksft_test_result_error(const char *msg, ...)
va_end(args);
}
+static inline __printf(3, 4)
+void ksft_test_result_code(int exit_code, const char *test_name,
+ const char *msg, ...)
+{
+ const char *tap_code = "ok";
+ const char *directive = "";
+ int saved_errno = errno;
+ va_list args;
+
+ switch (exit_code) {
+ case KSFT_PASS:
+ ksft_cnt.ksft_pass++;
+ break;
+ case KSFT_XFAIL:
+ directive = " # XFAIL ";
+ ksft_cnt.ksft_xfail++;
+ break;
+ case KSFT_XPASS:
+ directive = " # XPASS ";
+ ksft_cnt.ksft_xpass++;
+ break;
+ case KSFT_SKIP:
+ directive = " # SKIP ";
+ ksft_cnt.ksft_xskip++;
+ break;
+ case KSFT_FAIL:
+ default:
+ tap_code = "not ok";
+ ksft_cnt.ksft_fail++;
+ break;
+ }
+
+ /* Docs seem to call for double space if directive is absent */
+ if (!directive[0] && msg[0])
+ directive = " # ";
+
+ va_start(args, msg);
+ printf("%s %u %s%s", tap_code, ksft_test_num(), test_name, directive);
+ errno = saved_errno;
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+}
+
static inline int ksft_exit_pass(void)
{
ksft_print_cnts();
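
ksft_test_result_code() folds the per-result helpers into a single entry point keyed on the KSFT_* exit codes, selecting both the TAP verdict ("ok" / "not ok") and the matching counter; this is what lets the reworked harness report the full range of pass / fail / skip / xfail / xpass uniformly. A hedged usage sketch with made-up test names, assuming the selftests include path:

    #include "kselftest.h"

    int main(void)
    {
        ksft_print_header();
        ksft_set_plan(2);
        /* hypothetical results; the exit code picks directive and counter */
        ksft_test_result_code(KSFT_PASS, "feature_probe", "took %d ms", 3);
        ksft_test_result_code(KSFT_XFAIL, "known_bug", "tracked upstream");
        ksft_finished();
    }
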
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index e05ac8261046..4fd735e48ee7 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -56,6 +56,7 @@
#include <asm/types.h>
#include <ctype.h>
#include <errno.h>
+#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
@@ -95,14 +96,6 @@
* E.g., #define TH_LOG_ENABLED 1
*
* If no definition is provided, logging is enabled by default.
- *
- * If there is no way to print an error message for the process running the
- * test (e.g. not allowed to write to stderr), it is still possible to get the
- * ASSERT_* number for which the test failed. This behavior can be enabled by
- * writing `_metadata->no_print = true;` before the check sequence that is
- * unable to print. When an error occur, instead of printing an error message
- * and calling `abort(3)`, the test process call `_exit(2)` with the assert
- * number as argument, which is then printed by the parent process.
*/
#define TH_LOG(fmt, ...) do { \
if (TH_LOG_ENABLED) \
@@ -135,8 +128,7 @@
fprintf(TH_LOG_STREAM, "# SKIP %s\n", \
_metadata->results->reason); \
} \
- _metadata->passed = 1; \
- _metadata->skip = 1; \
+ _metadata->exit_code = KSFT_SKIP; \
_metadata->trigger = 0; \
statement; \
} while (0)
@@ -363,6 +355,11 @@
* Defines a test that depends on a fixture (e.g., is part of a test case).
* Very similar to TEST() except that *self* is the setup instance of fixture's
* datatype exposed for use by the implementation.
+ *
+ * The @test_name code is run in a separate process sharing the same memory
+ * (i.e. vfork), which means that the test process can update its privileges
+ * without impacting the related FIXTURE_TEARDOWN() (e.g. to remove files from
+ * a directory where write access was dropped).
*/
#define TEST_F(fixture_name, test_name) \
__TEST_F_IMPL(fixture_name, test_name, -1, TEST_TIMEOUT_DEFAULT)
@@ -384,17 +381,34 @@
{ \
/* fixture data is alloced, setup, and torn down per call. */ \
FIXTURE_DATA(fixture_name) self; \
+ pid_t child = 1; \
+ int status = 0; \
memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
if (setjmp(_metadata->env) == 0) { \
- fixture_name##_setup(_metadata, &self, variant->data); \
- /* Let setup failure terminate early. */ \
- if (!_metadata->passed || _metadata->skip) \
- return; \
- _metadata->setup_completed = true; \
- fixture_name##_##test_name(_metadata, &self, variant->data); \
+ /* Use the same _metadata. */ \
+ child = vfork(); \
+ if (child == 0) { \
+ fixture_name##_setup(_metadata, &self, variant->data); \
+ /* Let setup failure terminate early. */ \
+ if (_metadata->exit_code) \
+ _exit(0); \
+ _metadata->setup_completed = true; \
+ fixture_name##_##test_name(_metadata, &self, variant->data); \
+ } else if (child < 0 || child != waitpid(child, &status, 0)) { \
+ ksft_print_msg("ERROR SPAWNING TEST GRANDCHILD\n"); \
+ _metadata->exit_code = KSFT_FAIL; \
+ } \
+ } \
+ if (child == 0) { \
+ if (_metadata->setup_completed && !_metadata->teardown_parent) \
+ fixture_name##_teardown(_metadata, &self, variant->data); \
+ _exit(0); \
} \
- if (_metadata->setup_completed) \
+ if (_metadata->setup_completed && _metadata->teardown_parent) \
fixture_name##_teardown(_metadata, &self, variant->data); \
+ if (!WIFEXITED(status) && WIFSIGNALED(status)) \
+ /* Forward signal to __wait_for_test(). */ \
+ kill(getpid(), WTERMSIG(status)); \
__test_check_assert(_metadata); \
} \
static struct __test_metadata \
@@ -404,6 +418,7 @@
.fixture = &_##fixture_name##_fixture_object, \
.termsig = signal, \
.timeout = tmout, \
+ .teardown_parent = false, \
}; \
static void __attribute__((constructor)) \
_register_##fixture_name##_##test_name(void) \
@@ -694,18 +709,12 @@
for (; _metadata->trigger; _metadata->trigger = \
__bail(_assert, _metadata))
-#define __INC_STEP(_metadata) \
- /* Keep "step" below 255 (which is used for "SKIP" reporting). */ \
- if (_metadata->passed && _metadata->step < 253) \
- _metadata->step++;
-
#define is_signed_type(var) (!!(((__typeof__(var))(-1)) < (__typeof__(var))1))
#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
/* Avoid multiple evaluation of the cases */ \
__typeof__(_expected) __exp = (_expected); \
__typeof__(_seen) __seen = (_seen); \
- if (_assert) __INC_STEP(_metadata); \
if (!(__exp _t __seen)) { \
/* Report with actual signedness to avoid weird output. */ \
switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
@@ -742,7 +751,7 @@
break; \
} \
} \
- _metadata->passed = 0; \
+ _metadata->exit_code = KSFT_FAIL; \
/* Ensure the optional handler is triggered */ \
_metadata->trigger = 1; \
} \
@@ -751,10 +760,9 @@
#define __EXPECT_STR(_expected, _seen, _t, _assert) do { \
const char *__exp = (_expected); \
const char *__seen = (_seen); \
- if (_assert) __INC_STEP(_metadata); \
if (!(strcmp(__exp, __seen) _t 0)) { \
__TH_LOG("Expected '%s' %s '%s'.", __exp, #_t, __seen); \
- _metadata->passed = 0; \
+ _metadata->exit_code = KSFT_FAIL; \
_metadata->trigger = 1; \
} \
} while (0); OPTIONAL_HANDLER(_assert)
@@ -800,6 +808,37 @@ struct __fixture_metadata {
.prev = &_fixture_global,
};
+struct __test_xfail {
+ struct __fixture_metadata *fixture;
+ struct __fixture_variant_metadata *variant;
+ struct __test_metadata *test;
+ struct __test_xfail *prev, *next;
+};
+
+/**
+ * XFAIL_ADD() - mark variant + test case combination as expected to fail
+ * @fixture_name: name of the fixture
+ * @variant_name: name of the variant
+ * @test_name: name of the test case
+ *
+ * Mark a combination of variant + test case for a given fixture as expected
+ * to fail. Tests marked this way will report XPASS / XFAIL return codes
+ * instead of PASS / FAIL, and use the respective counters.
+ */
+#define XFAIL_ADD(fixture_name, variant_name, test_name) \
+ static struct __test_xfail \
+ _##fixture_name##_##variant_name##_##test_name##_xfail = \
+ { \
+ .fixture = &_##fixture_name##_fixture_object, \
+ .variant = &_##fixture_name##_##variant_name##_object, \
+ .test = &_##fixture_name##_##test_name##_object, \
+ }; \
+ static void __attribute__((constructor)) \
+ _register_##fixture_name##_##variant_name##_##test_name##_xfail(void) \
+ { \
+ __register_xfail(&_##fixture_name##_##variant_name##_##test_name##_xfail); \
+ }
+
static struct __fixture_metadata *__fixture_list = &_fixture_global;
static int __constructor_order;
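
A hypothetical usage sketch for XFAIL_ADD(); the fixture, variant and test
names are invented for illustration:

#include <sys/socket.h>
#include "kselftest_harness.h"

FIXTURE(conn)
{
	int fd;
};

FIXTURE_SETUP(conn) {}
FIXTURE_TEARDOWN(conn) {}

FIXTURE_VARIANT(conn)
{
	int family;
};

FIXTURE_VARIANT_ADD(conn, ipv6)
{
	.family = AF_INET6,
};

TEST_F(conn, bind_unspec)
{
	/* ...a check known to fail for the ipv6 variant... */
}

/* A failure is then reported as XFAIL; an unexpected pass as XPASS. */
XFAIL_ADD(conn, ipv6, bind_unspec);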
@@ -814,6 +853,7 @@ static inline void __register_fixture(struct __fixture_metadata *f)
struct __fixture_variant_metadata {
const char *name;
const void *data;
+ struct __test_xfail *xfails;
struct __fixture_variant_metadata *prev, *next;
};
@@ -832,20 +872,24 @@ struct __test_metadata {
pid_t pid; /* pid of test when being run */
struct __fixture_metadata *fixture;
int termsig;
- int passed;
- int skip; /* did SKIP get used? */
+ int exit_code;
int trigger; /* extra handler after the evaluation */
int timeout; /* seconds to wait for test timeout */
bool timed_out; /* did this test timeout instead of exiting? */
- __u8 step;
- bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
bool aborted; /* stopped test due to failed ASSERT */
bool setup_completed; /* did setup finish? */
+ bool teardown_parent; /* run teardown in a parent process */
jmp_buf env; /* for exiting out of test early */
struct __test_results *results;
struct __test_metadata *prev, *next;
};
+static inline bool __test_passed(struct __test_metadata *metadata)
+{
+ return metadata->exit_code != KSFT_FAIL &&
+ metadata->exit_code <= KSFT_SKIP;
+}
+
/*
* Since constructors are called in reverse order, reverse the test
* list so tests are run in source declaration order.
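
To make the __test_passed() mapping concrete, a small standalone sketch; the
KSFT_* values are assumed to match their definitions in kselftest.h (PASS=0,
FAIL=1, XFAIL=2, XPASS=3, SKIP=4):

#include <assert.h>
#include <stdbool.h>

enum { KSFT_PASS, KSFT_FAIL, KSFT_XFAIL, KSFT_XPASS, KSFT_SKIP };

static bool test_passed(int exit_code)
{
	return exit_code != KSFT_FAIL && exit_code <= KSFT_SKIP;
}

int main(void)
{
	assert(test_passed(KSFT_PASS));
	assert(test_passed(KSFT_XFAIL));	/* expected failures count as passed */
	assert(test_passed(KSFT_SKIP));		/* so do skips */
	assert(!test_passed(KSFT_FAIL));
	assert(!test_passed(KSFT_SKIP + 1));	/* anything above SKIP is an error */
	return 0;
}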
@@ -860,6 +904,11 @@ static inline void __register_test(struct __test_metadata *t)
__LIST_APPEND(t->fixture->tests, t);
}
+static inline void __register_xfail(struct __test_xfail *xf)
+{
+ __LIST_APPEND(xf->variant->xfails, xf);
+}
+
static inline int __bail(int for_realz, struct __test_metadata *t)
{
/* if this is ASSERT, return immediately. */
@@ -873,11 +922,8 @@ static inline int __bail(int for_realz, struct __test_metadata *t)
static inline void __test_check_assert(struct __test_metadata *t)
{
- if (t->aborted) {
- if (t->no_print)
- _exit(t->step);
+ if (t->aborted)
abort();
- }
}
struct __test_metadata *__active_test;
@@ -913,7 +959,7 @@ void __wait_for_test(struct __test_metadata *t)
int status;
if (sigaction(SIGALRM, &action, &saved_action)) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: unable to install SIGALRM handler\n",
t->name);
@@ -925,7 +971,7 @@ void __wait_for_test(struct __test_metadata *t)
waitpid(t->pid, &status, 0);
alarm(0);
if (sigaction(SIGALRM, &saved_action, NULL)) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: unable to uninstall SIGALRM handler\n",
t->name);
@@ -934,16 +980,16 @@ void __wait_for_test(struct __test_metadata *t)
__active_test = NULL;
if (t->timed_out) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: Test terminated by timeout\n", t->name);
} else if (WIFEXITED(status)) {
- if (WEXITSTATUS(status) == 255) {
- /* SKIP */
- t->passed = 1;
- t->skip = 1;
+ if (WEXITSTATUS(status) == KSFT_SKIP ||
+ WEXITSTATUS(status) == KSFT_XPASS ||
+ WEXITSTATUS(status) == KSFT_XFAIL) {
+ t->exit_code = WEXITSTATUS(status);
} else if (t->termsig != -1) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: Test exited normally instead of by signal (code: %d)\n",
t->name,
@@ -951,26 +997,25 @@ void __wait_for_test(struct __test_metadata *t)
} else {
switch (WEXITSTATUS(status)) {
/* Success */
- case 0:
- t->passed = 1;
+ case KSFT_PASS:
+ t->exit_code = KSFT_PASS;
break;
- /* Other failure, assume step report. */
+ /* Failure */
default:
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
- "# %s: Test failed at step #%d\n",
- t->name,
- WEXITSTATUS(status));
+ "# %s: Test failed\n",
+ t->name);
}
}
} else if (WIFSIGNALED(status)) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
if (WTERMSIG(status) == SIGABRT) {
fprintf(TH_LOG_STREAM,
"# %s: Test terminated by assertion\n",
t->name);
} else if (WTERMSIG(status) == t->termsig) {
- t->passed = 1;
+ t->exit_code = KSFT_PASS;
} else {
fprintf(TH_LOG_STREAM,
"# %s: Test terminated unexpectedly by signal %d\n",
@@ -1110,16 +1155,19 @@ void __run_test(struct __fixture_metadata *f,
struct __fixture_variant_metadata *variant,
struct __test_metadata *t)
{
+ struct __test_xfail *xfail;
+ char test_name[LINE_MAX];
+ const char *diagnostic;
+
/* reset test struct */
- t->passed = 1;
- t->skip = 0;
+ t->exit_code = KSFT_PASS;
t->trigger = 0;
- t->step = 1;
- t->no_print = 0;
memset(t->results->reason, 0, sizeof(t->results->reason));
- ksft_print_msg(" RUN %s%s%s.%s ...\n",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ snprintf(test_name, sizeof(test_name), "%s%s%s.%s",
+ f->name, variant->name[0] ? "." : "", variant->name, t->name);
+
+ ksft_print_msg(" RUN %s ...\n", test_name);
/* Make sure output buffers are flushed before fork */
fflush(stdout);
@@ -1128,29 +1176,33 @@ void __run_test(struct __fixture_metadata *f,
t->pid = fork();
if (t->pid < 0) {
ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
} else if (t->pid == 0) {
setpgrp();
t->fn(t, variant);
- if (t->skip)
- _exit(255);
- /* Pass is exit 0 */
- if (t->passed)
- _exit(0);
- /* Something else happened, report the step. */
- _exit(t->step);
+ _exit(t->exit_code);
} else {
__wait_for_test(t);
}
- ksft_print_msg(" %4s %s%s%s.%s\n", t->passed ? "OK" : "FAIL",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ ksft_print_msg(" %4s %s\n",
+ __test_passed(t) ? "OK" : "FAIL", test_name);
- if (t->skip)
- ksft_test_result_skip("%s\n", t->results->reason[0] ?
- t->results->reason : "unknown");
+ /* Check if we're expecting this test to fail */
+ for (xfail = variant->xfails; xfail; xfail = xfail->next)
+ if (xfail->test == t)
+ break;
+ if (xfail)
+ t->exit_code = __test_passed(t) ? KSFT_XPASS : KSFT_XFAIL;
+
+ if (t->results->reason[0])
+ diagnostic = t->results->reason;
+ else if (t->exit_code == KSFT_PASS || t->exit_code == KSFT_FAIL)
+ diagnostic = NULL;
else
- ksft_test_result(t->passed, "%s%s%s.%s\n",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ diagnostic = "unknown";
+
+ ksft_test_result_code(t->exit_code, test_name,
+ diagnostic ? "%s" : "", diagnostic);
}
static int test_harness_run(int argc, char **argv)
@@ -1198,7 +1250,7 @@ static int test_harness_run(int argc, char **argv)
t->results = results;
__run_test(f, v, t);
t->results = NULL;
- if (t->passed)
+ if (__test_passed(t))
pass_count++;
else
ret = 1;
diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
index 646f778dfb1e..a6f89aaea77d 100644
--- a/tools/testing/selftests/landlock/base_test.c
+++ b/tools/testing/selftests/landlock/base_test.c
@@ -307,7 +307,7 @@ TEST(ruleset_fd_transfer)
dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
ASSERT_LE(0, dir_fd);
ASSERT_EQ(0, close(dir_fd));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
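
The same pattern recurs in the net and ptrace tests further down: a child
forked inside a test body now propagates the shared harness state directly,
roughly as in this sketch:

const pid_t child = fork();

ASSERT_LE(0, child);
if (child == 0) {
	/* ...child-side EXPECT/ASSERT checks update _metadata... */
	_exit(_metadata->exit_code);
}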
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index e64bbdf0e86e..401e2eb092a3 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -23,62 +23,8 @@
#define __maybe_unused __attribute__((__unused__))
#endif
-/*
- * TEST_F_FORK() is useful when a test drop privileges but the corresponding
- * FIXTURE_TEARDOWN() requires them (e.g. to remove files from a directory
- * where write actions are denied). For convenience, FIXTURE_TEARDOWN() is
- * also called when the test failed, but not when FIXTURE_SETUP() failed. For
- * this to be possible, we must not call abort() but instead exit smoothly
- * (hence the step print).
- */
-/* clang-format off */
-#define TEST_F_FORK(fixture_name, test_name) \
- static void fixture_name##_##test_name##_child( \
- struct __test_metadata *_metadata, \
- FIXTURE_DATA(fixture_name) *self, \
- const FIXTURE_VARIANT(fixture_name) *variant); \
- TEST_F(fixture_name, test_name) \
- { \
- int status; \
- const pid_t child = fork(); \
- if (child < 0) \
- abort(); \
- if (child == 0) { \
- _metadata->no_print = 1; \
- fixture_name##_##test_name##_child(_metadata, self, variant); \
- if (_metadata->skip) \
- _exit(255); \
- if (_metadata->passed) \
- _exit(0); \
- _exit(_metadata->step); \
- } \
- if (child != waitpid(child, &status, 0)) \
- abort(); \
- if (WIFSIGNALED(status) || !WIFEXITED(status)) { \
- _metadata->passed = 0; \
- _metadata->step = 1; \
- return; \
- } \
- switch (WEXITSTATUS(status)) { \
- case 0: \
- _metadata->passed = 1; \
- break; \
- case 255: \
- _metadata->passed = 1; \
- _metadata->skip = 1; \
- break; \
- default: \
- _metadata->passed = 0; \
- _metadata->step = WEXITSTATUS(status); \
- break; \
- } \
- } \
- static void fixture_name##_##test_name##_child( \
- struct __test_metadata __attribute__((unused)) *_metadata, \
- FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
- const FIXTURE_VARIANT(fixture_name) \
- __attribute__((unused)) *variant)
-/* clang-format on */
+/* TEST_F_FORK() should not be used for new tests. */
+#define TEST_F_FORK(fixture_name, test_name) TEST_F(fixture_name, test_name)
#ifndef landlock_create_ruleset
static inline int
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 2d6d9b43d958..9a6036fbf289 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -285,6 +285,8 @@ static void prepare_layout_opt(struct __test_metadata *const _metadata,
static void prepare_layout(struct __test_metadata *const _metadata)
{
+ _metadata->teardown_parent = true;
+
prepare_layout_opt(_metadata, &mnt_tmp);
}
@@ -1964,7 +1966,7 @@ static void test_execute(struct __test_metadata *const _metadata, const int err,
strerror(errno));
};
ASSERT_EQ(err, errno);
- _exit(_metadata->passed ? 2 : 1);
+ _exit(__test_passed(_metadata) ? 2 : 1);
return;
}
ASSERT_EQ(child, waitpid(child, &status, 0));
@@ -3807,7 +3809,7 @@ TEST_F_FORK(ftruncate, open_and_ftruncate_in_different_processes)
ASSERT_EQ(0, close(socket_fds[0]));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
@@ -3861,9 +3863,7 @@ FIXTURE_SETUP(layout1_bind)
FIXTURE_TEARDOWN(layout1_bind)
{
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(dir_s2d2));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+ /* umount(dir_s2d2) is handled by the namespace lifetime. */
remove_layout1(_metadata);
@@ -4276,9 +4276,8 @@ FIXTURE_TEARDOWN(layout2_overlay)
EXPECT_EQ(0, remove_path(lower_fl1));
EXPECT_EQ(0, remove_path(lower_do1_fo2));
EXPECT_EQ(0, remove_path(lower_fo1));
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(LOWER_BASE));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ /* umount(LOWER_BASE) is handled by the namespace lifetime. */
EXPECT_EQ(0, remove_path(LOWER_BASE));
EXPECT_EQ(0, remove_path(upper_do1_fu3));
@@ -4287,14 +4286,11 @@ FIXTURE_TEARDOWN(layout2_overlay)
EXPECT_EQ(0, remove_path(upper_do1_fo2));
EXPECT_EQ(0, remove_path(upper_fo1));
EXPECT_EQ(0, remove_path(UPPER_WORK "/work"));
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(UPPER_BASE));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ /* umount(UPPER_BASE) is handled by the namespace lifetime. */
EXPECT_EQ(0, remove_path(UPPER_BASE));
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(MERGE_DATA));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+ /* umount(MERGE_DATA) is handled by the namespace lifetime. */
EXPECT_EQ(0, remove_path(MERGE_DATA));
cleanup_layout(_metadata);
@@ -4691,6 +4687,8 @@ FIXTURE_SETUP(layout3_fs)
SKIP(return, "this filesystem is not supported (setup)");
}
+ _metadata->teardown_parent = true;
+
slash = strrchr(variant->file_path, '/');
ASSERT_NE(slash, NULL);
dir_len = (size_t)slash - (size_t)variant->file_path;
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index 936cfc879f1d..f21cfbbc3638 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -539,7 +539,7 @@ static void test_bind_and_connect(struct __test_metadata *const _metadata,
}
EXPECT_EQ(0, close(connect_fd));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
@@ -834,7 +834,7 @@ TEST_F(protocol, connect_unspec)
}
EXPECT_EQ(0, close(connect_fd));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
index 55e7871631a1..a19db4d0b3bd 100644
--- a/tools/testing/selftests/landlock/ptrace_test.c
+++ b/tools/testing/selftests/landlock/ptrace_test.c
@@ -314,7 +314,7 @@ TEST_F(hierarchy, trace)
ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
if (variant->domain_both) {
create_domain(_metadata);
- if (!_metadata->passed)
+ if (!__test_passed(_metadata))
/* Aborts before forking. */
return;
}
@@ -375,7 +375,7 @@ TEST_F(hierarchy, trace)
/* Waits for the parent PTRACE_ATTACH test. */
ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
@@ -430,9 +430,10 @@ TEST_F(hierarchy, trace)
/* Signals that the parent PTRACE_ATTACH test is done. */
ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
ASSERT_EQ(child, waitpid(child, &status, 0));
+
if (WIFSIGNALED(status) || !WIFEXITED(status) ||
WEXITSTATUS(status) != EXIT_SUCCESS)
- _metadata->passed = 0;
+ _metadata->exit_code = KSFT_FAIL;
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 286ce0ee102b..da2cade3bab0 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -70,11 +70,29 @@ define RUN_TESTS
run_many $(1)
endef
+define INSTALL_INCLUDES
+ $(if $(TEST_INCLUDES), \
+ relative_files=""; \
+ for entry in $(TEST_INCLUDES); do \
+ entry_dir=$$(readlink -e "$$(dirname "$$entry")"); \
+ entry_name=$$(basename "$$entry"); \
+ relative_dir=$${entry_dir#"$$SRC_PATH"/}; \
+ if [ "$$relative_dir" = "$$entry_dir" ]; then \
+ echo "Error: TEST_INCLUDES entry \"$$entry\" not located inside selftests directory ($$SRC_PATH)" >&2; \
+ exit 1; \
+ fi; \
+ relative_files="$$relative_files $$relative_dir/$$entry_name"; \
+ done; \
+ cd $(SRC_PATH) && rsync -aR $$relative_files $(OBJ_PATH)/ \
+ )
+endef
+
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)$(TEST_GEN_MODS_DIR)" != "X" ]; then \
rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(TEST_GEN_MODS_DIR) $(OUTPUT); \
fi
+ @$(INSTALL_INCLUDES)
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
$(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
@@ -116,6 +134,7 @@ endef
install: all
ifdef INSTALL_PATH
$(INSTALL_RULE)
+ $(INSTALL_INCLUDES)
else
$(error Error: set INSTALL_PATH to use install)
endif
diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
index 20294553a5dd..d2cfc9b494a0 100644
--- a/tools/testing/selftests/mm/hmm-tests.c
+++ b/tools/testing/selftests/mm/hmm-tests.c
@@ -138,7 +138,7 @@ FIXTURE_SETUP(hmm)
self->fd = hmm_open(variant->device_number);
if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
- SKIP(exit(0), "DEVICE_COHERENT not available");
+ SKIP(return, "DEVICE_COHERENT not available");
ASSERT_GE(self->fd, 0);
}
@@ -149,7 +149,7 @@ FIXTURE_SETUP(hmm2)
self->fd0 = hmm_open(variant->device_number0);
if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
- SKIP(exit(0), "DEVICE_COHERENT not available");
+ SKIP(return, "DEVICE_COHERENT not available");
ASSERT_GE(self->fd0, 0);
self->fd1 = hmm_open(variant->device_number1);
ASSERT_GE(self->fd1, 0);
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 211753756bde..7b6918d5f4af 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -97,6 +97,8 @@ TEST_PROGS += vlan_hw_filter.sh
TEST_FILES := settings
TEST_FILES += in_netns.sh lib.sh net_helper.sh setup_loopback.sh setup_veth.sh
+TEST_INCLUDES := forwarding/lib.sh
+
include ../lib.mk
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 0d4f252427e2..386ebd829df5 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -38,6 +38,9 @@
# server / client nomenclature relative to ns-A
source lib.sh
+
+PATH=$PWD:$PWD/tools/testing/selftests/net:$PATH
+
VERBOSE=0
NSA_DEV=eth1
@@ -97,6 +100,7 @@ log_test()
local rc=$1
local expected=$2
local msg="$3"
+ local ans
[ "${VERBOSE}" = "1" ] && echo
@@ -106,19 +110,20 @@ log_test()
else
nfail=$((nfail+1))
printf "TEST: %-70s [FAIL]\n" "${msg}"
+ echo " expected rc $expected; actual rc $rc"
if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
echo
echo "hit enter to continue, 'q' to quit"
- read a
- [ "$a" = "q" ] && exit 1
+ read ans
+ [ "$ans" = "q" ] && exit 1
fi
fi
if [ "${PAUSE}" = "yes" ]; then
echo
echo "hit enter to continue, 'q' to quit"
- read a
- [ "$a" = "q" ] && exit 1
+ read ans
+ [ "$ans" = "q" ] && exit 1
fi
kill_procs
@@ -187,6 +192,15 @@ kill_procs()
sleep 1
}
+set_ping_group()
+{
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: ${NSA_CMD} sysctl -q -w net.ipv4.ping_group_range='0 2147483647'"
+ fi
+
+ ${NSA_CMD} sysctl -q -w net.ipv4.ping_group_range='0 2147483647'
+}
+
do_run_cmd()
{
local cmd="$*"
@@ -835,14 +849,14 @@ ipv4_ping()
set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
ipv4_ping_novrf
setup
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv4_ping_vrf
setup "yes"
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_ping_vrf
}
@@ -2053,12 +2067,12 @@ ipv4_addr_bind()
log_subsection "No VRF"
setup
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_addr_bind_novrf
log_subsection "With VRF"
setup "yes"
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_addr_bind_vrf
}
@@ -2521,14 +2535,14 @@ ipv6_ping()
setup
ipv6_ping_novrf
setup
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv6_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv6_ping_vrf
setup "yes"
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv6_ping_vrf
}
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index d5a281aadbac..ac0b2c6a5761 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -2066,6 +2066,12 @@ basic()
run_cmd "$IP nexthop get id 1"
log_test $? 2 "Nexthop get on non-existent id"
+ run_cmd "$IP nexthop del id 1"
+ log_test $? 2 "Nexthop del with non-existent id"
+
+ run_cmd "$IP nexthop del id 1 group 1/2/3/4/5/6/7/8"
+ log_test $? 2 "Nexthop del with non-existent id and extra attributes"
+
# attempt to create nh without a device or gw - fails
run_cmd "$IP nexthop add id 1"
log_test $? 2 "Nexthop with no device or gateway"
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index b3ecccbbfcd2..73895711cdf4 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -743,6 +743,43 @@ fib_notify_test()
cleanup &> /dev/null
}
+# Delete and re-create dummy_10 to remove all associated routes.
+reset_dummy_10()
+{
+ $IP link del dev dummy_10
+
+ $IP link add dummy_10 type dummy
+ $IP link set dev dummy_10 up
+ $IP -6 address add 2001:10::1/64 dev dummy_10
+}
+
+check_rt_num()
+{
+ local expected=$1
+ local num=$2
+
+ if [ $num -ne $expected ]; then
+ echo "FAIL: Expected $expected routes, got $num"
+ ret=1
+ else
+ ret=0
+ fi
+}
+
+check_rt_num_clean()
+{
+ local expected=$1
+ local num=$2
+
+ if [ $num -ne $expected ]; then
+ log_test 1 0 "expected $expected routes, got $num"
+ set +e
+ cleanup &> /dev/null
+ return 1
+ fi
+ return 0
+}
+
fib6_gc_test()
{
setup
@@ -751,7 +788,8 @@ fib6_gc_test()
echo "Fib6 garbage collection test"
set -e
- EXPIRE=3
+ EXPIRE=5
+ GC_WAIT_TIME=$((EXPIRE * 2 + 2))
# Check expiration of routes every $EXPIRE seconds (GC)
$NS_EXEC sysctl -wq net.ipv6.route.gc_interval=$EXPIRE
@@ -763,44 +801,110 @@ fib6_gc_test()
$NS_EXEC sysctl -wq net.ipv6.route.flush=1
# Temporary routes
- for i in $(seq 1 1000); do
+ for i in $(seq 1 5); do
# Expire route after $EXPIRE seconds
$IP -6 route add 2001:20::$i \
via 2001:10::2 dev dummy_10 expires $EXPIRE
done
- sleep $(($EXPIRE * 2))
- N_EXP_SLEEP=$($IP -6 route list |grep expires|wc -l)
- if [ $N_EXP_SLEEP -ne 0 ]; then
- echo "FAIL: expected 0 routes with expires, got $N_EXP_SLEEP"
- ret=1
- else
- ret=0
- fi
+ sleep $GC_WAIT_TIME
+ $NS_EXEC sysctl -wq net.ipv6.route.flush=1
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection"
+
+ reset_dummy_10
# Permanent routes
- for i in $(seq 1 5000); do
+ for i in $(seq 1 5); do
$IP -6 route add 2001:30::$i \
via 2001:10::2 dev dummy_10
done
# Temporary routes
- for i in $(seq 1 1000); do
+ for i in $(seq 1 5); do
# Expire route after $EXPIRE seconds
$IP -6 route add 2001:20::$i \
via 2001:10::2 dev dummy_10 expires $EXPIRE
done
- sleep $(($EXPIRE * 2))
- N_EXP_SLEEP=$($IP -6 route list |grep expires|wc -l)
- if [ $N_EXP_SLEEP -ne 0 ]; then
- echo "FAIL: expected 0 routes with expires," \
- "got $N_EXP_SLEEP (5000 permanent routes)"
- ret=1
- else
- ret=0
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (with permanent routes)"
+
+ reset_dummy_10
+
+ # Permanent routes
+ for i in $(seq 1 5); do
+ $IP -6 route add 2001:20::$i \
+ via 2001:10::2 dev dummy_10
+ done
+ # Replace with temporary routes
+ for i in $(seq 1 5); do
+ # Expire route after $EXPIRE seconds
+ $IP -6 route replace 2001:20::$i \
+ via 2001:10::2 dev dummy_10 expires $EXPIRE
+ done
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (replace with expires)"
+
+ reset_dummy_10
+
+ # Temporary routes
+ for i in $(seq 1 5); do
+ # Expire route after $EXPIRE seconds
+ $IP -6 route add 2001:20::$i \
+ via 2001:10::2 dev dummy_10 expires $EXPIRE
+ done
+ # Replace with permanent routes
+ for i in $(seq 1 5); do
+ $IP -6 route replace 2001:20::$i \
+ via 2001:10::2 dev dummy_10
+ done
+ check_rt_num_clean 0 $($IP -6 route list |grep expires|wc -l) || return
+
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 5 $($IP -6 route list |grep -v expires|grep 2001:20::|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (replace with permanent)"
+
+ # ra6 (from the ipv6toolkit package) is required for the next test.
+ if [ ! -x "$(command -v ra6)" ]; then
+ echo "SKIP: ra6 not found."
+ set +e
+ cleanup &> /dev/null
+ return
fi
- set +e
+ # Delete dummy_10 and remove all routes
+ $IP link del dev dummy_10
- log_test $ret 0 "ipv6 route garbage collection"
+ # Create a pair of veth devices to send an RA message from one
+ # device to another.
+ $IP link add veth1 type veth peer name veth2
+ $IP link set dev veth1 up
+ $IP link set dev veth2 up
+ $IP -6 address add 2001:10::1/64 dev veth1 nodad
+ $IP -6 address add 2001:10::2/64 dev veth2 nodad
+
+ # Make veth1 ready to receive RA messages.
+ $NS_EXEC sysctl -wq net.ipv6.conf.veth1.accept_ra=2
+
+ # Send an RA message with a route from veth2 to veth1.
+ $NS_EXEC ra6 -i veth2 -d 2001:10::1 -t $EXPIRE
+
+ # Wait for the RA message.
+ sleep 1
+
+ # systemd may mess up the test. You should make sure that
+ # systemd-networkd.service and systemd-networkd.socket are stopped.
+ check_rt_num_clean 1 $($IP -6 route list|grep expires|wc -l) || return
+
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (RA message)"
+
+ set +e
cleanup &> /dev/null
}
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index 4de92632f483..535865b3d1d6 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -123,10 +123,14 @@ TEST_FILES := devlink_lib.sh \
mirror_gre_topo_lib.sh \
mirror_lib.sh \
mirror_topo_lib.sh \
+ router_mpath_nh_lib.sh \
sch_ets_core.sh \
sch_ets_tests.sh \
sch_tbf_core.sh \
sch_tbf_etsprio.sh \
tc_common.sh
+TEST_INCLUDES := \
+ ../lib.sh
+
include ../../lib.mk
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 697994a9278b..8d7a1a004b7c 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -6,14 +6,49 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NET_VRF=m
CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
+CONFIG_DUMMY=m
+CONFIG_IPV6=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_MACVLAN=m
CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_MPLS=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_VLAN=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPIP=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_ACT_GACT=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_TABLES=m
CONFIG_VETH=m
CONFIG_NAMESPACES=y
CONFIG_NET_NS=y
+CONFIG_VXLAN=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
index 56eb83d1a3bd..1783c10215e5 100755
--- a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -183,42 +183,42 @@ send_src_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_flowlabel()
@@ -234,14 +234,14 @@ send_src_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:4::2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:4::2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
custom_hash_test()
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index 4a546509de90..1fc4f0242fc5 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -28,6 +28,8 @@ PING=ping
PING6=ping6
# Packet generator. Some distributions use 'mz'.
MZ=mausezahn
+# mausezahn delay between transmissions in microseconds.
+MZ_DELAY=0
# Time to wait after interfaces participating in the test are all UP
WAIT_TIME=5
# Whether to pause on failure or not.
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
index 0446db9c6f74..9788bd0f6e8b 100755
--- a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -278,42 +278,42 @@ send_src_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_flowlabel()
@@ -329,14 +329,14 @@ send_src_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
custom_hash_test()
diff --git a/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh b/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh
index e4009f658003..efca6114a3ce 100755
--- a/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh
@@ -267,7 +267,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A "192.0.3.2-192.0.3.62" -B "192.0.4.2-192.0.4.62" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh b/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh
index e449475c4d3e..a71ad39fc0c3 100755
--- a/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh
@@ -266,9 +266,9 @@ multipath6_test()
local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
ip vrf exec v$h1 \
- $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::1e" \
- -B "2001:db8:2::2-2001:db8:2::1e" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::3e" \
+ -B "2001:db8:2::2-2001:db8:2::3e" \
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index a8d8e8b3dc81..57531c1d884d 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -220,7 +220,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh b/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh
index d03aa2cab9fd..7d5b2b9cc133 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh
@@ -64,7 +64,6 @@ ALL_TESTS="
ping_ipv6
multipath_ipv4
multipath_ipv6
- multipath_ipv6_l4
"
NUM_NETIFS=6
@@ -245,7 +244,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -264,34 +263,6 @@ multipath6_test()
local weight1=$1; shift
local weight2=$1; shift
- sysctl_set net.ipv6.fib_multipath_hash_policy 0
- ip nexthop replace id 103 group 101,$weight1/102,$weight2
-
- local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- # Generate 16384 echo requests, each with a random flow label.
- for ((i=0; i < 16384; ++i)); do
- ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
- done
-
- local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- local d111=$((t1_111 - t0_111))
- local d222=$((t1_222 - t0_222))
- multipath_eval "$what" $weight1 $weight2 $d111 $d222
-
- ip nexthop replace id 103 group 101/102
- sysctl_restore net.ipv6.fib_multipath_hash_policy
-}
-
-multipath6_l4_test()
-{
- local what=$1; shift
- local weight1=$1; shift
- local weight2=$1; shift
-
sysctl_set net.ipv6.fib_multipath_hash_policy 1
ip nexthop replace id 103 group 101,$weight1/102,$weight2
@@ -300,7 +271,7 @@ multipath6_l4_test()
ip vrf exec v$h1 \
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -339,14 +310,6 @@ multipath_ipv6()
multipath6_test "Weighted MP 11:45" 11 45
}
-multipath_ipv6_l4()
-{
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
-}
-
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh b/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
index 088b65e64d66..370f9925302d 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
@@ -64,7 +64,6 @@ ALL_TESTS="
ping_ipv6
multipath_ipv4
multipath_ipv6
- multipath_ipv6_l4
"
NUM_NETIFS=6
@@ -248,7 +247,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -267,35 +266,6 @@ multipath6_test()
local weight1=$1; shift
local weight2=$1; shift
- sysctl_set net.ipv6.fib_multipath_hash_policy 0
- ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
- type resilient
-
- local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- # Generate 16384 echo requests, each with a random flow label.
- for ((i=0; i < 16384; ++i)); do
- ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
- done
-
- local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- local d111=$((t1_111 - t0_111))
- local d222=$((t1_222 - t0_222))
- multipath_eval "$what" $weight1 $weight2 $d111 $d222
-
- ip nexthop replace id 103 group 101/102 type resilient
- sysctl_restore net.ipv6.fib_multipath_hash_policy
-}
-
-multipath6_l4_test()
-{
- local what=$1; shift
- local weight1=$1; shift
- local weight2=$1; shift
-
sysctl_set net.ipv6.fib_multipath_hash_policy 1
ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
type resilient
@@ -305,7 +275,7 @@ multipath6_l4_test()
ip vrf exec v$h1 \
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -344,14 +314,6 @@ multipath_ipv6()
multipath6_test "Weighted MP 11:45" 11 45
}
-multipath_ipv6_l4()
-{
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
-}
-
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
index d40183b4eccc..2ab9eaaa5532 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -280,42 +280,42 @@ send_src_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_flowlabel()
@@ -331,14 +331,14 @@ send_src_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
custom_hash_test()
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh b/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh
index a257979d3fc5..32d1461f37b7 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh
@@ -266,7 +266,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A "192.0.3.2-192.0.3.62" -B "192.0.4.2-192.0.4.62" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh b/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh
index d208f5243ade..e1a4b50505f5 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh
@@ -265,9 +265,9 @@ multipath6_test()
local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
ip vrf exec v$h1 \
- $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::1e" \
- -B "2001:db8:2::2-2001:db8:2::1e" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::3e" \
+ -B "2001:db8:2::2-2001:db8:2::3e" \
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
index 58a3597037b1..24f4ab328bd2 100644
--- a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
@@ -356,7 +356,7 @@ test_traffic_ip4ip6()
flower $TC_FLAG dst_ip 203.0.113.1 action pass
$MZ $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 198.51.100.1 \
- -B 203.0.113.1 -t ip -q -d 1msec
+ -B 203.0.113.1 -t ip -q -d $MZ_DELAY
# Check ports after encap and after decap.
tc_check_at_least_x_packets "dev $ul1 egress" 101 1000
@@ -389,7 +389,7 @@ test_traffic_ip6ip6()
flower $TC_FLAG dst_ip 2001:db8:2::1 action pass
$MZ -6 $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 2001:db8:1::1 \
- -B 2001:db8:2::1 -t ip -q -d 1msec
+ -B 2001:db8:2::1 -t ip -q -d $MZ_DELAY
# Check ports after encap and after decap.
tc_check_at_least_x_packets "dev $ul1 egress" 101 1000
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 8a61464ab6eb..e579c2e0c462 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -8,6 +8,7 @@
PING=${PING:=ping}
PING6=${PING6:=ping6}
MZ=${MZ:=mausezahn}
+MZ_DELAY=${MZ_DELAY:=0}
ARPING=${ARPING:=arping}
TEAMD=${TEAMD:=teamd}
WAIT_TIME=${WAIT_TIME:=5}
@@ -29,23 +30,20 @@ STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
TROUTE6=${TROUTE6:=traceroute6}
-relative_path="${BASH_SOURCE%/*}"
-if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
- relative_path="."
-fi
+net_forwarding_dir=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
-if [[ -f $relative_path/forwarding.config ]]; then
- source "$relative_path/forwarding.config"
+if [[ -f $net_forwarding_dir/forwarding.config ]]; then
+ source "$net_forwarding_dir/forwarding.config"
fi
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
+source "$net_forwarding_dir/../lib.sh"
-busywait()
+# timeout in seconds
+slowwait()
{
local timeout=$1; shift
- local start_time="$(date -u +%s%3N)"
+ local start_time="$(date -u +%s)"
while true
do
local out
@@ -56,11 +54,13 @@ busywait()
return 0
fi
- local current_time="$(date -u +%s%3N)"
+ local current_time="$(date -u +%s)"
if ((current_time - start_time > timeout)); then
echo -n "$out"
return 1
fi
+
+ sleep 0.1
done
}
@@ -505,6 +505,15 @@ busywait_for_counter()
busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
}
+slowwait_for_counter()
+{
+ local timeout=$1; shift
+ local delta=$1; shift
+
+ local base=$("$@")
+ slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
+}
+
setup_wait_dev()
{
local dev=$1; shift
@@ -891,6 +900,33 @@ hw_stats_get()
jq ".[0].stats64.$dir.$stat"
}
+__nh_stats_get()
+{
+ local key=$1; shift
+ local group_id=$1; shift
+ local member_id=$1; shift
+
+ ip -j -s -s nexthop show id $group_id |
+ jq --argjson member_id "$member_id" --arg key "$key" \
+ '.[].group_stats[] | select(.id == $member_id) | .[$key]'
+}
+
+nh_stats_get()
+{
+ local group_id=$1; shift
+ local member_id=$1; shift
+
+ __nh_stats_get packets "$group_id" "$member_id"
+}
+
+nh_stats_get_hw()
+{
+ local group_id=$1; shift
+ local member_id=$1; shift
+
+ __nh_stats_get packets_hw "$group_id" "$member_id"
+}
+
humanize()
{
local speed=$1; shift
@@ -2001,3 +2037,10 @@ bail_on_lldpad()
fi
fi
}
+
+absval()
+{
+ local v=$1; shift
+
+ echo $((v > 0 ? v : -v))
+}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
index fac486178ef7..0c36546e131e 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-source "$relative_path/mirror_lib.sh"
+source "$net_forwarding_dir/mirror_lib.sh"
quick_test_span_gre_dir_ips()
{
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
index 39c03e2867f4..6e615fffa4ef 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
@@ -33,7 +33,7 @@
# | |
# +-------------------------------------------------------------------------+
-source "$relative_path/mirror_topo_lib.sh"
+source "$net_forwarding_dir/mirror_topo_lib.sh"
mirror_gre_topo_h3_create()
{
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
index a0d612e04990..3f0f5dc95542 100755
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
@@ -7,9 +7,12 @@ ALL_TESTS="
multipath_test
ping_ipv4_blackhole
ping_ipv6_blackhole
+ nh_stats_test_v4
+ nh_stats_test_v6
"
NUM_NETIFS=8
source lib.sh
+source router_mpath_nh_lib.sh
h1_create()
{
@@ -204,7 +207,7 @@ multipath4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -218,7 +221,7 @@ multipath4_test()
sysctl_restore net.ipv4.fib_multipath_hash_policy
}
-multipath6_l4_test()
+multipath6_test()
{
local desc="$1"
local weight_rp12=$2
@@ -237,7 +240,7 @@ multipath6_l4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -251,34 +254,6 @@ multipath6_l4_test()
sysctl_restore net.ipv6.fib_multipath_hash_policy
}
-multipath6_test()
-{
- local desc="$1"
- local weight_rp12=$2
- local weight_rp13=$3
- local t0_rp12 t0_rp13 t1_rp12 t1_rp13
- local packets_rp12 packets_rp13
-
- ip nexthop replace id 106 group 104,$weight_rp12/105,$weight_rp13
-
- t0_rp12=$(link_stats_tx_packets_get $rp12)
- t0_rp13=$(link_stats_tx_packets_get $rp13)
-
- # Generate 16384 echo requests, each with a random flow label.
- for _ in $(seq 1 16384); do
- ip vrf exec vrf-h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
- done
-
- t1_rp12=$(link_stats_tx_packets_get $rp12)
- t1_rp13=$(link_stats_tx_packets_get $rp13)
-
- let "packets_rp12 = $t1_rp12 - $t0_rp12"
- let "packets_rp13 = $t1_rp13 - $t0_rp13"
- multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
-
- ip nexthop replace id 106 group 104/105
-}
-
multipath_test()
{
log_info "Running IPv4 multipath tests"
@@ -301,11 +276,6 @@ multipath_test()
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
multipath6_test "Weighted MP 11:45" 11 45
-
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
}
ping_ipv4_blackhole()
@@ -358,6 +328,16 @@ ping_ipv6_blackhole()
ip -6 nexthop del id 1001
}
+nh_stats_test_v4()
+{
+ __nh_stats_test_v4 mpath
+}
+
+nh_stats_test_v6()
+{
+ __nh_stats_test_v6 mpath
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
new file mode 100644
index 000000000000..7e7d62161c34
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: GPL-2.0
+
+nh_stats_do_test()
+{
+ local what=$1; shift
+ local nh1_id=$1; shift
+ local nh2_id=$1; shift
+ local group_id=$1; shift
+ local stats_get=$1; shift
+ local mz="$@"
+
+ local dp
+
+ RET=0
+
+ sleep 2
+ for ((dp=0; dp < 60000; dp += 10000)); do
+ local dd
+ local t0_rp12=$(link_stats_tx_packets_get $rp12)
+ local t0_rp13=$(link_stats_tx_packets_get $rp13)
+ local t0_nh1=$($stats_get $group_id $nh1_id)
+ local t0_nh2=$($stats_get $group_id $nh2_id)
+
+ ip vrf exec vrf-h1 \
+ $mz -q -p 64 -d 0 -t udp \
+ "sp=1024,dp=$((dp))-$((dp + 10000))"
+ sleep 2
+
+ local t1_rp12=$(link_stats_tx_packets_get $rp12)
+ local t1_rp13=$(link_stats_tx_packets_get $rp13)
+ local t1_nh1=$($stats_get $group_id $nh1_id)
+ local t1_nh2=$($stats_get $group_id $nh2_id)
+
+ local d_rp12=$((t1_rp12 - t0_rp12))
+ local d_rp13=$((t1_rp13 - t0_rp13))
+ local d_nh1=$((t1_nh1 - t0_nh1))
+ local d_nh2=$((t1_nh2 - t0_nh2))
+
+ dd=$(absval $((d_rp12 - d_nh1)))
+ ((dd < 10))
+ check_err $? "Discrepancy between link and $stats_get: d_rp12=$d_rp12 d_nh1=$d_nh1"
+
+ dd=$(absval $((d_rp13 - d_nh2)))
+ ((dd < 10))
+ check_err $? "Discrepancy between link and $stats_get: d_rp13=$d_rp13 d_nh2=$d_nh2"
+ done
+
+ log_test "NH stats test $what"
+}
+
+nh_stats_test_dispatch_swhw()
+{
+ local what=$1; shift
+ local nh1_id=$1; shift
+ local nh2_id=$1; shift
+ local group_id=$1; shift
+ local mz="$@"
+
+ local used
+
+ nh_stats_do_test "$what" "$nh1_id" "$nh2_id" "$group_id" \
+ nh_stats_get "${mz[@]}"
+
+ used=$(ip -s -j -d nexthop show id $group_id |
+ jq '.[].hw_stats.used')
+ kind=$(ip -j -d link show dev $rp11 |
+ jq -r '.[].linkinfo.info_kind')
+ if [[ $used == true ]]; then
+ nh_stats_do_test "HW $what" "$nh1_id" "$nh2_id" "$group_id" \
+ nh_stats_get_hw "${mz[@]}"
+ elif [[ $kind == veth ]]; then
+ log_test_skip "HW stats not offloaded on veth topology"
+ fi
+}
+
+nh_stats_test_dispatch()
+{
+ local nhgtype=$1; shift
+ local what=$1; shift
+ local nh1_id=$1; shift
+ local nh2_id=$1; shift
+ local group_id=$1; shift
+ local mz="$@"
+
+ local enabled
+ local kind
+
+ if ! ip nexthop help 2>&1 | grep -q hw_stats; then
+ log_test_skip "NH stats test: ip doesn't support HW stats"
+ return
+ fi
+
+ ip nexthop replace id $group_id group $nh1_id/$nh2_id \
+ hw_stats on type $nhgtype
+ enabled=$(ip -s -j -d nexthop show id $group_id |
+ jq '.[].hw_stats.enabled')
+ if [[ $enabled == true ]]; then
+ nh_stats_test_dispatch_swhw "$what" "$nh1_id" "$nh2_id" \
+ "$group_id" "${mz[@]}"
+ elif [[ $enabled == false ]]; then
+ check_err 1 "HW stats still disabled after enabling"
+ log_test "NH stats test"
+ else
+ log_test_skip "NH stats test: ip doesn't report hw_stats info"
+ fi
+
+ ip nexthop replace id $group_id group $nh1_id/$nh2_id \
+ hw_stats off type $nhgtype
+}
+
+__nh_stats_test_v4()
+{
+ local nhgtype=$1; shift
+
+ sysctl_set net.ipv4.fib_multipath_hash_policy 1
+ nh_stats_test_dispatch $nhgtype "IPv4" 101 102 103 \
+ $MZ $h1 -A 192.0.2.2 -B 198.51.100.2
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+__nh_stats_test_v6()
+{
+ local nhgtype=$1; shift
+
+ sysctl_set net.ipv6.fib_multipath_hash_policy 1
+ nh_stats_test_dispatch $nhgtype "IPv6" 104 105 106 \
+ $MZ -6 $h1 -A 2001:db8:1::2 -B 2001:db8:2::2
+ sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
index cb08ffe2356a..4b483d24ad00 100755
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
@@ -5,9 +5,12 @@ ALL_TESTS="
ping_ipv4
ping_ipv6
multipath_test
+ nh_stats_test_v4
+ nh_stats_test_v6
"
NUM_NETIFS=8
source lib.sh
+source router_mpath_nh_lib.sh
h1_create()
{
@@ -205,7 +208,7 @@ multipath4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -235,7 +238,7 @@ multipath6_l4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -333,6 +336,16 @@ multipath_test()
ip nexthop replace id 106 group 104,1/105,1 type resilient
}
+nh_stats_test_v4()
+{
+ __nh_stats_test_v4 resilient
+}
+
+nh_stats_test_v6()
+{
+ __nh_stats_test_v6 resilient
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh
index 464821c587a5..e2be354167a1 100755
--- a/tools/testing/selftests/net/forwarding/router_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/router_multipath.sh
@@ -179,7 +179,7 @@ multipath4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -195,7 +195,7 @@ multipath4_test()
sysctl_restore net.ipv4.fib_multipath_hash_policy
}
-multipath6_l4_test()
+multipath6_test()
{
local desc="$1"
local weight_rp12=$2
@@ -216,7 +216,7 @@ multipath6_l4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -232,38 +232,6 @@ multipath6_l4_test()
sysctl_restore net.ipv6.fib_multipath_hash_policy
}
-multipath6_test()
-{
- local desc="$1"
- local weight_rp12=$2
- local weight_rp13=$3
- local t0_rp12 t0_rp13 t1_rp12 t1_rp13
- local packets_rp12 packets_rp13
-
- ip route replace 2001:db8:2::/64 vrf vrf-r1 \
- nexthop via fe80:2::22 dev $rp12 weight $weight_rp12 \
- nexthop via fe80:3::23 dev $rp13 weight $weight_rp13
-
- t0_rp12=$(link_stats_tx_packets_get $rp12)
- t0_rp13=$(link_stats_tx_packets_get $rp13)
-
- # Generate 16384 echo requests, each with a random flow label.
- for _ in $(seq 1 16384); do
- ip vrf exec vrf-h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
- done
-
- t1_rp12=$(link_stats_tx_packets_get $rp12)
- t1_rp13=$(link_stats_tx_packets_get $rp13)
-
- let "packets_rp12 = $t1_rp12 - $t0_rp12"
- let "packets_rp13 = $t1_rp13 - $t0_rp13"
- multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
-
- ip route replace 2001:db8:2::/64 vrf vrf-r1 \
- nexthop via fe80:2::22 dev $rp12 \
- nexthop via fe80:3::23 dev $rp13
-}
-
multipath_test()
{
log_info "Running IPv4 multipath tests"
@@ -275,11 +243,6 @@ multipath_test()
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
multipath6_test "Weighted MP 11:45" 11 45
-
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
}
setup_prepare()
diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh
index 0a51eef21b9e..5103f64a71d6 100755
--- a/tools/testing/selftests/net/forwarding/tc_police.sh
+++ b/tools/testing/selftests/net/forwarding/tc_police.sh
@@ -140,7 +140,7 @@ police_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
@@ -157,7 +157,7 @@ police_rx_test()
# Rule to police traffic destined to $h2 on ingress of $rp1
tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/ok
+ action police rate 10mbit burst 16k conform-exceed drop/ok
police_common_test "police on rx"
@@ -169,7 +169,7 @@ police_tx_test()
# Rule to police traffic destined to $h2 on egress of $rp2
tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/ok
+ action police rate 10mbit burst 16k conform-exceed drop/ok
police_common_test "police on tx"
@@ -190,7 +190,7 @@ police_shared_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
@@ -211,7 +211,7 @@ police_shared_test()
# Rule to police traffic destined to $h2 on ingress of $rp1
tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/ok \
+ action police rate 10mbit burst 16k conform-exceed drop/ok \
index 10
# Rule to police a different flow destined to $h2 on egress of $rp2
@@ -250,7 +250,7 @@ police_mirror_common_test()
# Rule to police traffic destined to $h2 and mirror to $h3
tc filter add dev $pol_if $dir protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/pipe \
+ action police rate 10mbit burst 16k conform-exceed drop/pipe \
action mirred egress mirror dev $rp3
mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
@@ -260,7 +260,7 @@ police_mirror_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
@@ -270,7 +270,7 @@ police_mirror_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h3 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
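All the policers above drop from 80 to 10 Mbit/s, but the pass criterion is unchanged: sample the byte counter twice 10 s apart, convert the delta to bits per second via lib.sh's rate(), and require the result to land within ±10% of the expected rate. A worked sketch of that arithmetic, assuming rate() returns bits/s over the given interval:

t0=1000000; t1=13500000            # byte counters sampled 10 s apart
er=$((10 * 1000 * 1000))           # expected policer rate: 10 Mbit/s
nr=$(( (t1 - t0) * 8 / 10 ))       # rate(): 10,000,000 bit/s in this example
nr_pct=$((100 * (nr - er) / er))   # signed deviation in percent
((-10 <= nr_pct && nr_pct <= 10)) && echo "within tolerance"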
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index eb307ca37bfa..6f0a2e452ba1 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -495,7 +495,7 @@ vxlan_ping_test()
local delta=$((t1 - t0))
# Tolerate a few stray extra packets.
- ((expect <= delta && delta <= expect + 2))
+ ((expect <= delta && delta <= expect + 5))
check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
}
@@ -532,7 +532,7 @@ __test_ecn_encap()
RET=0
tc filter add dev v1 egress pref 77 prot ip \
- flower ip_tos $tos action pass
+ flower ip_tos $tos ip_proto udp dst_port $VXPORT action pass
sleep 1
vxlan_ping_test $h1 192.0.2.3 "-Q $q" v1 egress 77 10
tc filter del dev v1 egress pref 77 prot ip
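The added ip_proto udp dst_port $VXPORT keys narrow the pref-77 counter to VXLAN-encapsulated packets, so unrelated traffic that happens to carry the same TOS no longer inflates it. The rule's hit count can be read back with the suite's own helper, whose signature is inferred from its other uses in this patch:

# Packets matched by the pref-77 egress rule since it was installed.
tc_rule_stats_get v1 77 egress .packets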
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
index ac97f07e5ce8..a0bb4524e1e9 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
@@ -616,7 +616,7 @@ vxlan_ping_test()
local delta=$((t1 - t0))
# Tolerate a few stray extra packets.
- ((expect <= delta && delta <= expect + 2))
+ ((expect <= delta && delta <= expect + 5))
check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
}
@@ -653,7 +653,7 @@ __test_ecn_encap()
RET=0
tc filter add dev v1 egress pref 77 protocol ipv6 \
- flower ip_tos $tos action pass
+ flower ip_tos $tos ip_proto udp dst_port $VXPORT action pass
sleep 1
vxlan_ping_test $h1 2001:db8:1::3 "-Q $q" v1 egress 77 10
tc filter del dev v1 egress pref 77 protocol ipv6
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
index a596bbf3ed6a..fb9a34cb50c6 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
@@ -750,7 +750,7 @@ __test_learning()
expects[0]=0; expects[$idx1]=10; expects[$idx2]=0
vxlan_flood_test $mac $dst $vid "${expects[@]}"
- sleep 20
+ sleep 60
bridge fdb show brport $vx | grep $mac | grep -q self
check_fail $?
@@ -796,11 +796,11 @@ test_learning()
local dst=192.0.2.100
local vid=10
- # Enable learning on the VxLAN devices and set ageing time to 10 seconds
- ip link set dev br1 type bridge ageing_time 1000
- ip link set dev vx10 type vxlan ageing 10
+ # Enable learning on the VxLAN devices and set ageing time to 30 seconds
+ ip link set dev br1 type bridge ageing_time 3000
+ ip link set dev vx10 type vxlan ageing 30
ip link set dev vx10 type vxlan learning
- ip link set dev vx20 type vxlan ageing 10
+ ip link set dev vx20 type vxlan ageing 30
ip link set dev vx20 type vxlan learning
reapply_config
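Despite appearances, the two ageing values agree: bridge ageing_time is expressed in centiseconds (3000 = 30 s) while vxlan ageing is in plain seconds. A hedged way to read both back (the JSON key names are assumptions):

ip -d -j link show dev br1 | jq '.[].linkinfo.info_data.ageing_time'  # 3000
ip -d -j link show dev vx10 | jq '.[].linkinfo.info_data.ageing'      # 30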
diff --git a/tools/testing/selftests/net/fq_band_pktlimit.sh b/tools/testing/selftests/net/fq_band_pktlimit.sh
index 24b77bdf41ff..977070ed42b3 100755
--- a/tools/testing/selftests/net/fq_band_pktlimit.sh
+++ b/tools/testing/selftests/net/fq_band_pktlimit.sh
@@ -8,7 +8,7 @@
# 3. send 20 pkts on band A: verify that 0 are queued, 20 dropped
# 4. send 20 pkts on band B: verify that 10 are queued, 10 dropped
#
-# Send packets with a 100ms delay to ensure that previously sent
+# Send packets with a delay to ensure that previously sent
# packets are still queued when later ones are sent.
# Use SO_TXTIME for this.
@@ -29,19 +29,21 @@ ip -6 addr add fdaa::1/128 dev dummy0
ip -6 route add fdaa::/64 dev dummy0
tc qdisc replace dev dummy0 root handle 1: fq quantum 1514 initial_quantum 1514 limit 10
-./cmsg_sender -6 -p u -d 100000 -n 20 fdaa::2 8000
+DELAY=400000
+
+./cmsg_sender -6 -p u -d "${DELAY}" -n 20 fdaa::2 8000
OUT1="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
-./cmsg_sender -6 -p u -d 100000 -n 20 fdaa::2 8000
+./cmsg_sender -6 -p u -d "${DELAY}" -n 20 fdaa::2 8000
OUT2="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
-./cmsg_sender -6 -p u -d 100000 -n 20 -P 7 fdaa::2 8000
+./cmsg_sender -6 -p u -d "${DELAY}" -n 20 -P 7 fdaa::2 8000
OUT3="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
# Initial stats will report zero sent, as all packets are still
-# queued in FQ. Sleep for the delay period (100ms) and see that
+# queued in FQ. Sleep for at least the delay period and see that
# twenty are now sent.
-sleep 0.1
+sleep 0.6
OUT4="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
# Log the output after the test
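cmsg_sender's -d takes the SO_TXTIME offset in microseconds, so DELAY=400000 is 0.4 s and the 0.6 s sleep leaves a 0.2 s margin for all deadlines to expire. A hedged alternative that derives the sleep from DELAY instead of hard-coding it:

# Sleep for DELAY (usec) plus a 0.2 s safety margin.
sleep "$(awk -v d="${DELAY}" 'BEGIN { printf "%.1f", d / 1e6 + 0.2 }')"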
diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
index 6ebd58869a63..193b82745fd8 100644
--- a/tools/testing/selftests/net/ip_local_port_range.c
+++ b/tools/testing/selftests/net/ip_local_port_range.c
@@ -365,9 +365,6 @@ TEST_F(ip_local_port_range, late_bind)
__u32 range;
__u16 port;
- if (variant->so_protocol == IPPROTO_SCTP)
- SKIP(return, "SCTP doesn't support IP_BIND_ADDRESS_NO_PORT");
-
fd = socket(variant->so_domain, variant->so_type, 0);
ASSERT_GE(fd, 0) TH_LOG("socket failed");
@@ -414,6 +411,9 @@ TEST_F(ip_local_port_range, late_bind)
ASSERT_TRUE(!err) TH_LOG("close failed");
}
+XFAIL_ADD(ip_local_port_range, ip4_stcp, late_bind);
+XFAIL_ADD(ip_local_port_range, ip6_stcp, late_bind);
+
TEST_F(ip_local_port_range, get_port_range)
{
__u16 lo, hi;
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 75fc95675e2d..bc97ab33a00e 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -1,14 +1,15 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept their absence here: there were too many occurrences to
+# fix before having addressed all other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns="ns1-$rndh"
-ksft_skip=4
-test_cnt=1
-timeout_poll=100
+ns=""
+timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
ret=0
@@ -26,25 +27,17 @@ flush_pids()
done
}
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
- ip netns del $ns
+ mptcp_lib_ns_exit "${ns}"
}
mptcp_lib_check_mptcp
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
-ss -h | grep -q MPTCP
-if [ $? -ne 0 ];then
- echo "SKIP: ss tool does not support MPTCP"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip ss
get_msk_inuse()
{
@@ -61,21 +54,20 @@ __chk_nr()
nr=$(eval $command)
- printf "%-50s" "$msg"
+ mptcp_lib_print_title "$msg"
if [ "$nr" != "$expected" ]; then
if [ "$nr" = "$skip" ] && ! mptcp_lib_expect_all_features; then
- echo "[ skip ] Feature probably not supported"
+ mptcp_lib_pr_skip "Feature probably not supported"
mptcp_lib_result_skip "${msg}"
else
- echo "[ fail ] expected $expected found $nr"
+ mptcp_lib_pr_fail "expected $expected found $nr"
mptcp_lib_result_fail "${msg}"
ret=${KSFT_FAIL}
fi
else
- echo "[ ok ]"
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "${msg}"
fi
- test_cnt=$((test_cnt+1))
}
__chk_msk_nr()
@@ -120,20 +112,19 @@ wait_msk_nr()
sleep 1
done
- printf "%-50s" "$msg"
+ mptcp_lib_print_title "$msg"
if [ $i -ge $timeout ]; then
- echo "[ fail ] timeout while expecting $expected max $max last $nr"
+ mptcp_lib_pr_fail "timeout while expecting $expected max $max last $nr"
mptcp_lib_result_fail "${msg} # timeout"
ret=${KSFT_FAIL}
elif [ $nr != $expected ]; then
- echo "[ fail ] expected $expected found $nr"
+ mptcp_lib_pr_fail "expected $expected found $nr"
mptcp_lib_result_fail "${msg} # unexpected result"
ret=${KSFT_FAIL}
else
- echo "[ ok ]"
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "${msg}"
fi
- test_cnt=$((test_cnt+1))
}
chk_msk_fallback_nr()
@@ -186,7 +177,7 @@ chk_msk_inuse()
expected=$((expected + listen_nr))
for _ in $(seq 10); do
- if [ $(get_msk_inuse) -eq $expected ];then
+ if [ "$(get_msk_inuse)" -eq $expected ]; then
break
fi
sleep 0.1
@@ -224,8 +215,7 @@ wait_connected()
}
trap cleanup EXIT
-ip netns add $ns
-ip -n $ns link set dev lo up
+mptcp_lib_ns_init ns
echo "a" | \
timeout ${timeout_test} \
@@ -273,7 +263,7 @@ chk_msk_inuse 0 "1->0"
chk_msk_cestab 0 "1->0"
NR_CLIENTS=100
-for I in `seq 1 $NR_CLIENTS`; do
+for I in $(seq 1 $NR_CLIENTS); do
echo "a" | \
timeout ${timeout_test} \
ip netns exec $ns \
@@ -282,7 +272,7 @@ for I in `seq 1 $NR_CLIENTS`; do
done
mptcp_lib_wait_local_port_listen $ns $((NR_CLIENTS + 10001))
-for I in `seq 1 $NR_CLIENTS`; do
+for I in $(seq 1 $NR_CLIENTS); do
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 7898d62fce0b..4c4248554826 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -1,6 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept their absence here: there were too many occurrences to
+# fix before having addressed all other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
time_start=$(date +%s)
@@ -13,7 +18,6 @@ sout=""
cin_disconnect=""
cin=""
cout=""
-ksft_skip=4
capture=false
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
@@ -29,6 +33,7 @@ do_tcp=0
checksum=false
filesize=0
connect_per_transfer=1
+port=$((10000 - 1))
if [ $tc_loss -eq 100 ];then
tc_loss=1%
@@ -60,14 +65,14 @@ while getopts "$optstring" option;do
case "$option" in
"h")
usage $0
- exit 0
+ exit ${KSFT_PASS}
;;
"d")
if [ $OPTARG -ge 0 ];then
tc_delay="$OPTARG"
else
echo "-d requires numeric argument, got \"$OPTARG\"" 1>&2
- exit 1
+ exit ${KSFT_FAIL}
fi
;;
"e")
@@ -91,7 +96,7 @@ while getopts "$optstring" option;do
sndbuf="$OPTARG"
else
echo "-S requires numeric argument, got \"$OPTARG\"" 1>&2
- exit 1
+ exit ${KSFT_FAIL}
fi
;;
"R")
@@ -99,7 +104,7 @@ while getopts "$optstring" option;do
rcvbuf="$OPTARG"
else
echo "-R requires numeric argument, got \"$OPTARG\"" 1>&2
- exit 1
+ exit ${KSFT_FAIL}
fi
;;
"m")
@@ -116,21 +121,20 @@ while getopts "$optstring" option;do
;;
"?")
usage $0
- exit 1
+ exit ${KSFT_FAIL}
;;
esac
done
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
-ns3="ns3-$rndh"
-ns4="ns4-$rndh"
+ns1=""
+ns2=""
+ns3=""
+ns4=""
-TEST_COUNT=0
TEST_GROUP=""
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
rm -f "$cin_disconnect" "$cout_disconnect"
@@ -138,21 +142,12 @@ cleanup()
rm -f "$sin" "$sout"
rm -f "$capout"
- local netns
- for netns in "$ns1" "$ns2" "$ns3" "$ns4";do
- ip netns del $netns
- rm -f /tmp/$netns.{nstat,out}
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns3}" "${ns4}"
}
mptcp_lib_check_mptcp
mptcp_lib_check_kallsyms
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip
sin=$(mktemp)
sout=$(mktemp)
@@ -163,10 +158,7 @@ cin_disconnect="$cin".disconnect
cout_disconnect="$cout".disconnect
trap cleanup EXIT
-for i in "$ns1" "$ns2" "$ns3" "$ns4";do
- ip netns add $i || exit $ksft_skip
- ip -net $i link set lo up
-done
+mptcp_lib_ns_init ns1 ns2 ns3 ns4
# "$ns1" ns2 ns3 ns4
# ns1eth2 ns2eth1 ns2eth3 ns3eth2 ns3eth4 ns4eth3
@@ -225,8 +217,9 @@ set_ethtool_flags() {
local dev="$2"
local flags="$3"
- ip netns exec $ns ethtool -K $dev $flags 2>/dev/null
- [ $? -eq 0 ] && echo "INFO: set $ns dev $dev: ethtool -K $flags"
+ if ip netns exec $ns ethtool -K $dev $flags 2>/dev/null; then
+ mptcp_lib_pr_info "set $ns dev $dev: ethtool -K $flags"
+ fi
}
set_random_ethtool_flags() {
@@ -254,16 +247,23 @@ else
set_ethtool_flags "$ns4" ns4eth3 "$ethtool_args"
fi
+print_larger_title() {
+	# no duration is printed for these titles; use a wider field for alignment
+ MPTCP_LIB_TEST_FORMAT="%02u %-69s" \
+ mptcp_lib_print_title "${@}"
+}
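print_larger_title leans on a bash detail worth spelling out: an assignment prefixed to a function call overrides the variable for that single call only (in bash's default, non-POSIX mode). A self-contained demonstration:

f() { echo "inside: ${V}"; }
V=default
V=override f        # prints "inside: override"
echo "after: ${V}"  # prints "after: default"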
+
check_mptcp_disabled()
{
- local disabled_ns="ns_disabled-$rndh"
- ip netns add ${disabled_ns} || exit $ksft_skip
+ local disabled_ns
+ mptcp_lib_ns_init disabled_ns
+ print_larger_title "New MPTCP socket can be blocked via sysctl"
# net.mptcp.enabled should be enabled by default
if [ "$(ip netns exec ${disabled_ns} sysctl net.mptcp.enabled | awk '{ print $3 }')" -ne 1 ]; then
- echo -e "net.mptcp.enabled sysctl is not 1 by default\t\t[ FAIL ]"
+ mptcp_lib_pr_fail "net.mptcp.enabled sysctl is not 1 by default"
mptcp_lib_result_fail "net.mptcp.enabled sysctl is not 1 by default"
- ret=1
+ ret=${KSFT_FAIL}
return 1
fi
ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
@@ -271,16 +271,16 @@ check_mptcp_disabled()
local err=0
LC_ALL=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
grep -q "^socket: Protocol not available$" && err=1
- ip netns delete ${disabled_ns}
+ mptcp_lib_ns_exit "${disabled_ns}"
if [ ${err} -eq 0 ]; then
- echo -e "New MPTCP socket cannot be blocked via sysctl\t\t[ FAIL ]"
+ mptcp_lib_pr_fail "New MPTCP socket cannot be blocked via sysctl"
mptcp_lib_result_fail "New MPTCP socket cannot be blocked via sysctl"
- ret=1
+ ret=${KSFT_FAIL}
return 1
fi
- echo -e "New MPTCP socket can be blocked via sysctl\t\t[ OK ]"
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "New MPTCP socket can be blocked via sysctl"
return 0
}
@@ -301,8 +301,8 @@ do_ping()
ip netns exec ${connector_ns} ping ${ping_args} $connect_addr >/dev/null || rc=1
if [ $rc -ne 0 ] ; then
- echo "$listener_ns -> $connect_addr connectivity [ FAIL ]" 1>&2
- ret=1
+ mptcp_lib_pr_fail "$listener_ns -> $connect_addr connectivity"
+ ret=${KSFT_FAIL}
return 1
fi
@@ -320,24 +320,22 @@ do_transfer()
local local_addr="$6"
local extra_args="$7"
- local port
- port=$((10000+$TEST_COUNT))
- TEST_COUNT=$((TEST_COUNT+1))
+ port=$((port + 1))
if [ "$rcvbuf" -gt 0 ]; then
- extra_args="$extra_args -R $rcvbuf"
+ extra_args+=" -R $rcvbuf"
fi
if [ "$sndbuf" -gt 0 ]; then
- extra_args="$extra_args -S $sndbuf"
+ extra_args+=" -S $sndbuf"
fi
if [ -n "$testmode" ]; then
- extra_args="$extra_args -m $testmode"
+ extra_args+=" -m $testmode"
fi
if [ -n "$extra_args" ] && $options_log; then
- echo "INFO: extra options: $extra_args"
+ mptcp_lib_pr_info "extra options: $extra_args"
fi
options_log=false
@@ -349,10 +347,11 @@ do_transfer()
addr_port=$(printf "%s:%d" ${connect_addr} ${port})
local result_msg
result_msg="$(printf "%.3s %-5s -> %.3s (%-20s) %-5s" ${connector_ns} ${cl_proto} ${listener_ns} ${addr_port} ${srv_proto})"
- printf "%s\t" "${result_msg}"
+ mptcp_lib_print_title "${result_msg}"
if $capture; then
local capuser
+ local rndh="${connector_ns:4}"
if [ -z $SUDO_USER ] ; then
capuser=""
else
@@ -378,12 +377,18 @@ do_transfer()
nstat -n
fi
- local stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- local stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- local stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- local stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- local stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- local stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ local stat_synrx_last_l
+ local stat_ackrx_last_l
+ local stat_cookietx_last
+ local stat_cookierx_last
+ local stat_csum_err_s
+ local stat_csum_err_c
+ stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
@@ -427,7 +432,7 @@ do_transfer()
result_msg+=" # time=${duration}ms"
printf "(duration %05sms) " "${duration}"
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
- echo "[ FAIL ] client exit code $retc, server $rets" 1>&2
+ mptcp_lib_pr_fail "client exit code $retc, server $rets"
echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
ip netns exec ${listener_ns} ss -Menita 1>&2 -o "sport = :$port"
cat /tmp/${listener_ns}.out
@@ -446,11 +451,17 @@ do_transfer()
mptcp_lib_check_transfer $cin $sout "file received by server"
rets=$?
- local stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- local stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- local stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- local stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- local stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+ local extra=""
+ local stat_synrx_now_l
+ local stat_ackrx_now_l
+ local stat_cookietx_now
+ local stat_cookierx_now
+ local stat_ooo_now
+ stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
expect_synrx=$((stat_synrx_last_l))
expect_ackrx=$((stat_ackrx_last_l))
@@ -459,75 +470,79 @@ do_transfer()
cookies=${cookies##*=}
if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then
- expect_synrx=$((stat_synrx_last_l+$connect_per_transfer))
- expect_ackrx=$((stat_ackrx_last_l+$connect_per_transfer))
+ expect_synrx=$((stat_synrx_last_l+connect_per_transfer))
+ expect_ackrx=$((stat_ackrx_last_l+connect_per_transfer))
fi
if [ ${stat_synrx_now_l} -lt ${expect_synrx} ]; then
- printf "[ FAIL ] lower MPC SYN rx (%d) than expected (%d)\n" \
- "${stat_synrx_now_l}" "${expect_synrx}" 1>&2
+ mptcp_lib_pr_fail "lower MPC SYN rx (${stat_synrx_now_l})" \
+ "than expected (${expect_synrx})"
retc=1
fi
- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then
+ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ] && [ ${stat_ooo_now} -eq 0 ]; then
if [ ${stat_ooo_now} -eq 0 ]; then
- printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
- "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+ mptcp_lib_pr_fail "lower MPC ACK rx (${stat_ackrx_now_l})" \
+ "than expected (${expect_ackrx})"
rets=1
else
- printf "[ Note ] fallback due to TCP OoO"
+ extra+=" [ Note ] fallback due to TCP OoO"
fi
fi
if $checksum; then
- local csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- local csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ local csum_err_s
+ local csum_err_c
+ csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
if [ $csum_err_s_nr -gt 0 ]; then
- printf "[ FAIL ]\nserver got $csum_err_s_nr data checksum error[s]"
+ mptcp_lib_pr_fail "server got ${csum_err_s_nr} data checksum error[s]"
rets=1
fi
local csum_err_c_nr=$((csum_err_c - stat_csum_err_c))
if [ $csum_err_c_nr -gt 0 ]; then
- printf "[ FAIL ]\nclient got $csum_err_c_nr data checksum error[s]"
+ mptcp_lib_pr_fail "client got ${csum_err_c_nr} data checksum error[s]"
retc=1
fi
fi
- if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
- printf "[ OK ]"
- mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}"
- else
- mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}"
- fi
-
if [ $cookies -eq 2 ];then
if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
- printf " WARN: CookieSent: did not advance"
+ extra+=" WARN: CookieSent: did not advance"
fi
if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then
- printf " WARN: CookieRecv: did not advance"
+ extra+=" WARN: CookieRecv: did not advance"
fi
else
if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then
- printf " WARN: CookieSent: changed"
+ extra+=" WARN: CookieSent: changed"
fi
if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then
- printf " WARN: CookieRecv: changed"
+ extra+=" WARN: CookieRecv: changed"
fi
fi
if [ ${stat_synrx_now_l} -gt ${expect_synrx} ]; then
- printf " WARN: SYNRX: expect %d, got %d (probably retransmissions)" \
- "${expect_synrx}" "${stat_synrx_now_l}"
+ extra+=" WARN: SYNRX: expect ${expect_synrx},"
+ extra+=" got ${stat_synrx_now_l} (probably retransmissions)"
fi
if [ ${stat_ackrx_now_l} -gt ${expect_ackrx} ]; then
- printf " WARN: ACKRX: expect %d, got %d (probably retransmissions)" \
- "${expect_ackrx}" "${stat_ackrx_now_l}"
+ extra+=" WARN: ACKRX: expect ${expect_ackrx},"
+ extra+=" got ${stat_ackrx_now_l} (probably retransmissions)"
+ fi
+
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
+ mptcp_lib_pr_ok "${extra:1}"
+ mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}"
+ else
+ if [ -n "${extra}" ]; then
+ mptcp_lib_print_warn "${extra:1}"
+ fi
+ mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}"
fi
- echo
cat "$capout"
[ $retc -eq 0 ] && [ $rets -eq 0 ]
}
@@ -653,12 +668,12 @@ run_test_transparent()
# following function has been exported (T). Not great but better than
# checking for a specific kernel version.
if ! mptcp_lib_kallsyms_has "T __ip_sock_set_tos$"; then
- echo "INFO: ${msg} not supported by the kernel: SKIP"
+ mptcp_lib_pr_skip "${msg} not supported by the kernel"
mptcp_lib_result_skip "${TEST_GROUP}"
return
fi
-ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
+ if ! ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
flush ruleset
table inet mangle {
chain divert {
@@ -669,8 +684,8 @@ table inet mangle {
}
}
EOF
- if [ $? -ne 0 ]; then
- echo "SKIP: $msg, could not load nft ruleset"
+ then
+ mptcp_lib_pr_skip "$msg, could not load nft ruleset"
mptcp_lib_fail_if_expected_feature "nft rules"
mptcp_lib_result_skip "${TEST_GROUP}"
return
@@ -684,28 +699,26 @@ EOF
local_addr="0.0.0.0"
fi
- ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100
- if [ $? -ne 0 ]; then
+ if ! ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100; then
ip netns exec "$listener_ns" nft flush ruleset
- echo "SKIP: $msg, ip $r6flag rule failed"
+ mptcp_lib_pr_skip "$msg, ip $r6flag rule failed"
mptcp_lib_fail_if_expected_feature "ip rule"
mptcp_lib_result_skip "${TEST_GROUP}"
return
fi
- ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100
- if [ $? -ne 0 ]; then
+ if ! ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100; then
ip netns exec "$listener_ns" nft flush ruleset
ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
- echo "SKIP: $msg, ip route add local $local_addr failed"
+ mptcp_lib_pr_skip "$msg, ip route add local $local_addr failed"
mptcp_lib_fail_if_expected_feature "ip route"
mptcp_lib_result_skip "${TEST_GROUP}"
return
fi
- echo "INFO: test $msg"
+ mptcp_lib_pr_info "test $msg"
- TEST_COUNT=10000
+ port=$((20000 - 1))
local extra_args="-o TRANSPARENT"
do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP \
${connect_addr} ${local_addr} "${extra_args}"
@@ -716,12 +729,12 @@ EOF
ip -net "$listener_ns" route del local $local_addr/0 dev lo table 100
if [ $lret -ne 0 ]; then
- echo "FAIL: $msg, mptcp connection error" 1>&2
+ mptcp_lib_pr_fail "$msg, mptcp connection error"
ret=$lret
return 1
fi
- echo "PASS: $msg"
+ mptcp_lib_pr_info "$msg pass"
return 0
}
@@ -730,7 +743,7 @@ run_tests_peekmode()
local peekmode="$1"
TEST_GROUP="peek mode: ${peekmode}"
- echo "INFO: with peek mode: ${peekmode}"
+ mptcp_lib_pr_info "with peek mode: ${peekmode}"
run_tests_lo "$ns1" "$ns1" 10.0.1.1 1 "-P ${peekmode}"
run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-P ${peekmode}"
}
@@ -740,12 +753,12 @@ run_tests_mptfo()
TEST_GROUP="MPTFO"
if ! mptcp_lib_kallsyms_has "mptcp_fastopen_"; then
- echo "INFO: TFO not supported by the kernel: SKIP"
+ mptcp_lib_pr_skip "TFO not supported by the kernel"
mptcp_lib_result_skip "${TEST_GROUP}"
return
fi
- echo "INFO: with MPTFO start"
+ mptcp_lib_pr_info "with MPTFO start"
ip netns exec "$ns1" sysctl -q net.ipv4.tcp_fastopen=2
ip netns exec "$ns2" sysctl -q net.ipv4.tcp_fastopen=1
@@ -757,7 +770,7 @@ run_tests_mptfo()
ip netns exec "$ns1" sysctl -q net.ipv4.tcp_fastopen=0
ip netns exec "$ns2" sysctl -q net.ipv4.tcp_fastopen=0
- echo "INFO: with MPTFO end"
+ mptcp_lib_pr_info "with MPTFO end"
}
run_tests_disconnect()
@@ -768,7 +781,7 @@ run_tests_disconnect()
TEST_GROUP="full disconnect"
if ! mptcp_lib_kallsyms_has "mptcp_pm_data_reset$"; then
- echo "INFO: Full disconnect not supported: SKIP"
+ mptcp_lib_pr_skip "Full disconnect not supported"
mptcp_lib_result_skip "${TEST_GROUP}"
return
fi
@@ -781,7 +794,7 @@ run_tests_disconnect()
cin_disconnect="$old_cin"
connect_per_transfer=3
- echo "INFO: disconnect"
+ mptcp_lib_pr_info "disconnect"
run_tests_lo "$ns1" "$ns1" 10.0.1.1 1 "-I 3 -i $old_cin"
run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-I 3 -i $old_cin"
@@ -805,10 +818,10 @@ log_if_error()
local msg="$1"
if [ ${ret} -ne 0 ]; then
- echo "FAIL: ${msg}" 1>&2
+ mptcp_lib_pr_fail "${msg}"
final_ret=${ret}
- ret=0
+ ret=${KSFT_PASS}
return ${final_ret}
fi
@@ -830,7 +843,7 @@ check_mptcp_disabled
stop_if_error "The kernel configuration is not valid for MPTCP"
-echo "INFO: validating network environment with pings"
+print_larger_title "Validating network environment with pings"
for sender in "$ns1" "$ns2" "$ns3" "$ns4";do
do_ping "$ns1" $sender 10.0.1.1
do_ping "$ns1" $sender dead:beef:1::1
@@ -852,12 +865,13 @@ done
mptcp_lib_result_code "${ret}" "ping tests"
stop_if_error "Could not even run ping tests"
+mptcp_lib_pr_ok
[ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss delay ${tc_delay}ms
-echo -n "INFO: Using loss of $tc_loss "
-test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms "
+tc_info="loss of $tc_loss "
+test "$tc_delay" -gt 0 && tc_info+="delay $tc_delay ms "
-reorder_delay=$(($tc_delay / 4))
+reorder_delay=$((tc_delay / 4))
if [ -z "${tc_reorder}" ]; then
reorder1=$((RANDOM%10))
@@ -866,17 +880,17 @@ if [ -z "${tc_reorder}" ]; then
if [ $reorder_delay -gt 0 ] && [ $reorder1 -lt 100 ] && [ $reorder2 -gt 0 ]; then
tc_reorder="reorder ${reorder1}% ${reorder2}%"
- echo -n "$tc_reorder with delay ${reorder_delay}ms "
+ tc_info+="$tc_reorder with delay ${reorder_delay}ms "
fi
elif [ "$tc_reorder" = "0" ];then
tc_reorder=""
elif [ "$reorder_delay" -gt 0 ];then
# reordering requires some delay
tc_reorder="reorder $tc_reorder"
- echo -n "$tc_reorder with delay ${reorder_delay}ms "
+ tc_info+="$tc_reorder with delay ${reorder_delay}ms "
fi
-echo "on ns3eth4"
+mptcp_lib_pr_info "Using ${tc_info}on ns3eth4"
tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${reorder_delay}ms $tc_reorder
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index e4581b0dfb96..5e9211e89825 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -21,19 +21,19 @@ cinfail=""
cinsent=""
tmpfile=""
cout=""
+err=""
capout=""
ns1=""
ns2=""
-ksft_skip=4
iptables="iptables"
ip6tables="ip6tables"
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
-capture=0
-checksum=0
+capture=false
+checksum=false
ip_mptcp=0
check_invert=0
-validate_checksum=0
+validate_checksum=false
init=0
evts_ns1=""
evts_ns2=""
@@ -47,7 +47,7 @@ declare -A all_tests
declare -a only_tests_ids
declare -a only_tests_names
declare -A failed_tests
-TEST_COUNT=0
+MPTCP_LIB_TEST_FORMAT="%03u %s\n"
TEST_NAME=""
nr_blank=6
@@ -85,22 +85,12 @@ init_partial()
{
capout=$(mktemp)
- local sec rndh
- sec=$(date +%s)
- rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-
- ns1="ns1-$rndh"
- ns2="ns2-$rndh"
+ mptcp_lib_ns_init ns1 ns2
local netns
for netns in "$ns1" "$ns2"; do
- ip netns add $netns || exit $ksft_skip
- ip -net $netns link set lo up
- ip netns exec $netns sysctl -q net.mptcp.enabled=1
ip netns exec $netns sysctl -q net.mptcp.pm_type=0 2>/dev/null || true
- ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
- ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
- if [ $checksum -eq 1 ]; then
+ if $checksum; then
ip netns exec $netns sysctl -q net.mptcp.checksum_enabled=1
fi
done
@@ -144,51 +134,22 @@ cleanup_partial()
{
rm -f "$capout"
- local netns
- for netns in "$ns1" "$ns2"; do
- ip netns del $netns
- rm -f /tmp/$netns.{nstat,out}
- done
-}
-
-check_tools()
-{
- mptcp_lib_check_mptcp
- mptcp_lib_check_kallsyms
-
- if ! ip -Version &> /dev/null; then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
- fi
-
- if ! ss -h | grep -q MPTCP; then
- echo "SKIP: ss tool does not support MPTCP"
- exit $ksft_skip
- fi
-
- # Use the legacy version if available to support old kernel versions
- if iptables-legacy -V &> /dev/null; then
- iptables="iptables-legacy"
- ip6tables="ip6tables-legacy"
- elif ! iptables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without iptables tool"
- exit $ksft_skip
- elif ! ip6tables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without ip6tables tool"
- exit $ksft_skip
- fi
+ mptcp_lib_ns_exit "${ns1}" "${ns2}"
}
init() {
init=1
- check_tools
+ mptcp_lib_check_mptcp
+ mptcp_lib_check_kallsyms
+ mptcp_lib_check_tools ip ss "${iptables}" "${ip6tables}"
sin=$(mktemp)
sout=$(mktemp)
cin=$(mktemp)
cinsent=$(mktemp)
cout=$(mktemp)
+ err=$(mktemp)
evts_ns1=$(mktemp)
evts_ns2=$(mktemp)
@@ -204,14 +165,10 @@ cleanup()
rm -f "$sin" "$sout" "$cinsent" "$cinfail"
rm -f "$tmpfile"
rm -rf $evts_ns1 $evts_ns2
+ rm -f "$err"
cleanup_partial
}
-print_title()
-{
- printf "%03u %s\n" "${TEST_COUNT}" "${TEST_NAME}"
-}
-
print_check()
{
printf "%-${nr_blank}s%-36s" " " "${*}"
@@ -227,17 +184,17 @@ print_info()
print_ok()
{
- mptcp_lib_print_ok "[ ok ]${1:+ ${*}}"
+ mptcp_lib_pr_ok "${@}"
}
print_fail()
{
- mptcp_lib_print_err "[fail]${1:+ ${*}}"
+ mptcp_lib_pr_fail "${@}"
}
print_skip()
{
- mptcp_lib_print_warn "[skip]${1:+ ${*}}"
+ mptcp_lib_pr_skip "${@}"
}
# [ $1: fail msg ]
@@ -270,7 +227,7 @@ skip_test()
local i
for i in "${only_tests_ids[@]}"; do
- if [ "${TEST_COUNT}" -eq "${i}" ]; then
+ if [ "$((MPTCP_LIB_TEST_COUNTER+1))" -eq "${i}" ]; then
return 1
fi
done
@@ -305,14 +262,13 @@ reset()
TEST_NAME="${1}"
- TEST_COUNT=$((TEST_COUNT+1))
-
if skip_test; then
+ MPTCP_LIB_TEST_COUNTER=$((MPTCP_LIB_TEST_COUNTER+1))
last_test_ignored=1
return 1
fi
- print_title
+ mptcp_lib_print_title "${TEST_NAME}"
if [ "${init}" != "1" ]; then
init
@@ -385,7 +341,7 @@ reset_with_checksum()
ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=$ns1_enable
ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=$ns2_enable
- validate_checksum=1
+ validate_checksum=true
}
reset_with_allow_join_id0()
@@ -418,7 +374,7 @@ reset_with_allow_join_id0()
setup_fail_rules()
{
check_invert=1
- validate_checksum=1
+ validate_checksum=true
local i="$1"
local ip="${2:-4}"
local tables
@@ -435,15 +391,15 @@ setup_fail_rules()
-p tcp \
-m length --length 150:9999 \
-m statistic --mode nth --packet 1 --every 99999 \
- -j MARK --set-mark 42 || return ${ksft_skip}
+ -j MARK --set-mark 42 || return ${KSFT_SKIP}
- tc -n $ns2 qdisc add dev ns2eth$i clsact || return ${ksft_skip}
+ tc -n $ns2 qdisc add dev ns2eth$i clsact || return ${KSFT_SKIP}
tc -n $ns2 filter add dev ns2eth$i egress \
protocol ip prio 1000 \
handle 42 fw \
action pedit munge offset 148 u8 invert \
pipe csum tcp \
- index 100 || return ${ksft_skip}
+ index 100 || return ${KSFT_SKIP}
}
reset_with_fail()
@@ -457,7 +413,7 @@ reset_with_fail()
local rc=0
setup_fail_rules "${@}" || rc=$?
- if [ ${rc} -eq ${ksft_skip} ]; then
+ if [ ${rc} -eq ${KSFT_SKIP} ]; then
mark_as_skipped "unable to set the 'fail' rules"
return 1
fi
@@ -467,12 +423,8 @@ reset_with_events()
{
reset "${1}" || return 1
- :> "$evts_ns1"
- :> "$evts_ns2"
- ip netns exec $ns1 ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
- evts_ns1_pid=$!
- ip netns exec $ns2 ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
- evts_ns2_pid=$!
+ mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
+ mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
}
reset_with_tcp_filter()
@@ -497,13 +449,15 @@ reset_with_tcp_filter()
# $1: err msg
fail_test()
{
- ret=1
+ ret=${KSFT_FAIL}
- print_fail "${@}"
+ if [ ${#} -gt 0 ]; then
+ print_fail "${@}"
+ fi
# just in case a test is marked twice as failed
if [ ${last_test_failed} -eq 0 ]; then
- failed_tests[${TEST_COUNT}]="${TEST_NAME}"
+ failed_tests[${MPTCP_LIB_TEST_COUNTER}]="${TEST_NAME}"
dump_stats
last_test_failed=1
fi
@@ -645,7 +599,9 @@ wait_mpj()
kill_events_pids()
{
mptcp_lib_kill_wait $evts_ns1_pid
+ evts_ns1_pid=0
mptcp_lib_kill_wait $evts_ns2_pid
+ evts_ns2_pid=0
}
pm_nl_set_limits()
@@ -799,18 +755,18 @@ pm_nl_check_endpoint()
line="${line% }"
# the dump order is: address id flags port dev
[ -n "$addr" ] && expected_line="$addr"
- expected_line="$expected_line $id"
- [ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}"
- [ -n "$dev" ] && expected_line="$expected_line $dev"
- [ -n "$port" ] && expected_line="$expected_line $port"
+ expected_line+=" $id"
+ [ -n "$_flags" ] && expected_line+=" ${_flags//","/" "}"
+ [ -n "$dev" ] && expected_line+=" $dev"
+ [ -n "$port" ] && expected_line+=" $port"
else
line=$(ip netns exec $ns ./pm_nl_ctl get $_id)
# the dump order is: id flags dev address port
expected_line="$id"
- [ -n "$flags" ] && expected_line="$expected_line $flags"
- [ -n "$dev" ] && expected_line="$expected_line $dev"
- [ -n "$addr" ] && expected_line="$expected_line $addr"
- [ -n "$_port" ] && expected_line="$expected_line $_port"
+ [ -n "$flags" ] && expected_line+=" $flags"
+ [ -n "$dev" ] && expected_line+=" $dev"
+ [ -n "$addr" ] && expected_line+=" $addr"
+ [ -n "$_port" ] && expected_line+=" $_port"
fi
if [ "$line" = "$expected_line" ]; then
print_ok
@@ -1012,7 +968,7 @@ do_transfer()
local srv_proto="$4"
local connect_addr="$5"
- local port=$((10000 + TEST_COUNT - 1))
+ local port=$((10000 + MPTCP_LIB_TEST_COUNTER - 1))
local cappid
local FAILING_LINKS=${FAILING_LINKS:-""}
local fastclose=${fastclose:-""}
@@ -1022,7 +978,7 @@ do_transfer()
:> "$sout"
:> "$capout"
- if [ $capture -eq 1 ]; then
+ if $capture; then
local capuser
if [ -z $SUDO_USER ] ; then
capuser=""
@@ -1030,9 +986,9 @@ do_transfer()
capuser="-Z $SUDO_USER"
fi
- capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "${listener_ns}")
+ capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "${listener_ns}")
- echo "Capturing traffic for test $TEST_COUNT into $capfile"
+ echo "Capturing traffic for test $MPTCP_LIB_TEST_COUNTER into $capfile"
ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
cappid=$!
@@ -1124,7 +1080,7 @@ do_transfer()
wait $spid
local rets=$?
- if [ $capture -eq 1 ]; then
+ if $capture; then
sleep 1
kill $cappid
fi
@@ -1261,7 +1217,7 @@ chk_csum_nr()
print_check "sum"
count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
if [ "$count" != "$csum_ns1" ]; then
- extra_msg="$extra_msg ns1=$count"
+ extra_msg+=" ns1=$count"
fi
if [ -z "$count" ]; then
print_skip
@@ -1274,7 +1230,7 @@ chk_csum_nr()
print_check "csum"
count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
if [ "$count" != "$csum_ns2" ]; then
- extra_msg="$extra_msg ns2=$count"
+ extra_msg+=" ns2=$count"
fi
if [ -z "$count" ]; then
print_skip
@@ -1318,7 +1274,7 @@ chk_fail_nr()
print_check "ftx"
count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
if [ "$count" != "$fail_tx" ]; then
- extra_msg="$extra_msg,tx=$count"
+ extra_msg+=",tx=$count"
fi
if [ -z "$count" ]; then
print_skip
@@ -1332,7 +1288,7 @@ chk_fail_nr()
print_check "failrx"
count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
if [ "$count" != "$fail_rx" ]; then
- extra_msg="$extra_msg,rx=$count"
+ extra_msg+=",rx=$count"
fi
if [ -z "$count" ]; then
print_skip
@@ -1367,7 +1323,7 @@ chk_fclose_nr()
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$fclose_tx" ]; then
- extra_msg="$extra_msg,tx=$count"
+ extra_msg+=",tx=$count"
fail_test "got $count MP_FASTCLOSE[s] TX expected $fclose_tx"
else
print_ok
@@ -1378,7 +1334,7 @@ chk_fclose_nr()
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$fclose_rx" ]; then
- extra_msg="$extra_msg,rx=$count"
+ extra_msg+=",rx=$count"
fail_test "got $count MP_FASTCLOSE[s] RX expected $fclose_rx"
else
print_ok
@@ -1512,7 +1468,7 @@ chk_join_nr()
else
print_ok
fi
- if [ $validate_checksum -eq 1 ]; then
+ if $validate_checksum; then
chk_csum_nr $csum_ns1 $csum_ns2
chk_fail_nr $fail_nr $fail_nr
chk_rst_nr $rst_nr $rst_nr
@@ -1747,7 +1703,7 @@ chk_rm_nr()
count=$((count + cnt))
if [ "$count" != "$rm_subflow_nr" ]; then
suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
- extra_msg="$extra_msg simult"
+ extra_msg+=" simult"
fi
if [ $count -ge "$rm_subflow_nr" ] && \
[ "$count" -le "$((rm_subflow_nr *2 ))" ]; then
@@ -2828,29 +2784,16 @@ backup_tests()
fi
}
-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
-LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
-
-AF_INET=2
-AF_INET6=10
-
verify_listener_events()
{
- local evt=$1
local e_type=$2
- local e_family=$3
local e_saddr=$4
local e_sport=$5
- local type
- local family
- local saddr
- local sport
local name
- if [ $e_type = $LISTENER_CREATED ]; then
+ if [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CREATED ]; then
name="LISTENER_CREATED"
- elif [ $e_type = $LISTENER_CLOSED ]; then
+ elif [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CLOSED ]; then
name="LISTENER_CLOSED "
else
name="$e_type"
@@ -2863,23 +2806,11 @@ verify_listener_events()
return
fi
- type=$(mptcp_lib_evts_get_info type "$evt" "$e_type")
- family=$(mptcp_lib_evts_get_info family "$evt" "$e_type")
- sport=$(mptcp_lib_evts_get_info sport "$evt" "$e_type")
- if [ $family ] && [ $family = $AF_INET6 ]; then
- saddr=$(mptcp_lib_evts_get_info saddr6 "$evt" "$e_type")
- else
- saddr=$(mptcp_lib_evts_get_info saddr4 "$evt" "$e_type")
- fi
-
- if [ $type ] && [ $type = $e_type ] &&
- [ $family ] && [ $family = $e_family ] &&
- [ $saddr ] && [ $saddr = $e_saddr ] &&
- [ $sport ] && [ $sport = $e_sport ]; then
+ if mptcp_lib_verify_listener_events "${@}"; then
print_ok
return 0
fi
- fail_test "$e_type:$type $e_family:$family $e_saddr:$saddr $e_sport:$sport"
+ fail_test
}
add_addr_ports_tests()
@@ -2917,8 +2848,10 @@ add_addr_ports_tests()
chk_add_nr 1 1 1
chk_rm_nr 1 1 invert
- verify_listener_events $evts_ns1 $LISTENER_CREATED $AF_INET 10.0.2.1 10100
- verify_listener_events $evts_ns1 $LISTENER_CLOSED $AF_INET 10.0.2.1 10100
+ verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CREATED \
+ $MPTCP_LIB_AF_INET 10.0.2.1 10100
+ verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CLOSED \
+ $MPTCP_LIB_AF_INET 10.0.2.1 10100
kill_events_pids
fi
@@ -3356,6 +3289,77 @@ userspace_pm_rm_sf()
wait_rm_sf $1 "${cnt}"
}
+check_output()
+{
+ local cmd="$1"
+ local expected="$2"
+ local msg="$3"
+ local rc=0
+
+ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
+ if [ ${rc} -eq 2 ]; then
+ fail_test "fail to check output # error ${rc}"
+ elif [ ${rc} -eq 0 ]; then
+ print_ok
+ elif [ ${rc} -eq 1 ]; then
+ fail_test "fail to check output # different output"
+ fi
+}
+
+# $1: ns
+userspace_pm_dump()
+{
+ local evts=$evts_ns1
+ local tk
+
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+
+ ip netns exec $1 ./pm_nl_ctl dump token $tk
+}
+
+# $1: ns ; $2: id
+userspace_pm_get_addr()
+{
+ local evts=$evts_ns1
+ local tk
+
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+
+ ip netns exec $1 ./pm_nl_ctl get $2 token $tk
+}
+
+userspace_pm_chk_dump_addr()
+{
+ local ns="${1}"
+ local exp="${2}"
+ local check="${3}"
+
+ print_check "dump addrs ${check}"
+
+ if mptcp_lib_kallsyms_has "mptcp_userspace_pm_dump_addr$"; then
+ check_output "userspace_pm_dump ${ns}" "${exp}"
+ else
+ print_skip
+ fi
+}
+
+userspace_pm_chk_get_addr()
+{
+ local ns="${1}"
+ local id="${2}"
+ local exp="${3}"
+
+ print_check "get id ${id} addr"
+
+ if mptcp_lib_kallsyms_has "mptcp_userspace_pm_get_addr$"; then
+ check_output "userspace_pm_get_addr ${ns} ${id}" "${exp}"
+ else
+ print_skip
+ fi
+}
+
userspace_tests()
{
# userspace pm type prevents add_addr
@@ -3447,10 +3451,18 @@ userspace_tests()
chk_mptcp_info subflows 2 subflows 2
chk_subflows_total 3 3
chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
+ userspace_pm_chk_dump_addr "${ns1}" \
+ $'id 10 flags signal 10.0.2.1\nid 20 flags signal 10.0.3.1' \
+ "signal"
+ userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
+ userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
userspace_pm_rm_addr $ns1 10
- userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED
+ userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED
+ userspace_pm_chk_dump_addr "${ns1}" \
+ "id 20 flags signal 10.0.3.1" "after rm_addr 10"
userspace_pm_rm_addr $ns1 20
- userspace_pm_rm_sf $ns1 10.0.3.1 $SUB_ESTABLISHED
+ userspace_pm_rm_sf $ns1 10.0.3.1 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
+ userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
chk_rm_nr 2 2 invert
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
@@ -3471,8 +3483,15 @@ userspace_tests()
chk_join_nr 1 1 1
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 2 2
+ userspace_pm_chk_dump_addr "${ns2}" \
+ "id 20 flags subflow 10.0.3.2" \
+ "subflow"
+ userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
userspace_pm_rm_addr $ns2 20
- userspace_pm_rm_sf $ns2 10.0.3.2 $SUB_ESTABLISHED
+ userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
+ userspace_pm_chk_dump_addr "${ns2}" \
+ "" \
+ "after rm_addr 20"
chk_rm_nr 1 1
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
@@ -3492,6 +3511,8 @@ userspace_tests()
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
userspace_pm_add_sf $ns2 10.0.3.2 0
+ userspace_pm_chk_dump_addr "${ns2}" \
+ "id 0 flags subflow 10.0.3.2" "id 0 subflow"
chk_join_nr 1 1 1
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 2 2
@@ -3610,7 +3631,7 @@ usage()
{
if [ -n "${1}" ]; then
echo "${1}"
- ret=1
+ ret=${KSFT_FAIL}
fi
echo "mptcp_join usage:"
@@ -3673,10 +3694,10 @@ while getopts "${all_tests_args}cCih" opt; do
tests+=("${all_tests[${opt}]}")
;;
c)
- capture=1
+ capture=true
;;
C)
- checksum=1
+ checksum=true
;;
i)
ip_mptcp=1
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 3777d66fc56d..d529b4b37af8 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -8,7 +8,21 @@ readonly KSFT_SKIP=4
# shellcheck disable=SC2155 # declare and assign separately
readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
+# These variables are used in some selftests, read-only
+declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
+declare -rx MPTCP_LIB_EVENT_REMOVED=7 # MPTCP_EVENT_REMOVED
+declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
+declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
+declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
+declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16 # MPTCP_EVENT_LISTENER_CLOSED
+
+declare -rx MPTCP_LIB_AF_INET=2
+declare -rx MPTCP_LIB_AF_INET6=10
+
MPTCP_LIB_SUBTESTS=()
+MPTCP_LIB_SUBTESTS_DUPLICATED=0
+MPTCP_LIB_TEST_COUNTER=0
+MPTCP_LIB_TEST_FORMAT="%02u %-50s"
# only if supported (or forced) and not disabled, see no-color.org
if { [ -t 1 ] || [ "${SELFTESTS_MPTCP_LIB_COLOR_FORCE:-}" = "1" ]; } &&
@@ -47,6 +61,23 @@ mptcp_lib_print_err() {
mptcp_lib_print_color "${MPTCP_LIB_COLOR_RED}${*}"
}
+# shellcheck disable=SC2120 # parameters are optional
+mptcp_lib_pr_ok() {
+ mptcp_lib_print_ok "[ OK ]${1:+ ${*}}"
+}
+
+mptcp_lib_pr_skip() {
+ mptcp_lib_print_warn "[SKIP]${1:+ ${*}}"
+}
+
+mptcp_lib_pr_fail() {
+ mptcp_lib_print_err "[FAIL]${1:+ ${*}}"
+}
+
+mptcp_lib_pr_info() {
+ mptcp_lib_print_info "INFO: ${*}"
+}
+
# SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES env var can be set when validating all
# features using the last version of the kernel and the selftests to make sure
# a test is not being skipped by mistake.
@@ -77,14 +108,14 @@ mptcp_lib_has_file() {
mptcp_lib_check_mptcp() {
if ! mptcp_lib_has_file "/proc/sys/net/mptcp/enabled"; then
- echo "SKIP: MPTCP support is not available"
+ mptcp_lib_pr_skip "MPTCP support is not available"
exit ${KSFT_SKIP}
fi
}
mptcp_lib_check_kallsyms() {
if ! mptcp_lib_has_file "/proc/kallsyms"; then
- echo "SKIP: CONFIG_KALLSYMS is missing"
+ mptcp_lib_pr_skip "CONFIG_KALLSYMS is missing"
exit ${KSFT_SKIP}
fi
}
@@ -146,12 +177,26 @@ mptcp_lib_kversion_ge() {
mptcp_lib_fail_if_expected_feature "kernel version ${1} lower than ${v}"
}
+__mptcp_lib_result_check_duplicated() {
+ local subtest
+
+ for subtest in "${MPTCP_LIB_SUBTESTS[@]}"; do
+ if [[ "${subtest}" == *" - ${KSFT_TEST}: ${*%% #*}" ]]; then
+ MPTCP_LIB_SUBTESTS_DUPLICATED=1
+ mptcp_lib_print_err "Duplicated entry: ${*}"
+ break
+ fi
+ done
+}
+
__mptcp_lib_result_add() {
local result="${1}"
shift
local id=$((${#MPTCP_LIB_SUBTESTS[@]} + 1))
+ __mptcp_lib_result_check_duplicated "${*}"
+
MPTCP_LIB_SUBTESTS+=("${result} ${id} - ${KSFT_TEST}: ${*}")
}
@@ -206,6 +251,12 @@ mptcp_lib_result_print_all_tap() {
for subtest in "${MPTCP_LIB_SUBTESTS[@]}"; do
printf "%s\n" "${subtest}"
done
+
+ if [ "${MPTCP_LIB_SUBTESTS_DUPLICATED}" = 1 ] &&
+ mptcp_lib_expect_all_features; then
+ mptcp_lib_print_err "Duplicated test entries"
+ exit ${KSFT_FAIL}
+ fi
}
# get the value of keyword $1 in the line marked by keyword $2
@@ -271,7 +322,7 @@ mptcp_lib_check_transfer() {
local what="${3}"
if ! cmp "$in" "$out" > /dev/null 2>&1; then
- echo "[ FAIL ] $what does not match (in, out):"
+ mptcp_lib_pr_fail "$what does not match (in, out):"
mptcp_lib_print_file_err "$in"
mptcp_lib_print_file_err "$out"
@@ -298,3 +349,159 @@ mptcp_lib_wait_local_port_listen() {
sleep 0.1
done
}
+
+mptcp_lib_check_output() {
+ local err="${1}"
+ local cmd="${2}"
+ local expected="${3}"
+ local cmd_ret=0
+ local out
+
+	out=$(${cmd} 2>"${err}") || cmd_ret=${?}
+
+ if [ ${cmd_ret} -ne 0 ]; then
+ mptcp_lib_pr_fail "command execution '${cmd}' stderr"
+ cat "${err}"
+ return 2
+ elif [ "${out}" = "${expected}" ]; then
+ return 0
+ else
+ mptcp_lib_pr_fail "expected '${expected}' got '${out}'"
+ return 1
+ fi
+}
+
+mptcp_lib_check_tools() {
+ local tool
+
+ for tool in "${@}"; do
+ case "${tool}" in
+ "ip")
+ if ! ip -Version &> /dev/null; then
+ mptcp_lib_pr_skip "Could not run test without ip tool"
+ exit ${KSFT_SKIP}
+ fi
+ ;;
+ "ss")
+ if ! ss -h | grep -q MPTCP; then
+ mptcp_lib_pr_skip "ss tool does not support MPTCP"
+ exit ${KSFT_SKIP}
+ fi
+ ;;
+ "iptables"* | "ip6tables"*)
+ if ! "${tool}" -V &> /dev/null; then
+ mptcp_lib_pr_skip "Could not run all tests without ${tool}"
+ exit ${KSFT_SKIP}
+ fi
+ ;;
+ *)
+ mptcp_lib_pr_fail "Internal error: unsupported tool: ${tool}"
+ exit ${KSFT_FAIL}
+ ;;
+ esac
+ done
+}
+
+mptcp_lib_ns_init() {
+ local sec rndh
+
+ sec=$(date +%s)
+ rndh=$(printf %x "${sec}")-$(mktemp -u XXXXXX)
+
+ local netns
+ for netns in "${@}"; do
+ eval "${netns}=${netns}-${rndh}"
+
+ ip netns add "${!netns}" || exit ${KSFT_SKIP}
+ ip -net "${!netns}" link set lo up
+ ip netns exec "${!netns}" sysctl -q net.mptcp.enabled=1
+ ip netns exec "${!netns}" sysctl -q net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${!netns}" sysctl -q net.ipv4.conf.default.rp_filter=0
+ done
+}
+
+mptcp_lib_ns_exit() {
+ local netns
+ for netns in "${@}"; do
+ ip netns del "${netns}"
+ rm -f /tmp/"${netns}".{nstat,out}
+ done
+}
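mptcp_lib_ns_init takes variable names rather than values: eval assigns the generated netns name through each identifier, and ${!netns} dereferences it for the setup commands. Seen from a caller:

ns1="" ns2=""
mptcp_lib_ns_init ns1 ns2          # pass the names, unquoted
echo "created ${ns1} and ${ns2}"   # e.g. ns1-65f1a2c3-Ab12Cd ...
mptcp_lib_ns_exit "${ns1}" "${ns2}"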
+
+mptcp_lib_events() {
+ local ns="${1}"
+ local evts="${2}"
+ declare -n pid="${3}"
+
+ :>"${evts}"
+
+ mptcp_lib_kill_wait "${pid:-0}"
+ ip netns exec "${ns}" ./pm_nl_ctl events >> "${evts}" 2>&1 &
+ pid=$!
+}
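mptcp_lib_events hands the monitor pid back through declare -n, a bash nameref: writes to pid inside the function land in whichever caller variable was named as the third argument. Because it kills any previous monitor first, it is safe to call repeatedly:

evts=$(mktemp); evts_pid=0
mptcp_lib_events "${ns1}" "${evts}" evts_pid   # the name, not ${evts_pid}
# ... run the scenario, then parse "${evts}" ...
mptcp_lib_kill_wait "${evts_pid}"
rm -f "${evts}"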
+
+mptcp_lib_print_title() {
+ : "${MPTCP_LIB_TEST_COUNTER:?}"
+ : "${MPTCP_LIB_TEST_FORMAT:?}"
+
+ # shellcheck disable=SC2059 # the format is in a variable
+ printf "${MPTCP_LIB_TEST_FORMAT}" "$((++MPTCP_LIB_TEST_COUNTER))" "${*}"
+}
+
+# $1: var name ; $2: prev ret
+mptcp_lib_check_expected_one() {
+ local var="${1}"
+ local exp="e_${var}"
+ local prev_ret="${2}"
+
+ if [ "${!var}" = "${!exp}" ]; then
+ return 0
+ fi
+
+ if [ "${prev_ret}" = "0" ]; then
+ mptcp_lib_pr_fail
+ fi
+
+ mptcp_lib_print_err "Expected value for '${var}': '${!exp}', got '${!var}'."
+ return 1
+}
+
+# $@: all var names to check
+mptcp_lib_check_expected() {
+ local rc=0
+ local var
+
+ for var in "${@}"; do
+ mptcp_lib_check_expected_one "${var}" "${rc}" || rc=1
+ done
+
+ return "${rc}"
+}
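mptcp_lib_check_expected pairs each variable with an e_-prefixed twin via indirect expansion (${!var} against ${!exp}), printing one failure header and then every mismatch. A small illustration:

type=15;        e_type=15
saddr=10.0.2.1; e_saddr=10.0.2.2
mptcp_lib_check_expected "type" "saddr" ||
	echo "saddr mismatch was reported"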
+
+# shellcheck disable=SC2034 # Some variables are used below but indirectly
+mptcp_lib_verify_listener_events() {
+ local evt=${1}
+ local e_type=${2}
+ local e_family=${3}
+ local e_saddr=${4}
+ local e_sport=${5}
+ local type
+ local family
+ local saddr
+ local sport
+ local rc=0
+
+ type=$(mptcp_lib_evts_get_info type "${evt}" "${e_type}")
+ family=$(mptcp_lib_evts_get_info family "${evt}" "${e_type}")
+	if [ "${family}" ] && [ "${family}" = "${MPTCP_LIB_AF_INET6}" ]; then
+ saddr=$(mptcp_lib_evts_get_info saddr6 "${evt}" "${e_type}")
+ else
+ saddr=$(mptcp_lib_evts_get_info saddr4 "${evt}" "${e_type}")
+ fi
+ sport=$(mptcp_lib_evts_get_info sport "${evt}" "${e_type}")
+
+ mptcp_lib_check_expected "type" "family" "saddr" "sport" || rc="${?}"
+ return "${rc}"
+}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index c643872ddf47..e2d70c18786e 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -1,6 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept their absence here: there were too many occurrences to
+# fix before having addressed all other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
ret=0
@@ -8,17 +13,14 @@ sin=""
sout=""
cin=""
cout=""
-ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
iptables="iptables"
ip6tables="ip6tables"
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
-ns_sbox="ns_sbox-$rndh"
+ns1=""
+ns2=""
+ns_sbox=""
add_mark_rules()
{
@@ -40,17 +42,10 @@ add_mark_rules()
init()
{
- local netns
- for netns in "$ns1" "$ns2" "$ns_sbox";do
- ip netns add $netns || exit $ksft_skip
- ip -net $netns link set lo up
- ip netns exec $netns sysctl -q net.mptcp.enabled=1
- ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
- ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
- done
+ mptcp_lib_ns_init ns1 ns2 ns_sbox
local i
- for i in `seq 1 4`; do
+ for i in $(seq 1 4); do
ip link add ns1eth$i netns "$ns1" type veth peer name ns2eth$i netns "$ns2"
ip -net "$ns1" addr add 10.0.$i.1/24 dev ns1eth$i
ip -net "$ns1" addr add dead:beef:$i::1/64 dev ns1eth$i nodad
@@ -77,36 +72,18 @@ init()
add_mark_rules $ns2 2
}
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
- local netns
- for netns in "$ns1" "$ns2" "$ns_sbox"; do
- ip netns del $netns
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns_sbox}"
rm -f "$cin" "$cout"
rm -f "$sin" "$sout"
}
mptcp_lib_check_mptcp
mptcp_lib_check_kallsyms
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
-
-# Use the legacy version if available to support old kernel versions
-if iptables-legacy -V &> /dev/null; then
- iptables="iptables-legacy"
- ip6tables="ip6tables-legacy"
-elif ! iptables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without iptables tool"
- exit $ksft_skip
-elif ! ip6tables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without ip6tables tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip "${iptables}" "${ip6tables}"
check_mark()
{
@@ -126,8 +103,9 @@ check_mark()
local v
for v in $values; do
if [ $v -ne 0 ]; then
- echo "FAIL: got $tables $values in ns $ns , not 0 - not all expected packets marked" 1>&2
- ret=1
+ mptcp_lib_pr_fail "got $tables $values in ns $ns," \
+ "not 0 - not all expected packets marked"
+ ret=${KSFT_FAIL}
return 1
fi
done
@@ -135,6 +113,11 @@ check_mark()
return 0
}
+print_title()
+{
+ mptcp_lib_print_title "${@}"
+}
+
do_transfer()
{
local listener_ns="$1"
@@ -184,8 +167,9 @@ do_transfer()
wait $spid
local rets=$?
+ print_title "Transfer ${ip:2}"
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
- echo " client exit code $retc, server $rets" 1>&2
+ mptcp_lib_pr_fail "client exit code $retc, server $rets"
echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
ip netns exec ${listener_ns} ss -Menita 1>&2 -o "sport = :$port"
@@ -194,10 +178,17 @@ do_transfer()
mptcp_lib_result_fail "transfer ${ip}"
- ret=1
+ ret=${KSFT_FAIL}
return 1
fi
+ if ! mptcp_lib_check_transfer $cin $sout "file received by server"; then
+ rets=1
+ else
+ mptcp_lib_pr_ok
+ fi
+ mptcp_lib_result_code "${rets}" "transfer ${ip}"
+ print_title "Mark ${ip:2}"
if [ $local_addr = "::" ];then
check_mark $listener_ns 6 || retc=1
check_mark $connector_ns 6 || retc=1
@@ -206,15 +197,13 @@ do_transfer()
check_mark $connector_ns 4 || retc=1
fi
- mptcp_lib_check_transfer $cin $sout "file received by server"
- rets=$?
-
mptcp_lib_result_code "${retc}" "mark ${ip}"
- mptcp_lib_result_code "${rets}" "transfer ${ip}"
if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
+ mptcp_lib_pr_ok
return 0
fi
+ mptcp_lib_pr_fail
return 1
}
@@ -235,7 +224,7 @@ do_mptcp_sockopt_tests()
local lret=0
if ! mptcp_lib_kallsyms_has "mptcp_diag_fill_info$"; then
- echo "INFO: MPTCP sockopt not supported: SKIP"
+ mptcp_lib_pr_skip "MPTCP sockopt not supported"
mptcp_lib_result_skip "sockopt"
return
fi
@@ -243,23 +232,27 @@ do_mptcp_sockopt_tests()
ip netns exec "$ns_sbox" ./mptcp_sockopt
lret=$?
+ print_title "SOL_MPTCP sockopt v4"
if [ $lret -ne 0 ]; then
- echo "FAIL: SOL_MPTCP getsockopt" 1>&2
+ mptcp_lib_pr_fail
mptcp_lib_result_fail "sockopt v4"
ret=$lret
return
fi
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "sockopt v4"
ip netns exec "$ns_sbox" ./mptcp_sockopt -6
lret=$?
+ print_title "SOL_MPTCP sockopt v6"
if [ $lret -ne 0 ]; then
- echo "FAIL: SOL_MPTCP getsockopt (ipv6)" 1>&2
+ mptcp_lib_pr_fail
mptcp_lib_result_fail "sockopt v6"
ret=$lret
return
fi
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "sockopt v6"
}
@@ -282,16 +275,17 @@ run_tests()
do_tcpinq_test()
{
+ print_title "TCP_INQ cmsg/ioctl $*"
ip netns exec "$ns_sbox" ./mptcp_inq "$@"
local lret=$?
if [ $lret -ne 0 ];then
ret=$lret
- echo "FAIL: mptcp_inq $@" 1>&2
+ mptcp_lib_pr_fail
mptcp_lib_result_fail "TCP_INQ: $*"
return $lret
fi
- echo "PASS: TCP_INQ cmsg/ioctl $@"
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "TCP_INQ: $*"
return $lret
}
@@ -301,7 +295,7 @@ do_tcpinq_tests()
local lret=0
if ! mptcp_lib_kallsyms_has "mptcp_ioctl$"; then
- echo "INFO: TCP_INQ not supported: SKIP"
+ mptcp_lib_pr_skip "TCP_INQ not supported"
mptcp_lib_result_skip "TCP_INQ"
return
fi
@@ -337,15 +331,7 @@ trap cleanup EXIT
run_tests $ns1 $ns2 10.0.1.1
run_tests $ns1 $ns2 dead:beef:1::1
-if [ $ret -eq 0 ];then
- echo "PASS: all packets had packet mark set"
-fi
-
do_mptcp_sockopt_tests
-if [ $ret -eq 0 ];then
- echo "PASS: SOL_MPTCP getsockopt has expected information"
-fi
-
do_tcpinq_tests
mptcp_lib_result_print_all_tap
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 71899a3ffa7a..6ab8c5d36340 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -1,77 +1,69 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept the existing style here: there were too many cases to
+# fix before all the other issues detected by shellcheck had been addressed.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
-ksft_skip=4
ret=0
usage() {
echo "Usage: $0 [ -h ]"
}
-
+optstring=h
while getopts "$optstring" option;do
case "$option" in
"h")
usage $0
- exit 0
+ exit ${KSFT_PASS}
;;
"?")
usage $0
- exit 1
+ exit ${KSFT_FAIL}
;;
esac
done
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
+ns1=""
err=$(mktemp)
-ret=0
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
rm -f $err
- ip netns del $ns1
+ mptcp_lib_ns_exit "${ns1}"
}
mptcp_lib_check_mptcp
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip
trap cleanup EXIT
-ip netns add $ns1 || exit $ksft_skip
-ip -net $ns1 link set lo up
-ip netns exec $ns1 sysctl -q net.mptcp.enabled=1
+mptcp_lib_ns_init ns1
check()
{
local cmd="$1"
local expected="$2"
local msg="$3"
- local out=`$cmd 2>$err`
- local cmd_ret=$?
-
- printf "%-50s" "$msg"
- if [ $cmd_ret -ne 0 ]; then
- echo "[FAIL] command execution '$cmd' stderr "
- cat $err
- mptcp_lib_result_fail "${msg} # error ${cmd_ret}"
- ret=1
- elif [ "$out" = "$expected" ]; then
- echo "[ OK ]"
+ local rc=0
+
+ mptcp_lib_print_title "$msg"
+ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
+ if [ ${rc} -eq 2 ]; then
+ mptcp_lib_result_fail "${msg} # error ${rc}"
+ ret=${KSFT_FAIL}
+ elif [ ${rc} -eq 0 ]; then
+ mptcp_lib_print_ok "[ OK ]"
mptcp_lib_result_pass "${msg}"
- else
- echo -n "[FAIL] "
- echo "expected '$expected' got '$out'"
+ elif [ ${rc} -eq 1 ]; then
mptcp_lib_result_fail "${msg} # different output"
- ret=1
+ ret=${KSFT_FAIL}
fi
}
@@ -105,14 +97,14 @@ check "ip netns exec $ns1 ./pm_nl_ctl get 4" "" "duplicate addr"
ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.4 flags signal
check "ip netns exec $ns1 ./pm_nl_ctl get 4" "id 4 flags signal 10.0.1.4" "id addr increment"
-for i in `seq 5 9`; do
+for i in $(seq 5 9); do
ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.$i flags signal >/dev/null 2>&1
done
check "ip netns exec $ns1 ./pm_nl_ctl get 9" "id 9 flags signal 10.0.1.9" "hard addr limit"
check "ip netns exec $ns1 ./pm_nl_ctl get 10" "" "above hard addr limit"
ip netns exec $ns1 ./pm_nl_ctl del 9
-for i in `seq 10 255`; do
+for i in $(seq 10 255); do
ip netns exec $ns1 ./pm_nl_ctl add 10.0.0.9 id $i
ip netns exec $ns1 ./pm_nl_ctl del $i
done
@@ -197,7 +189,8 @@ subflow,backup,fullmesh 10.0.1.1" " (backup,fullmesh)"
else
for st in fullmesh nofullmesh backup,fullmesh; do
st=" (${st})"
- printf "%-50s%s\n" "${st}" "[SKIP]"
+ mptcp_lib_print_title "${st}"
+ mptcp_lib_pr_skip
mptcp_lib_result_skip "${st}"
done
fi
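The rewritten check() leans on a three-way return code from
mptcp_lib_check_output(), as the branches above imply: 0 when the output
matches, 1 when it differs, 2 when the command itself fails with stderr
captured in ${err}. A hedged sketch of a caller using that convention
(command and expected output are illustrative):

    rc=0
    mptcp_lib_check_output "${err}" \
        "ip netns exec ${ns1} ./pm_nl_ctl get 1" \
        "id 1 flags  10.0.1.1" || rc=${?}
    case "${rc}" in
        0) echo "output matched" ;;
        1) echo "different output" ;;
        2) echo "command failed:"; cat "${err}" ;;
    esac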
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 49369c4a5f26..7426a2cbd4a0 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -453,6 +453,7 @@ int csf(int fd, int pm_family, int argc, char *argv[])
char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
1024];
+ u_int32_t flags = MPTCP_PM_ADDR_FLAG_SUBFLOW;
const char *params[5];
struct nlmsghdr *nh;
struct rtattr *addr;
@@ -558,6 +559,13 @@ int csf(int fd, int pm_family, int argc, char *argv[])
off += NLMSG_ALIGN(rta->rta_len);
}
+ /* addr flags */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &flags, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
addr->rta_len = off - addr_start;
}
@@ -1079,6 +1087,7 @@ int get_addr(int fd, int pm_family, int argc, char *argv[])
1024];
struct rtattr *rta, *nest;
struct nlmsghdr *nh;
+ u_int32_t token = 0;
int nest_start;
u_int8_t id;
int off = 0;
@@ -1089,10 +1098,12 @@ int get_addr(int fd, int pm_family, int argc, char *argv[])
MPTCP_PM_VER);
/* the only argument is the address id */
- if (argc != 3)
+ if (argc != 3 && argc != 5)
syntax(argv);
id = atoi(argv[2]);
+ if (argc == 5 && !strcmp(argv[3], "token"))
+ token = strtoul(argv[4], NULL, 10);
nest_start = off;
nest = (void *)(data + off);
@@ -1108,6 +1119,15 @@ int get_addr(int fd, int pm_family, int argc, char *argv[])
off += NLMSG_ALIGN(rta->rta_len);
nest->rta_len = off - nest_start;
+ /* token */
+ if (token) {
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
return 0;
}
@@ -1119,8 +1139,16 @@ int dump_addrs(int fd, int pm_family, int argc, char *argv[])
1024];
pid_t pid = getpid();
struct nlmsghdr *nh;
+ u_int32_t token = 0;
+ struct rtattr *rta;
int off = 0;
+ if (argc != 2 && argc != 4)
+ syntax(argv);
+
+ if (argc == 4 && !strcmp(argv[2], "token"))
+ token = strtoul(argv[3], NULL, 10);
+
memset(data, 0, sizeof(data));
nh = (void *)data;
off = init_genl_req(data, pm_family, MPTCP_PM_CMD_GET_ADDR,
@@ -1130,6 +1158,15 @@ int dump_addrs(int fd, int pm_family, int argc, char *argv[])
nh->nlmsg_pid = pid;
nh->nlmsg_len = off;
+ /* token */
+ if (token) {
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
return 0;
}
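Judging from the new argc checks, the get/dump subcommands gain an optional
trailing token selector so a userspace PM daemon can query per-connection
address entries; the intended invocations look like this (token value
illustrative, parsed as decimal by strtoul):

    # fetch address entry 1 from the MPTCP connection owning this token
    ./pm_nl_ctl get 1 token 823274047
    # dump all address entries announced on that connection
    ./pm_nl_ctl dump token 823274047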
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 8f9ddb3ad4fe..1b2366220388 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -1,21 +1,30 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept the existing style here: there were too many cases to
+# fix before all the other issues detected by shellcheck had been addressed.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
-ns3="ns3-$rndh"
+ns1=""
+ns2=""
+ns3=""
capture=false
-ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
-test_cnt=1
+# a bit more space, because there is more to display
+MPTCP_LIB_TEST_FORMAT="%02u %-60s"
ret=0
bail=0
slack=50
+large=""
+small=""
+sout=""
+cout=""
+capout=""
+size=0
usage() {
echo "Usage: $0 [ -b ] [ -c ] [ -d ]"
@@ -24,25 +33,19 @@ usage() {
echo -e "\t-d: debug this script"
}
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
rm -f "$cout" "$sout"
rm -f "$large" "$small"
rm -f "$capout"
- local netns
- for netns in "$ns1" "$ns2" "$ns3";do
- ip netns del $netns
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns3}"
}
mptcp_lib_check_mptcp
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip
# "$ns1" ns2 ns3
# ns1eth1 ns2eth1 ns2eth3 ns3eth1
@@ -64,12 +67,7 @@ setup()
trap cleanup EXIT
- for i in "$ns1" "$ns2" "$ns3";do
- ip netns add $i || exit $ksft_skip
- ip -net $i link set lo up
- ip netns exec $i sysctl -q net.ipv4.conf.all.rp_filter=0
- ip netns exec $i sysctl -q net.ipv4.conf.default.rp_filter=0
- done
+ mptcp_lib_ns_init ns1 ns2 ns3
ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth2 netns "$ns2"
@@ -129,8 +127,7 @@ do_transfer()
local sin=$2
local max_time=$3
local port
- port=$((10000+$test_cnt))
- test_cnt=$((test_cnt+1))
+ port=$((10000+MPTCP_LIB_TEST_COUNTER))
:> "$cout"
:> "$sout"
@@ -138,6 +135,7 @@ do_transfer()
if $capture; then
local capuser
+ local rndh="${ns1:4}"
if [ -z $SUDO_USER ] ; then
capuser=""
else
@@ -189,12 +187,12 @@ do_transfer()
printf "%-16s" " max $max_time "
if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
[ $cmpc -eq 0 ] && [ $cmps -eq 0 ]; then
- echo "[ OK ]"
+ mptcp_lib_pr_ok
cat "$capout"
return 0
fi
- echo " [ fail ]"
+ mptcp_lib_pr_fail
echo "client exit code $retc, server $rets" 1>&2
echo -e "\nnetns ${ns3} socket stat for $port:" 1>&2
ip netns exec ${ns3} ss -nita 1>&2 -o "sport = :$port"
@@ -241,7 +239,7 @@ run_test()
# completion (see mptcp_connect): 200ms on each side, add some slack
time=$((time + 400 + slack))
- printf "%-60s" "$msg"
+ mptcp_lib_print_title "$msg"
do_transfer $small $large $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
@@ -251,7 +249,7 @@ run_test()
fi
msg+=" - reverse direction"
- printf "%-60s" "${msg}"
+ mptcp_lib_print_title "${msg}"
do_transfer $large $small $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
@@ -265,7 +263,7 @@ while getopts "bcdh" option;do
case "$option" in
"h")
usage $0
- exit 0
+ exit ${KSFT_PASS}
;;
"b")
bail=1
@@ -278,7 +276,7 @@ while getopts "bcdh" option;do
;;
"?")
usage $0
- exit 1
+ exit ${KSFT_FAIL}
;;
esac
done
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 1b94a75604fe..9e2981f2d7f5 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -5,7 +5,7 @@
# code but we accept it.
#shellcheck disable=SC2086
-# Some variables are used below but indirectly, see check_expected_one()
+# Some variables are used below but indirectly, see verify_*_event()
#shellcheck disable=SC2034
. "$(dirname "${0}")/mptcp_lib.sh"
@@ -17,21 +17,17 @@ if ! mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
echo "userspace pm tests are not supported by the kernel: SKIP"
exit ${KSFT_SKIP}
fi
+mptcp_lib_check_tools ip
-if ! ip -Version &> /dev/null; then
- echo "SKIP: Cannot not run test without ip tool"
- exit ${KSFT_SKIP}
-fi
+ANNOUNCED=${MPTCP_LIB_EVENT_ANNOUNCED}
+REMOVED=${MPTCP_LIB_EVENT_REMOVED}
+SUB_ESTABLISHED=${MPTCP_LIB_EVENT_SUB_ESTABLISHED}
+SUB_CLOSED=${MPTCP_LIB_EVENT_SUB_CLOSED}
+LISTENER_CREATED=${MPTCP_LIB_EVENT_LISTENER_CREATED}
+LISTENER_CLOSED=${MPTCP_LIB_EVENT_LISTENER_CLOSED}
-ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
-REMOVED=7 # MPTCP_EVENT_REMOVED
-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
-SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
-LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
-
-AF_INET=2
-AF_INET6=10
+AF_INET=${MPTCP_LIB_AF_INET}
+AF_INET6=${MPTCP_LIB_AF_INET6}
file=""
server_evts=""
@@ -54,20 +50,16 @@ app6_port=50004
client_addr_id=${RANDOM:0:2}
server_addr_id=${RANDOM:0:2}
-sec=$(date +%s)
-rndh=$(printf %x "$sec")-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
+ns1=""
+ns2=""
ret=0
test_name=""
-
-_printf() {
- stdbuf -o0 -e0 printf "${@}"
-}
+# a bit more space, because there is more to display
+MPTCP_LIB_TEST_FORMAT="%02u %-68s"
print_title()
{
- _printf "INFO: %s\n" "${1}"
+ mptcp_lib_pr_info "${1}"
}
# $1: test name
@@ -75,36 +67,29 @@ print_test()
{
test_name="${1}"
- _printf "%-68s" "${test_name}"
-}
-
-print_results()
-{
- _printf "[%s]\n" "${1}"
+ mptcp_lib_print_title "${test_name}"
}
test_pass()
{
- print_results " OK "
+ mptcp_lib_pr_ok
mptcp_lib_result_pass "${test_name}"
}
test_skip()
{
- print_results "SKIP"
+ mptcp_lib_pr_skip
mptcp_lib_result_skip "${test_name}"
}
# $1: msg
test_fail()
{
- print_results "FAIL"
- ret=1
-
- if [ -n "${1}" ]; then
- _printf "\t%s\n" "${1}"
+ if [ ${#} -gt 0 ]
+ then
+ mptcp_lib_pr_fail "${@}"
fi
-
+ ret=${KSFT_FAIL}
mptcp_lib_result_fail "${test_name}"
}
@@ -122,23 +107,18 @@ cleanup()
mptcp_lib_kill_wait $pid
done
- local netns
- for netns in "$ns1" "$ns2" ;do
- ip netns del "$netns"
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}"
rm -rf $file $client_evts $server_evts
- _printf "Done\n"
+ mptcp_lib_pr_info "Done"
}
trap cleanup EXIT
# Create and configure network namespaces for testing
+mptcp_lib_ns_init ns1 ns2
for i in "$ns1" "$ns2" ;do
- ip netns add "$i" || exit 1
- ip -net "$i" link set lo up
- ip netns exec "$i" sysctl -q net.mptcp.enabled=1
ip netns exec "$i" sysctl -q net.mptcp.pm_type=1
done
@@ -160,17 +140,23 @@ ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth1 nodad
ip -net "$ns2" link set ns2eth1 up
+file=$(mktemp)
+mptcp_lib_make_file "$file" 2 1
+
+# Capture netlink events over the two network namespaces running
+# the MPTCP client and server
+client_evts=$(mktemp)
+mptcp_lib_events "${ns2}" "${client_evts}" client_evts_pid
+server_evts=$(mktemp)
+mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid
+sleep 0.5
+
print_title "Init"
print_test "Created network namespaces ns1, ns2"
test_pass
make_connection()
{
- if [ -z "$file" ]; then
- file=$(mktemp)
- fi
- mptcp_lib_make_file "$file" 2 1
-
local is_v6=$1
local app_port=$app4_port
local connect_addr="10.0.1.1"
@@ -184,27 +170,8 @@ make_connection()
is_v6="v4"
fi
- # Capture netlink events over the two network namespaces running
- # the MPTCP client and server
- if [ -z "$client_evts" ]; then
- client_evts=$(mktemp)
- fi
:>"$client_evts"
- if [ $client_evts_pid -ne 0 ]; then
- mptcp_lib_kill_wait $client_evts_pid
- fi
- ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
- client_evts_pid=$!
- if [ -z "$server_evts" ]; then
- server_evts=$(mktemp)
- fi
:>"$server_evts"
- if [ $server_evts_pid -ne 0 ]; then
- mptcp_lib_kill_wait $server_evts_pid
- fi
- ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
- server_evts_pid=$!
- sleep 0.5
# Run the server
ip netns exec "$ns1" \
@@ -242,7 +209,7 @@ make_connection()
else
test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
mptcp_lib_result_print_all_tap
- exit 1
+ exit ${KSFT_FAIL}
fi
if [ "$is_v6" = "v6" ]
@@ -261,45 +228,16 @@ make_connection()
fi
}
-# $1: var name ; $2: prev ret
-check_expected_one()
-{
- local var="${1}"
- local exp="e_${var}"
- local prev_ret="${2}"
-
- if [ "${!var}" = "${!exp}" ]
- then
- return 0
- fi
-
- if [ "${prev_ret}" = "0" ]
- then
- test_fail
- fi
-
- _printf "\tExpected value for '%s': '%s', got '%s'.\n" \
- "${var}" "${!exp}" "${!var}"
- return 1
-}
-
# $@: all var names to check
check_expected()
{
- local rc=0
- local var
-
- for var in "${@}"
- do
- check_expected_one "${var}" "${rc}" || rc=1
- done
-
- if [ ${rc} -eq 0 ]
+ if mptcp_lib_check_expected "${@}"
then
test_pass
return 0
fi
+ test_fail
return 1
}
@@ -449,7 +387,7 @@ test_remove()
then
test_pass
else
- test_fail
+ test_fail "unexpected type: ${type}"
fi
# RM_ADDR using an invalid addr id should result in no action
@@ -462,7 +400,7 @@ test_remove()
then
test_pass
else
- test_fail
+ test_fail "unexpected type: ${type}"
fi
# RM_ADDR from the client to server machine
@@ -897,32 +835,11 @@ test_prio()
verify_listener_events()
{
- local evt=$1
- local e_type=$2
- local e_family=$3
- local e_saddr=$4
- local e_sport=$5
- local type
- local family
- local saddr
- local sport
-
- if [ $e_type = $LISTENER_CREATED ]; then
- print_test "CREATE_LISTENER $e_saddr:$e_sport"
- elif [ $e_type = $LISTENER_CLOSED ]; then
- print_test "CLOSE_LISTENER $e_saddr:$e_sport"
- fi
-
- type=$(mptcp_lib_evts_get_info type $evt $e_type)
- family=$(mptcp_lib_evts_get_info family $evt $e_type)
- sport=$(mptcp_lib_evts_get_info sport $evt $e_type)
- if [ $family ] && [ $family = $AF_INET6 ]; then
- saddr=$(mptcp_lib_evts_get_info saddr6 $evt $e_type)
+ if mptcp_lib_verify_listener_events "${@}"; then
+ test_pass
else
- saddr=$(mptcp_lib_evts_get_info saddr4 $evt $e_type)
+ test_fail
fi
-
- check_expected "type" "family" "saddr" "sport"
}
test_listener()
@@ -944,6 +861,7 @@ test_listener()
local listener_pid=$!
sleep 0.5
+ print_test "CREATE_LISTENER 10.0.2.2:$client4_port"
verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
# ADD_ADDR from client to server machine reusing the subflow port
@@ -960,6 +878,7 @@ test_listener()
mptcp_lib_kill_wait $listener_pid
sleep 0.5
+ print_test "CLOSE_LISTENER 10.0.2.2:$client4_port"
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
}
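With the capture moved to startup, mptcp_lib_events() truncates the log, kills
any previous monitor through the name reference passed as its third argument,
and leaves a fresh 'pm_nl_ctl events' listener running in the background.
Fields are then extracted from the log with mptcp_lib_evts_get_info; a sketch:

    server_evts=$(mktemp)
    mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid

    # later: pull the 'type' field of the LISTENER_CREATED event
    type=$(mptcp_lib_evts_get_info type "${server_evts}" \
            "${MPTCP_LIB_EVENT_LISTENER_CREATED}")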
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 36e40256ab92..5cae53543849 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -17,6 +17,7 @@ tests="
ct_connect_v4 ip4-ct-xon: Basic ipv4 tcp connection using ct
connect_v4 ip4-xon: Basic ipv4 ping between two NS
nat_connect_v4 ip4-nat-xon: Basic ipv4 tcp connection via NAT
+ nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT
netlink_checks ovsnl: validate netlink attrs and settings
upcall_interfaces ovs: test the upcall interfaces
drop_reason drop: test drop reasons are emitted"
@@ -473,6 +474,67 @@ test_nat_connect_v4 () {
return 0
}
+# nat_related_v4 test
+# - client->server ip packets go via SNAT
+# - client solicits an ICMP destination unreachable packet from the server
+# - undo NAT for the ICMP reply and check that the dst IP has been updated
+test_nat_related_v4 () {
+ which nc >/dev/null 2>/dev/null || return $ksft_skip
+
+ sbx_add "test_nat_related_v4" || return $?
+
+ ovs_add_dp "test_nat_related_v4" natrelated4 || return 1
+ info "create namespaces"
+ for ns in client server; do
+ ovs_add_netns_and_veths "test_nat_related_v4" "natrelated4" "$ns" \
+ "${ns:0:1}0" "${ns:0:1}1" || return 1
+ done
+
+ ip netns exec client ip addr add 172.31.110.10/24 dev c1
+ ip netns exec client ip link set c1 up
+ ip netns exec server ip addr add 172.31.110.20/24 dev s1
+ ip netns exec server ip link set s1 up
+
+ ip netns exec server ip route add 192.168.0.20/32 via 172.31.110.10
+
+ # Allow ARP
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "in_port(1),eth(),eth_type(0x0806),arp()" "2" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "in_port(2),eth(),eth_type(0x0806),arp()" "1" || return 1
+
+ # Allow IP traffic from client->server, rewrite source IP with SNAT to 192.168.0.20
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "ct_state(-trk),in_port(1),eth(),eth_type(0x0800),ipv4(dst=172.31.110.20)" \
+ "ct(commit,nat(src=192.168.0.20)),recirc(0x1)" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "recirc_id(0x1),ct_state(+trk-inv),in_port(1),eth(),eth_type(0x0800),ipv4()" \
+ "2" || return 1
+
+ # Allow related ICMP responses back from server and undo NAT to restore original IP
+ # Drop any ICMP related packets where dst ip hasn't been restored back to original IP
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "ct_state(-trk),in_port(2),eth(),eth_type(0x0800),ipv4()" \
+ "ct(commit,nat),recirc(0x2)" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "recirc_id(0x2),ct_state(+rel+trk),in_port(2),eth(),eth_type(0x0800),ipv4(src=172.31.110.20,dst=172.31.110.10,proto=1),icmp()" \
+ "1" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "recirc_id(0x2),ct_state(+rel+trk),in_port(2),eth(),eth_type(0x0800),ipv4(dst=192.168.0.20,proto=1),icmp()" \
+ "drop" || return 1
+
+ # Solicit destination unreachable response from server
+ ovs_sbx "test_nat_related_v4" ip netns exec client \
+ bash -c "echo a | nc -u -w 1 172.31.110.20 10000"
+
+ # Check to make sure no packets matched the drop rule with incorrect dst ip
+ python3 "$ovs_base/ovs-dpctl.py" dump-flows natrelated4 \
+ | grep "drop" | grep "packets:0" >/dev/null || return 1
+
+ info "done..."
+ return 0
+}
+
# netlink_validation
# - Create a dp
# - check no warning with "old version" simulation
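The interesting part of nat_related_v4 is the bare ct(commit,nat) on the
return path: with no address argument, conntrack replays the translation
recorded for the original direction in reverse, which is what restores the
destination IP inside the related ICMP error. Condensed from the flows above,
written informally here as match => actions:

    # client -> server: commit with SNAT, recirculate for the post-NAT match
    ct_state(-trk),in_port(1),ipv4(dst=172.31.110.20)
        => ct(commit,nat(src=192.168.0.20)),recirc(0x1)
    # server -> client: bare nat un-NATs reply and related packets
    ct_state(-trk),in_port(2),ipv4()
        => ct(commit,nat),recirc(0x2)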
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 874a2952aa8e..bdf6f10d0558 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -801,6 +801,8 @@ kci_test_ipsec_offload()
end_test "FAIL: ipsec_offload SA offload missing from list output"
fi
+ # we didn't create a peer, make sure we can Tx
+ ip neigh add $dstip dev $dev lladdr 00:11:22:33:44:55
# use ping to exercise the Tx path
ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index 2672ac0b6d1f..8457b7ccbc09 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -134,8 +134,11 @@ static void do_recv_one(int fdr, struct timed_send *ts)
if (rbuf[0] != ts->data)
error(1, 0, "payload mismatch. expected %c", ts->data);
- if (llabs(tstop - texpect) > cfg_variance_us)
- error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
+ if (llabs(tstop - texpect) > cfg_variance_us) {
+ fprintf(stderr, "exceeds variance (%d us)\n", cfg_variance_us);
+ if (!getenv("KSFT_MACHINE_SLOW"))
+ exit(1);
+ }
}
static void do_recv_verify_empty(int fdr)
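The variance check above only turns into a hard failure when KSFT_MACHINE_SLOW
is unset; since the test merely calls getenv(), exporting the variable with any
value (even empty) enables the lenient mode. For a manual run on a slow VM
(wrapper script name assumed):

    KSFT_MACHINE_SLOW=yes ./so_txtime.sh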
diff --git a/tools/testing/selftests/net/test_vxlan_mdb.sh b/tools/testing/selftests/net/test_vxlan_mdb.sh
index 84a05a9e46d8..74ff9fb2a6f0 100755
--- a/tools/testing/selftests/net/test_vxlan_mdb.sh
+++ b/tools/testing/selftests/net/test_vxlan_mdb.sh
@@ -1014,10 +1014,10 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 port vx0"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
- log_test $? 254 "Flush by port"
+ log_test $? 254 "Flush by port - matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 port veth0"
- log_test $? 255 "Flush by wrong port"
+ log_test $? 255 "Flush by port - non-matching"
# Check that when flushing by source VNI only entries programmed with
# the specified source VNI are flushed and the rest are not.
@@ -1030,9 +1030,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 src_vni 10010"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
- log_test $? 254 "Flush by specified source VNI"
+ log_test $? 254 "Flush by source VNI - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10011"
- log_test $? 0 "Flush by unspecified source VNI"
+ log_test $? 0 "Flush by source VNI - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1058,9 +1058,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 proto bgp"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"proto bgp\""
- log_test $? 1 "Flush by specified routing protocol"
+ log_test $? 1 "Flush by routing protocol - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"proto zebra\""
- log_test $? 0 "Flush by unspecified routing protocol"
+ log_test $? 0 "Flush by routing protocol - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1075,9 +1075,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst 198.51.100.2"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
- log_test $? 1 "Flush by specified destination IP - IPv4"
+ log_test $? 1 "Flush by IPv4 destination IP - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
- log_test $? 0 "Flush by unspecified destination IP - IPv4"
+ log_test $? 0 "Flush by IPv4 destination IP - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1089,9 +1089,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst 2001:db8:1000::2"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 2001:db8:1000::2"
- log_test $? 1 "Flush by specified destination IP - IPv6"
+ log_test $? 1 "Flush by IPv6 destination IP - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 2001:db8:1000::1"
- log_test $? 0 "Flush by unspecified destination IP - IPv6"
+ log_test $? 0 "Flush by IPv6 destination IP - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1104,9 +1104,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst_port 11111"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"dst_port 11111\""
- log_test $? 1 "Flush by specified UDP destination port"
+ log_test $? 1 "Flush by UDP destination port - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"dst_port 22222\""
- log_test $? 0 "Flush by unspecified UDP destination port"
+ log_test $? 0 "Flush by UDP destination port - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1121,9 +1121,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst_port 4789"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
- log_test $? 1 "Flush by device's UDP destination port"
+ log_test $? 1 "Flush by device's UDP destination port - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
- log_test $? 0 "Flush by unspecified UDP destination port"
+ log_test $? 0 "Flush by device's UDP destination port - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1136,9 +1136,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vni 20010"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \" vni 20010\""
- log_test $? 1 "Flush by specified destination VNI"
+ log_test $? 1 "Flush by destination VNI - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \" vni 20011\""
- log_test $? 0 "Flush by unspecified destination VNI"
+ log_test $? 0 "Flush by destination VNI - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1153,9 +1153,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vni 10010"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
- log_test $? 1 "Flush by destination VNI equal to source VNI"
+ log_test $? 1 "Flush by destination VNI equal to source VNI - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
- log_test $? 0 "Flush by unspecified destination VNI"
+ log_test $? 0 "Flush by destination VNI equal to source VNI - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index b95c249f81c2..c6eda21cefb6 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -1927,7 +1927,7 @@ TEST_F(tls_err, poll_partial_rec_async)
pfd.events = POLLIN;
EXPECT_EQ(poll(&pfd, 1, 20), 1);
- exit(!_metadata->passed);
+ exit(!__test_passed(_metadata));
}
}
diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index 10f2fde3686b..ec60a16c9307 100644
--- a/tools/testing/selftests/net/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -163,7 +163,8 @@ static void validate_timestamp(struct timespec *cur, int min_delay)
if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) {
fprintf(stderr, "ERROR: %" PRId64 " us expected between %d and %d\n",
cur64 - start64, min_delay, max_delay);
- test_failed = true;
+ if (!getenv("KSFT_MACHINE_SLOW"))
+ test_failed = true;
}
}
diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh
index 31637769f59f..25baca4b148e 100755
--- a/tools/testing/selftests/net/txtimestamp.sh
+++ b/tools/testing/selftests/net/txtimestamp.sh
@@ -8,13 +8,13 @@ set -e
setup() {
# set 1ms delay on lo egress
- tc qdisc add dev lo root netem delay 1ms
+ tc qdisc add dev lo root netem delay 10ms
# set 2ms delay on ifb0 egress
modprobe ifb
ip link add ifb_netem0 type ifb
ip link set dev ifb_netem0 up
- tc qdisc add dev ifb_netem0 root netem delay 2ms
+ tc qdisc add dev ifb_netem0 root netem delay 20ms
# redirect lo ingress through ifb0 egress
tc qdisc add dev lo handle ffff: ingress
@@ -24,9 +24,11 @@ setup() {
}
run_test_v4v6() {
- # SND will be delayed 1000us
- # ACK will be delayed 6000us: 1 + 2 ms round-trip
- local -r args="$@ -v 1000 -V 6000"
+ # SND will be delayed 10ms
+ # ACK will be delayed 60ms: 2 x (10 + 20) ms round-trip
+ # allow +/- tolerance of 8ms
+ # wait for ACK to be queued
+ local -r args="$@ -v 10000 -V 60000 -t 8000 -S 80000"
./txtimestamp ${args} -4 -L 127.0.0.1
./txtimestamp ${args} -6 -L ::1
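A quick sanity check of the new numbers against the setup() delays, with the
flag meanings taken from the comments above (-v/-V expected SND/ACK delays,
-t tolerance, -S ACK wait, all in microseconds):

    t_SND = 10 ms                        -> -v 10000
    t_ACK = 2 * (10 ms + 20 ms) = 60 ms  -> -V 60000
    +/- 8 ms tolerance                   -> -t 8000
    wait up to 80 ms for the ACK         -> -S 80000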
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index 7badaf215de2..1d975bf52af3 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -56,7 +56,6 @@ static bool cfg_do_msgmore;
static bool cfg_do_setsockopt;
static int cfg_specific_test_id = -1;
-static const char cfg_ifname[] = "lo";
static unsigned short cfg_port = 9000;
static char buf[ETH_MAX_MTU];
@@ -69,8 +68,13 @@ struct testcase {
int r_len_last; /* recv(): size of last non-mss dgram, if any */
};
-const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
-const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
+const struct in6_addr addr6 = {
+ { { 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }, /* fd00::1 */
+};
+
+const struct in_addr addr4 = {
+ __constant_htonl(0x0a000001), /* 10.0.0.1 */
+};
struct testcase testcases_v4[] = {
{
@@ -274,48 +278,6 @@ struct testcase testcases_v6[] = {
}
};
-static unsigned int get_device_mtu(int fd, const char *ifname)
-{
- struct ifreq ifr;
-
- memset(&ifr, 0, sizeof(ifr));
-
- strcpy(ifr.ifr_name, ifname);
-
- if (ioctl(fd, SIOCGIFMTU, &ifr))
- error(1, errno, "ioctl get mtu");
-
- return ifr.ifr_mtu;
-}
-
-static void __set_device_mtu(int fd, const char *ifname, unsigned int mtu)
-{
- struct ifreq ifr;
-
- memset(&ifr, 0, sizeof(ifr));
-
- ifr.ifr_mtu = mtu;
- strcpy(ifr.ifr_name, ifname);
-
- if (ioctl(fd, SIOCSIFMTU, &ifr))
- error(1, errno, "ioctl set mtu");
-}
-
-static void set_device_mtu(int fd, int mtu)
-{
- int val;
-
- val = get_device_mtu(fd, cfg_ifname);
- fprintf(stderr, "device mtu (orig): %u\n", val);
-
- __set_device_mtu(fd, cfg_ifname, mtu);
- val = get_device_mtu(fd, cfg_ifname);
- if (val != mtu)
- error(1, 0, "unable to set device mtu to %u\n", val);
-
- fprintf(stderr, "device mtu (test): %u\n", val);
-}
-
static void set_pmtu_discover(int fd, bool is_ipv4)
{
int level, name, val;
@@ -354,81 +316,6 @@ static unsigned int get_path_mtu(int fd, bool is_ipv4)
return mtu;
}
-/* very wordy version of system("ip route add dev lo mtu 1500 127.0.0.3/32") */
-static void set_route_mtu(int mtu, bool is_ipv4)
-{
- struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
- struct nlmsghdr *nh;
- struct rtattr *rta;
- struct rtmsg *rt;
- char data[NLMSG_ALIGN(sizeof(*nh)) +
- NLMSG_ALIGN(sizeof(*rt)) +
- NLMSG_ALIGN(RTA_LENGTH(sizeof(addr6))) +
- NLMSG_ALIGN(RTA_LENGTH(sizeof(int))) +
- NLMSG_ALIGN(RTA_LENGTH(0) + RTA_LENGTH(sizeof(int)))];
- int fd, ret, alen, off = 0;
-
- alen = is_ipv4 ? sizeof(addr4) : sizeof(addr6);
-
- fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
- if (fd == -1)
- error(1, errno, "socket netlink");
-
- memset(data, 0, sizeof(data));
-
- nh = (void *)data;
- nh->nlmsg_type = RTM_NEWROUTE;
- nh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
- off += NLMSG_ALIGN(sizeof(*nh));
-
- rt = (void *)(data + off);
- rt->rtm_family = is_ipv4 ? AF_INET : AF_INET6;
- rt->rtm_table = RT_TABLE_MAIN;
- rt->rtm_dst_len = alen << 3;
- rt->rtm_protocol = RTPROT_BOOT;
- rt->rtm_scope = RT_SCOPE_UNIVERSE;
- rt->rtm_type = RTN_UNICAST;
- off += NLMSG_ALIGN(sizeof(*rt));
-
- rta = (void *)(data + off);
- rta->rta_type = RTA_DST;
- rta->rta_len = RTA_LENGTH(alen);
- if (is_ipv4)
- memcpy(RTA_DATA(rta), &addr4, alen);
- else
- memcpy(RTA_DATA(rta), &addr6, alen);
- off += NLMSG_ALIGN(rta->rta_len);
-
- rta = (void *)(data + off);
- rta->rta_type = RTA_OIF;
- rta->rta_len = RTA_LENGTH(sizeof(int));
- *((int *)(RTA_DATA(rta))) = 1; //if_nametoindex("lo");
- off += NLMSG_ALIGN(rta->rta_len);
-
- /* MTU is a subtype in a metrics type */
- rta = (void *)(data + off);
- rta->rta_type = RTA_METRICS;
- rta->rta_len = RTA_LENGTH(0) + RTA_LENGTH(sizeof(int));
- off += NLMSG_ALIGN(rta->rta_len);
-
- /* now fill MTU subtype. Note that it fits within above rta_len */
- rta = (void *)(((char *) rta) + RTA_LENGTH(0));
- rta->rta_type = RTAX_MTU;
- rta->rta_len = RTA_LENGTH(sizeof(int));
- *((int *)(RTA_DATA(rta))) = mtu;
-
- nh->nlmsg_len = off;
-
- ret = sendto(fd, data, off, 0, (void *)&nladdr, sizeof(nladdr));
- if (ret != off)
- error(1, errno, "send netlink: %uB != %uB\n", ret, off);
-
- if (close(fd))
- error(1, errno, "close netlink");
-
- fprintf(stderr, "route mtu (test): %u\n", mtu);
-}
-
static bool __send_one(int fd, struct msghdr *msg, int flags)
{
int ret;
@@ -591,15 +478,10 @@ static void run_test(struct sockaddr *addr, socklen_t alen)
/* Do not fragment these datagrams: only succeed if GSO works */
set_pmtu_discover(fdt, addr->sa_family == AF_INET);
- if (cfg_do_connectionless) {
- set_device_mtu(fdt, CONST_MTU_TEST);
+ if (cfg_do_connectionless)
run_all(fdt, fdr, addr, alen);
- }
if (cfg_do_connected) {
- set_device_mtu(fdt, CONST_MTU_TEST + 100);
- set_route_mtu(CONST_MTU_TEST, addr->sa_family == AF_INET);
-
if (connect(fdt, addr, alen))
error(1, errno, "connect");
diff --git a/tools/testing/selftests/net/udpgso.sh b/tools/testing/selftests/net/udpgso.sh
index fec24f584fe9..6c63178086b0 100755
--- a/tools/testing/selftests/net/udpgso.sh
+++ b/tools/testing/selftests/net/udpgso.sh
@@ -3,27 +3,56 @@
#
# Run a series of udpgso regression tests
+set -o errexit
+set -o nounset
+
+setup_loopback() {
+ ip addr add dev lo 10.0.0.1/32
+ ip addr add dev lo fd00::1/128 nodad noprefixroute
+}
+
+test_dev_mtu() {
+ setup_loopback
+ # Reduce loopback MTU
+ ip link set dev lo mtu 1500
+}
+
+test_route_mtu() {
+ setup_loopback
+ # Remove default local routes
+ ip route del local 10.0.0.1/32 table local dev lo
+ ip route del local fd00::1/128 table local dev lo
+ # Install local routes with reduced MTU
+ ip route add local 10.0.0.1/32 table local dev lo mtu 1500
+ ip route add local fd00::1/128 table local dev lo mtu 1500
+}
+
+if [ "$#" -gt 0 ]; then
+ "$1"
+ shift 2 # pop "test_*" arg and "--" delimiter
+ exec "$@"
+fi
+
echo "ipv4 cmsg"
-./in_netns.sh ./udpgso -4 -C
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -4 -C
echo "ipv4 setsockopt"
-./in_netns.sh ./udpgso -4 -C -s
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -4 -C -s
echo "ipv6 cmsg"
-./in_netns.sh ./udpgso -6 -C
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C
echo "ipv6 setsockopt"
-./in_netns.sh ./udpgso -6 -C -s
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -s
echo "ipv4 connected"
-./in_netns.sh ./udpgso -4 -c
+./in_netns.sh "$0" test_route_mtu -- ./udpgso -4 -c
-# blocked on 2nd loopback address
-# echo "ipv6 connected"
-# ./in_netns.sh ./udpgso -6 -c
+echo "ipv6 connected"
+./in_netns.sh "$0" test_route_mtu -- ./udpgso -6 -c
echo "ipv4 msg_more"
-./in_netns.sh ./udpgso -4 -C -m
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -4 -C -m
echo "ipv6 msg_more"
-./in_netns.sh ./udpgso -6 -C -m
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -m
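The rewritten wrapper re-executes itself inside the namespace so the MTU setup
runs where it matters, then execs the real test binary. The pattern generalizes
to any per-test setup function; a minimal sketch, assuming the same in_netns.sh
wrapper that runs its arguments in a fresh netns:

    #!/bin/bash
    my_setup() { ip link set dev lo mtu 1500; }

    if [ "$#" -gt 0 ]; then
        "$1"        # run the named setup function inside the netns
        shift 2     # pop the function name and the "--" delimiter
        exec "$@"   # hand control to the actual test binary
    fi

    ./in_netns.sh "$0" my_setup -- ./some_test -4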
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index cacf6507f690..783ebce8c4de 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1576,7 +1576,7 @@ void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
ASSERT_EQ(0, ret);
}
/* Directly report the status of our test harness results. */
- syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ syscall(__NR_exit, _metadata->exit_code);
}
/* Common tracer setup/teardown functions. */
@@ -1623,7 +1623,7 @@ void teardown_trace_fixture(struct __test_metadata *_metadata,
ASSERT_EQ(0, kill(tracer, SIGUSR1));
ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
if (WEXITSTATUS(status))
- _metadata->passed = 0;
+ _metadata->exit_code = KSFT_FAIL;
}
}
@@ -3088,8 +3088,7 @@ TEST(syscall_restart)
}
/* Directly report the status of our test harness results. */
- syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
- : EXIT_FAILURE);
+ syscall(__NR_exit, _metadata->exit_code);
}
EXPECT_EQ(0, close(pipefd[0]));
@@ -3174,7 +3173,7 @@ TEST(syscall_restart)
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
if (WIFSIGNALED(status) || WEXITSTATUS(status))
- _metadata->passed = 0;
+ _metadata->exit_code = KSFT_FAIL;
}
TEST_SIGNAL(filter_flag_log, SIGSYS)
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index c60acba951c2..db176fe7d0c3 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -8,6 +8,7 @@ CONFIG_VETH=y
#
# Core Netfilter Configuration
#
+CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_MARK=y
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index b53d12909962..b73bd255ea36 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -649,5 +649,408 @@
"teardown": [
"$TC actions flush action mirred"
]
+ },
+ {
+ "id": "456d",
+ "name": "Add mirred mirror to egress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 egress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress mirror index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "mirror",
+ "direction": "egress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "pipe"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 egress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "2358",
+ "name": "Add mirred mirror to ingress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress mirror index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "mirror",
+ "direction": "ingress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "pipe"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "fdb1",
+ "name": "Add mirred redirect to egress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress redirect index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "redirect",
+ "direction": "egress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "stolen"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "20cc",
+ "name": "Add mirred redirect to ingress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress redirect index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "redirect",
+ "direction": "ingress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "stolen"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "e739",
+ "name": "Try to add mirred action with both dev and block",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress redirect index 1 blockid 21 dev $DEV1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC -j actions list action mirred",
+ "matchJSON": [],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "2f47",
+ "name": "Try to add mirred action without specifying neither dev nor block",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress redirect index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC -j actions list action mirred",
+ "matchJSON": [],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "3188",
+ "name": "Replace mirred redirect to dev action with redirect to block",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ],
+ [
+ "$TC actions add action mirred ingress redirect index 1 dev $DEV1",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions replace action mirred egress redirect index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "redirect",
+ "direction": "egress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "stolen"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "83cc",
+ "name": "Replace mirred redirect to block action with mirror to dev",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ],
+ [
+ "$TC actions add action mirred egress redirect index 1 blockid 21",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions replace action mirred ingress mirror index 1 dev lo",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "mirror",
+ "direction": "ingress",
+ "to_dev": "lo",
+ "control_action": {
+ "type": "pipe"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
index be293e7c6d18..3a537b2ec4c9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
@@ -77,7 +77,7 @@
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq quantum 9000",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p buckets.*orphan_mask 1023 quantum 9000b",
+ "matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*quantum 9000b",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
index 2d603ef2e375..12da0a939e3e 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
@@ -167,6 +167,7 @@
"plugins": {
"requires": "nsPlugin"
},
+ "dependsOn": "echo '' | jq",
"setup": [
"echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
"$TC qdisc replace dev $ETH handle 8001: parent root stab overhead 24 taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 clockid CLOCK_TAI",
@@ -192,6 +193,7 @@
"plugins": {
"requires": "nsPlugin"
},
+ "dependsOn": "echo '' | jq",
"setup": [
"echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
"$TC qdisc replace dev $ETH handle 8001: parent root stab overhead 24 taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 flags 0x2",
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index caeacc691587..ee349187636f 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -541,7 +541,7 @@ def test_runner(pm, args, filtered_tests):
message = pmtf.message
output = pmtf.output
res = TestResult(tidx['id'], tidx['name'])
- res.set_result(ResultState.skip)
+ res.set_result(ResultState.fail)
res.set_errormsg(pmtf.message)
res.set_failmsg(pmtf.output)
tsr.add_resultdata(res)
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
index c53ede8b730d..cddff1772e10 100755
--- a/tools/testing/selftests/tc-testing/tdc.sh
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -63,5 +63,4 @@ try_modprobe sch_hfsc
try_modprobe sch_hhf
try_modprobe sch_htb
try_modprobe sch_teql
-./tdc.py -J`nproc` -c actions
-./tdc.py -J`nproc` -c qdisc
+./tdc.py -J`nproc`
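Without -c, tdc.py now runs every category; the per-category form removed
above still works when only a subset is wanted, e.g.:

    ./tdc.py -J$(nproc) -c qdisc    # only the qdisc tests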
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index ae2b33c21c45..554b290fefdc 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -33,8 +33,7 @@ void init_signals(void)
signal(SIGPIPE, SIG_IGN);
}
-/* Parse a CID in string representation */
-unsigned int parse_cid(const char *str)
+static unsigned int parse_uint(const char *str, const char *err_str)
{
char *endptr = NULL;
unsigned long n;
@@ -42,12 +41,24 @@ unsigned int parse_cid(const char *str)
errno = 0;
n = strtoul(str, &endptr, 10);
if (errno || *endptr != '\0') {
- fprintf(stderr, "malformed CID \"%s\"\n", str);
+ fprintf(stderr, "malformed %s \"%s\"\n", err_str, str);
exit(EXIT_FAILURE);
}
return n;
}
+/* Parse a CID in string representation */
+unsigned int parse_cid(const char *str)
+{
+ return parse_uint(str, "CID");
+}
+
+/* Parse a port in string representation */
+unsigned int parse_port(const char *str)
+{
+ return parse_uint(str, "port");
+}
+
/* Wait for the remote to close the connection */
void vsock_wait_remote_close(int fd)
{
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index 03c88d0cb861..e95e62485959 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -12,10 +12,13 @@ enum test_mode {
TEST_MODE_SERVER
};
+#define DEFAULT_PEER_PORT 1234
+
/* Test runner options */
struct test_opts {
enum test_mode mode;
unsigned int peer_cid;
+ unsigned int peer_port;
};
/* A test case definition. Test functions must print failures to stderr and
@@ -35,6 +38,7 @@ struct test_case {
void init_signals(void);
unsigned int parse_cid(const char *str);
+unsigned int parse_port(const char *str);
int vsock_stream_connect(unsigned int cid, unsigned int port);
int vsock_bind_connect(unsigned int cid, unsigned int port,
unsigned int bind_port, int type);
diff --git a/tools/testing/vsock/vsock_diag_test.c b/tools/testing/vsock/vsock_diag_test.c
index fa927ad16f8a..081e045f4696 100644
--- a/tools/testing/vsock/vsock_diag_test.c
+++ b/tools/testing/vsock/vsock_diag_test.c
@@ -39,6 +39,8 @@ static const char *sock_type_str(int type)
return "DGRAM";
case SOCK_STREAM:
return "STREAM";
+ case SOCK_SEQPACKET:
+ return "SEQPACKET";
default:
return "INVALID TYPE";
}
@@ -342,7 +344,7 @@ static void test_listen_socket_server(const struct test_opts *opts)
} addr = {
.svm = {
.svm_family = AF_VSOCK,
- .svm_port = 1234,
+ .svm_port = opts->peer_port,
.svm_cid = VMADDR_CID_ANY,
},
};
@@ -378,7 +380,7 @@ static void test_connect_client(const struct test_opts *opts)
LIST_HEAD(sockets);
struct vsock_stat *st;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -403,7 +405,7 @@ static void test_connect_server(const struct test_opts *opts)
LIST_HEAD(sockets);
int client_fd;
- client_fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ client_fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (client_fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -462,6 +464,11 @@ static const struct option longopts[] = {
.val = 'p',
},
{
+ .name = "peer-port",
+ .has_arg = required_argument,
+ .val = 'q',
+ },
+ {
.name = "list",
.has_arg = no_argument,
.val = 'l',
@@ -481,7 +488,7 @@ static const struct option longopts[] = {
static void usage(void)
{
- fprintf(stderr, "Usage: vsock_diag_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--list] [--skip=<test_id>]\n"
+ fprintf(stderr, "Usage: vsock_diag_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--skip=<test_id>]\n"
"\n"
" Server: vsock_diag_test --control-port=1234 --mode=server --peer-cid=3\n"
" Client: vsock_diag_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
@@ -503,9 +510,11 @@ static void usage(void)
" --control-port <port> Server port to listen on/connect to\n"
" --mode client|server Server or client mode\n"
" --peer-cid <cid> CID of the other side\n"
+ " --peer-port <port> AF_VSOCK port used for the test [default: %d]\n"
" --list List of tests that will be executed\n"
" --skip <test_id> Test ID to skip;\n"
- " use multiple --skip options to skip more tests\n"
+ " use multiple --skip options to skip more tests\n",
+ DEFAULT_PEER_PORT
);
exit(EXIT_FAILURE);
}
@@ -517,6 +526,7 @@ int main(int argc, char **argv)
struct test_opts opts = {
.mode = TEST_MODE_UNSET,
.peer_cid = VMADDR_CID_ANY,
+ .peer_port = DEFAULT_PEER_PORT,
};
init_signals();
@@ -544,6 +554,9 @@ int main(int argc, char **argv)
case 'p':
opts.peer_cid = parse_cid(optarg);
break;
+ case 'q':
+ opts.peer_port = parse_port(optarg);
+ break;
case 'P':
control_port = optarg;
break;
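
The option is wired identically in each binary: a "peer-port" longopts entry mapping to 'q' plus a parse_port() call in the option loop. A self-contained sketch of that getopt_long pattern (standalone program, with strtoul standing in for parse_port()):

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_PEER_PORT 1234

int main(int argc, char **argv)
{
	static const struct option longopts[] = {
		{ .name = "peer-port", .has_arg = required_argument, .val = 'q' },
		{},
	};
	unsigned int peer_port = DEFAULT_PEER_PORT;
	int c;

	while ((c = getopt_long(argc, argv, "", longopts, NULL)) != -1) {
		switch (c) {
		case 'q':
			/* the real tests call parse_port(optarg) here */
			peer_port = strtoul(optarg, NULL, 10);
			break;
		default:
			return EXIT_FAILURE;
		}
	}
	printf("using AF_VSOCK port %u\n", peer_port);
	return 0;
}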
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 66246d81d654..f851f8961247 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -34,7 +34,7 @@ static void test_stream_connection_reset(const struct test_opts *opts)
} addr = {
.svm = {
.svm_family = AF_VSOCK,
- .svm_port = 1234,
+ .svm_port = opts->peer_port,
.svm_cid = opts->peer_cid,
},
};
@@ -70,7 +70,7 @@ static void test_stream_bind_only_client(const struct test_opts *opts)
} addr = {
.svm = {
.svm_family = AF_VSOCK,
- .svm_port = 1234,
+ .svm_port = opts->peer_port,
.svm_cid = opts->peer_cid,
},
};
@@ -112,7 +112,7 @@ static void test_stream_bind_only_server(const struct test_opts *opts)
} addr = {
.svm = {
.svm_family = AF_VSOCK,
- .svm_port = 1234,
+ .svm_port = opts->peer_port,
.svm_cid = VMADDR_CID_ANY,
},
};
@@ -138,7 +138,7 @@ static void test_stream_client_close_client(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -152,7 +152,7 @@ static void test_stream_client_close_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -173,7 +173,7 @@ static void test_stream_server_close_client(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -194,7 +194,7 @@ static void test_stream_server_close_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -215,7 +215,7 @@ static void test_stream_multiconn_client(const struct test_opts *opts)
int i;
for (i = 0; i < MULTICONN_NFDS; i++) {
- fds[i] = vsock_stream_connect(opts->peer_cid, 1234);
+ fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fds[i] < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -239,7 +239,7 @@ static void test_stream_multiconn_server(const struct test_opts *opts)
int i;
for (i = 0; i < MULTICONN_NFDS; i++) {
- fds[i] = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fds[i] = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fds[i] < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -267,9 +267,9 @@ static void test_msg_peek_client(const struct test_opts *opts,
int i;
if (seqpacket)
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
else
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
@@ -295,9 +295,9 @@ static void test_msg_peek_server(const struct test_opts *opts,
int fd;
if (seqpacket)
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
else
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
@@ -363,7 +363,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
int msg_count;
int fd;
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -434,7 +434,7 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
struct msghdr msg = {0};
struct iovec iov = {0};
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -505,7 +505,7 @@ static void test_seqpacket_msg_trunc_client(const struct test_opts *opts)
int fd;
char buf[MESSAGE_TRUNC_SZ];
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -524,7 +524,7 @@ static void test_seqpacket_msg_trunc_server(const struct test_opts *opts)
struct msghdr msg = {0};
struct iovec iov = {0};
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -575,7 +575,7 @@ static void test_seqpacket_timeout_client(const struct test_opts *opts)
time_t read_enter_ns;
time_t read_overhead_ns;
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -620,7 +620,7 @@ static void test_seqpacket_timeout_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -639,7 +639,7 @@ static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
len = sizeof(sock_buf_size);
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -671,7 +671,7 @@ static void test_seqpacket_bigmsg_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -692,7 +692,7 @@ static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opt
unsigned char *buf2;
int buf_size = getpagesize() * 3;
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -732,7 +732,7 @@ static void test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opt
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
int i;
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -808,7 +808,7 @@ static void test_stream_poll_rcvlowat_server(const struct test_opts *opts)
int fd;
int i;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -839,7 +839,7 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
short poll_flags;
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -906,9 +906,9 @@ static void test_inv_buf_client(const struct test_opts *opts, bool stream)
int fd;
if (stream)
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
else
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
@@ -941,9 +941,9 @@ static void test_inv_buf_server(const struct test_opts *opts, bool stream)
int fd;
if (stream)
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
else
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
@@ -986,7 +986,7 @@ static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -1015,7 +1015,7 @@ static void test_stream_virtio_skb_merge_server(const struct test_opts *opts)
unsigned char buf[64];
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -1108,7 +1108,7 @@ static void test_stream_shutwr_client(const struct test_opts *opts)
sigaction(SIGPIPE, &act, NULL);
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -1130,7 +1130,7 @@ static void test_stream_shutwr_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -1151,7 +1151,7 @@ static void test_stream_shutrd_client(const struct test_opts *opts)
sigaction(SIGPIPE, &act, NULL);
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -1170,7 +1170,7 @@ static void test_stream_shutrd_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -1193,7 +1193,7 @@ static void test_double_bind_connect_server(const struct test_opts *opts)
struct sockaddr_vm sa_client;
socklen_t socklen_client = sizeof(sa_client);
- listen_fd = vsock_stream_listen(VMADDR_CID_ANY, 1234);
+ listen_fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
for (i = 0; i < 2; i++) {
control_writeln("LISTENING");
@@ -1226,7 +1226,13 @@ static void test_double_bind_connect_client(const struct test_opts *opts)
/* Wait until server is ready to accept a new connection */
control_expectln("LISTENING");
- client_fd = vsock_bind_connect(opts->peer_cid, 1234, 4321, SOCK_STREAM);
+ /* Use 'peer_port + 1' as the local port for the 'bind()' call.
+ * Overflow is harmless here, but keep it in mind when running
+ * multiple test applications simultaneously: their '--peer-port'
+ * arguments must differ by more than 1, or one instance's bind
+ * port will collide with the other's listen port.
+ */
+ client_fd = vsock_bind_connect(opts->peer_cid, opts->peer_port,
+ opts->peer_port + 1, SOCK_STREAM);
close(client_fd);
}
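
vsock_bind_connect() itself is not part of this hunk; presumably it binds a local sockaddr_vm before connecting, which is what makes the bind-port collision described in the comment possible. A simplified sketch under that assumption (error paths trimmed, not the in-tree helper):

#include <sys/socket.h>
#include <unistd.h>
#include <linux/vm_sockets.h>

static int bind_connect_sketch(unsigned int cid, unsigned int port,
			       unsigned int bind_port)
{
	struct sockaddr_vm local = {
		.svm_family = AF_VSOCK,
		.svm_cid = VMADDR_CID_ANY,
		.svm_port = bind_port,
	};
	struct sockaddr_vm remote = {
		.svm_family = AF_VSOCK,
		.svm_cid = cid,
		.svm_port = port,
	};
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Binding first is what claims 'bind_port' for this instance */
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) ||
	    connect(fd, (struct sockaddr *)&remote, sizeof(remote))) {
		close(fd);
		return -1;
	}
	return fd;
}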
@@ -1246,7 +1252,7 @@ static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opt
void *buf;
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -1282,7 +1288,7 @@ static void test_stream_credit_update_test(const struct test_opts *opts,
void *buf;
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -1543,6 +1549,11 @@ static const struct option longopts[] = {
.val = 'p',
},
{
+ .name = "peer-port",
+ .has_arg = required_argument,
+ .val = 'q',
+ },
+ {
.name = "list",
.has_arg = no_argument,
.val = 'l',
@@ -1562,7 +1573,7 @@ static const struct option longopts[] = {
static void usage(void)
{
- fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--list] [--skip=<test_id>]\n"
+ fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--skip=<test_id>]\n"
"\n"
" Server: vsock_test --control-port=1234 --mode=server --peer-cid=3\n"
" Client: vsock_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
@@ -1577,6 +1588,9 @@ static void usage(void)
"connect to.\n"
"\n"
"The CID of the other side must be given with --peer-cid=<cid>.\n"
+ "During the test, two AF_VSOCK ports will be used: the port\n"
+ "specified with --peer-port=<port> (or the default port)\n"
+ "and the next one.\n"
"\n"
"Options:\n"
" --help This help message\n"
@@ -1584,9 +1598,11 @@ static void usage(void)
" --control-port <port> Server port to listen on/connect to\n"
" --mode client|server Server or client mode\n"
" --peer-cid <cid> CID of the other side\n"
+ " --peer-port <port> AF_VSOCK port used for the test [default: %d]\n"
" --list List of tests that will be executed\n"
" --skip <test_id> Test ID to skip;\n"
- " use multiple --skip options to skip more tests\n"
+ " use multiple --skip options to skip more tests\n",
+ DEFAULT_PEER_PORT
);
exit(EXIT_FAILURE);
}
@@ -1598,6 +1614,7 @@ int main(int argc, char **argv)
struct test_opts opts = {
.mode = TEST_MODE_UNSET,
.peer_cid = VMADDR_CID_ANY,
+ .peer_port = DEFAULT_PEER_PORT,
};
srand(time(NULL));
@@ -1626,6 +1643,9 @@ int main(int argc, char **argv)
case 'p':
opts.peer_cid = parse_cid(optarg);
break;
+ case 'q':
+ opts.peer_port = parse_port(optarg);
+ break;
case 'P':
control_port = optarg;
break;
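
With the default port, existing invocations keep working unchanged. To run two instances of the suite side by side, pass --peer-port values that differ by at least 2 (for example 5000 and 6000), since each run also claims peer_port + 1 for the bind()-then-connect test above.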
diff --git a/tools/testing/vsock/vsock_test_zerocopy.c b/tools/testing/vsock/vsock_test_zerocopy.c
index a16ff76484e6..04c376b6937f 100644
--- a/tools/testing/vsock/vsock_test_zerocopy.c
+++ b/tools/testing/vsock/vsock_test_zerocopy.c
@@ -152,9 +152,9 @@ static void test_client(const struct test_opts *opts,
int fd;
if (sock_seqpacket)
- fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+ fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
else
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
@@ -248,9 +248,9 @@ static void test_server(const struct test_opts *opts,
int fd;
if (sock_seqpacket)
- fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
else
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
@@ -323,7 +323,7 @@ void test_stream_msgzcopy_empty_errq_client(const struct test_opts *opts)
ssize_t res;
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -347,7 +347,7 @@ void test_stream_msgzcopy_empty_errq_server(const struct test_opts *opts)
{
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
diff --git a/tools/testing/vsock/vsock_uring_test.c b/tools/testing/vsock/vsock_uring_test.c
index d976d35f0ba9..6c3e6f70c457 100644
--- a/tools/testing/vsock/vsock_uring_test.c
+++ b/tools/testing/vsock/vsock_uring_test.c
@@ -66,7 +66,7 @@ static void vsock_io_uring_client(const struct test_opts *opts,
struct msghdr msg;
int fd;
- fd = vsock_stream_connect(opts->peer_cid, 1234);
+ fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
if (fd < 0) {
perror("connect");
exit(EXIT_FAILURE);
@@ -120,7 +120,7 @@ static void vsock_io_uring_server(const struct test_opts *opts,
void *data;
int fd;
- fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+ fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
if (fd < 0) {
perror("accept");
exit(EXIT_FAILURE);
@@ -248,6 +248,11 @@ static const struct option longopts[] = {
.val = 'p',
},
{
+ .name = "peer-port",
+ .has_arg = required_argument,
+ .val = 'q',
+ },
+ {
.name = "help",
.has_arg = no_argument,
.val = '?',
@@ -257,7 +262,7 @@ static const struct option longopts[] = {
static void usage(void)
{
- fprintf(stderr, "Usage: vsock_uring_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid>\n"
+ fprintf(stderr, "Usage: vsock_uring_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>]\n"
"\n"
" Server: vsock_uring_test --control-port=1234 --mode=server --peer-cid=3\n"
" Client: vsock_uring_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
@@ -271,6 +276,8 @@ static void usage(void)
" --control-port <port> Server port to listen on/connect to\n"
" --mode client|server Server or client mode\n"
" --peer-cid <cid> CID of the other side\n"
+ " --peer-port <port> AF_VSOCK port used for the test [default: %d]\n",
+ DEFAULT_PEER_PORT
);
exit(EXIT_FAILURE);
}
@@ -282,6 +289,7 @@ int main(int argc, char **argv)
struct test_opts opts = {
.mode = TEST_MODE_UNSET,
.peer_cid = VMADDR_CID_ANY,
+ .peer_port = DEFAULT_PEER_PORT,
};
init_signals();
@@ -309,6 +317,9 @@ int main(int argc, char **argv)
case 'p':
opts.peer_cid = parse_cid(optarg);
break;
+ case 'q':
+ opts.peer_port = parse_port(optarg);
+ break;
case 'P':
control_port = optarg;
break;
diff --git a/tools/virtio/.gitignore b/tools/virtio/.gitignore
index 9934d48d9a55..7e47b281c442 100644
--- a/tools/virtio/.gitignore
+++ b/tools/virtio/.gitignore
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
*.d
virtio_test
+vhost_net_test
vringh_test
virtio-trace/trace-agent
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index d128925980e0..e25e99c1c3b7 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
all: test mod
-test: virtio_test vringh_test
+test: virtio_test vringh_test vhost_net_test
virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o
+vhost_net_test: virtio_ring.o vhost_net_test.o
try-run = $(shell set -e; \
if ($(1)) >/dev/null 2>&1; \
@@ -49,6 +50,7 @@ oot-clean: OOT_BUILD+=clean
.PHONY: all test mod clean vhost oot oot-clean oot-build
clean:
- ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
- vhost_test/Module.symvers vhost_test/modules.order *.d
+ ${RM} *.o vringh_test virtio_test vhost_net_test vhost_test/*.o \
+ vhost_test/.*.cmd vhost_test/Module.symvers \
+ vhost_test/modules.order *.d
-include *.d
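
With these Makefile hooks in place, `make -C tools/virtio` (or `make -C tools/virtio vhost_net_test` for just this binary) builds the new test alongside virtio_test and vringh_test; running it requires root access to /dev/vhost-net and /dev/net/tun.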
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 2a8a70e2a950..42a564f22f2d 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VIRTIO_CONFIG_H
+#define LINUX_VIRTIO_CONFIG_H
#include <linux/virtio_byteorder.h>
#include <linux/virtio.h>
#include <uapi/linux/virtio_config.h>
@@ -95,3 +97,5 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
+
+#endif
diff --git a/tools/virtio/vhost_net_test.c b/tools/virtio/vhost_net_test.c
new file mode 100644
index 000000000000..389d99a6d7c7
--- /dev/null
+++ b/tools/virtio/vhost_net_test.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
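+/*
+ * Userspace exerciser for the vhost-net kernel module: a tap device
+ * is attached as the vhost-net backend, an AF_PACKET socket on the
+ * tap side acts as the "wire", and virtio rings in this process play
+ * the guest side for the RX and TX tests below.
+ */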
+#define _GNU_SOURCE
+#include <getopt.h>
+#include <limits.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/eventfd.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <linux/vhost.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#include <linux/in.h>
+#include <linux/if_packet.h>
+#include <linux/virtio_net.h>
+#include <netinet/ether.h>
+
+#define HDR_LEN sizeof(struct virtio_net_hdr_mrg_rxbuf)
+#define TEST_BUF_LEN 256
+#define TEST_PTYPE ETH_P_LOOPBACK
+#define DESC_NUM 256
+
+/* Used by implementation of kmalloc() in tools/virtio/linux/kernel.h */
+void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
+
+struct vq_info {
+ int kick;
+ int call;
+ int idx;
+ long started;
+ long completed;
+ struct pollfd fds;
+ void *ring;
+ /* copy used for control */
+ struct vring vring;
+ struct virtqueue *vq;
+};
+
+struct vdev_info {
+ struct virtio_device vdev;
+ int control;
+ struct vq_info vqs[2];
+ int nvqs;
+ void *buf;
+ size_t buf_size;
+ char *test_buf;
+ char *res_buf;
+ struct vhost_memory *mem;
+ int sock;
+ int ifindex;
+ unsigned char mac[ETHER_ADDR_LEN];
+};
+
+static int tun_alloc(struct vdev_info *dev, char *tun_name)
+{
+ struct ifreq ifr;
+ int len = HDR_LEN;
+ int fd, e;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd < 0) {
+ perror("Cannot open /dev/net/tun");
+ return fd;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
+ strncpy(ifr.ifr_name, tun_name, IFNAMSIZ);
+
+ e = ioctl(fd, TUNSETIFF, &ifr);
+ if (e < 0) {
+ perror("ioctl[TUNSETIFF]");
+ close(fd);
+ return e;
+ }
+
+ e = ioctl(fd, TUNSETVNETHDRSZ, &len);
+ if (e < 0) {
+ perror("ioctl[TUNSETVNETHDRSZ]");
+ close(fd);
+ return e;
+ }
+
+ e = ioctl(fd, SIOCGIFHWADDR, &ifr);
+ if (e < 0) {
+ perror("ioctl[SIOCGIFHWADDR]");
+ close(fd);
+ return e;
+ }
+
+ memcpy(dev->mac, &ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ return fd;
+}
+
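+/* Create an AF_PACKET socket for the tap device's ifindex and bring
+ * the interface up; test frames are sent and received through it.
+ */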
+static void vdev_create_socket(struct vdev_info *dev, char *tun_name)
+{
+ struct ifreq ifr;
+
+ dev->sock = socket(AF_PACKET, SOCK_RAW, htons(TEST_PTYPE));
+ assert(dev->sock != -1);
+
+ strncpy(ifr.ifr_name, tun_name, IFNAMSIZ);
+ assert(ioctl(dev->sock, SIOCGIFINDEX, &ifr) >= 0);
+
+ dev->ifindex = ifr.ifr_ifindex;
+
+ /* Set the flags that bring the device up */
+ assert(ioctl(dev->sock, SIOCGIFFLAGS, &ifr) >= 0);
+ ifr.ifr_flags |= (IFF_UP | IFF_RUNNING);
+ assert(ioctl(dev->sock, SIOCSIFFLAGS, &ifr) >= 0);
+}
+
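+/* Inject one TEST_PTYPE test frame into the tap device through the
+ * packet socket; vhost-net delivers it to the RX virtqueue.
+ */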
+static void vdev_send_packet(struct vdev_info *dev)
+{
+ char *sendbuf = dev->test_buf + HDR_LEN;
+ struct sockaddr_ll saddrll = {0};
+ int sockfd = dev->sock;
+ int ret;
+
+ saddrll.sll_family = PF_PACKET;
+ saddrll.sll_ifindex = dev->ifindex;
+ saddrll.sll_halen = ETH_ALEN;
+ saddrll.sll_protocol = htons(TEST_PTYPE);
+
+ ret = sendto(sockfd, sendbuf, TEST_BUF_LEN, 0,
+ (struct sockaddr *)&saddrll,
+ sizeof(struct sockaddr_ll));
+ assert(ret >= 0);
+}
+
+static bool vq_notify(struct virtqueue *vq)
+{
+ struct vq_info *info = vq->priv;
+ unsigned long long v = 1;
+ int r;
+
+ r = write(info->kick, &v, sizeof(v));
+ assert(r == sizeof(v));
+
+ return true;
+}
+
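+/* Register one virtqueue's vring layout and its kick/call eventfds
+ * with the vhost-net kernel thread.
+ */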
+static void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
+{
+ struct vhost_vring_addr addr = {
+ .index = info->idx,
+ .desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
+ .avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
+ .used_user_addr = (uint64_t)(unsigned long)info->vring.used,
+ };
+ struct vhost_vring_state state = { .index = info->idx };
+ struct vhost_vring_file file = { .index = info->idx };
+ int r;
+
+ state.num = info->vring.num;
+ r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
+ assert(r >= 0);
+
+ state.num = 0;
+ r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
+ assert(r >= 0);
+
+ r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
+ assert(r >= 0);
+
+ file.fd = info->kick;
+ r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
+ assert(r >= 0);
+
+ file.fd = info->call;
+ r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
+ assert(r >= 0);
+}
+
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+ if (info->vq)
+ vring_del_virtqueue(info->vq);
+
+ memset(info->ring, 0, vring_size(num, 4096));
+ vring_init(&info->vring, num, info->ring, 4096);
+ info->vq = vring_new_virtqueue(info->idx, num, 4096, vdev, true, false,
+ info->ring, vq_notify, NULL, "test");
+ assert(info->vq);
+ info->vq->priv = info;
+}
+
+static void vq_info_add(struct vdev_info *dev, int idx, int num, int fd)
+{
+ struct vhost_vring_file backend = { .index = idx, .fd = fd };
+ struct vq_info *info = &dev->vqs[idx];
+ int r;
+
+ info->idx = idx;
+ info->kick = eventfd(0, EFD_NONBLOCK);
+ info->call = eventfd(0, EFD_NONBLOCK);
+ info->fds.fd = info->call;
+ info->fds.events = POLLIN;
+ r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
+ assert(!r);
+ vq_reset(info, num, &dev->vdev);
+ vhost_vq_setup(dev, info);
+
+ r = ioctl(dev->control, VHOST_NET_SET_BACKEND, &backend);
+ assert(!r);
+}
+
+static void vdev_info_init(struct vdev_info *dev, unsigned long long features)
+{
+ struct ether_header *eh;
+ int i, r;
+
+ dev->vdev.features = features;
+ INIT_LIST_HEAD(&dev->vdev.vqs);
+ spin_lock_init(&dev->vdev.vqs_list_lock);
+
+ dev->buf_size = (HDR_LEN + TEST_BUF_LEN) * 2;
+ dev->buf = malloc(dev->buf_size);
+ assert(dev->buf);
+ dev->test_buf = dev->buf;
+ dev->res_buf = dev->test_buf + HDR_LEN + TEST_BUF_LEN;
+
+ memset(dev->test_buf, 0, HDR_LEN + TEST_BUF_LEN);
+ eh = (struct ether_header *)(dev->test_buf + HDR_LEN);
+ eh->ether_type = htons(TEST_PTYPE);
+ memcpy(eh->ether_dhost, dev->mac, ETHER_ADDR_LEN);
+ memcpy(eh->ether_shost, dev->mac, ETHER_ADDR_LEN);
+
+ for (i = sizeof(*eh); i < TEST_BUF_LEN; i++)
+ dev->test_buf[i + HDR_LEN] = (char)i;
+
+ dev->control = open("/dev/vhost-net", O_RDWR);
+ assert(dev->control >= 0);
+
+ r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
+ assert(r >= 0);
+
+ dev->mem = malloc(offsetof(struct vhost_memory, regions) +
+ sizeof(dev->mem->regions[0]));
+ assert(dev->mem);
+ memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
+ sizeof(dev->mem->regions[0]));
+ dev->mem->nregions = 1;
+ dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
+ dev->mem->regions[0].userspace_addr = (long)dev->buf;
+ dev->mem->regions[0].memory_size = dev->buf_size;
+
+ r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
+ assert(r >= 0);
+
+ r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
+ assert(r >= 0);
+
+ dev->nvqs = 2;
+}
+
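+/* Wait up to 100ms for the call eventfd to fire, then drain it */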
+static void wait_for_interrupt(struct vq_info *vq)
+{
+ unsigned long long val;
+
+ poll(&vq->fds, 1, 100);
+
+ if (vq->fds.revents & POLLIN)
+ read(vq->fds.fd, &val, sizeof(val));
+}
+
+static void verify_res_buf(char *res_buf)
+{
+ int i;
+
+ for (i = ETHER_HDR_LEN; i < TEST_BUF_LEN; i++)
+ assert(res_buf[i] == (char)i);
+}
+
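+/* TX test: queue test frames on the TX virtqueue, kick vhost-net and
+ * expect each frame to emerge from the tap device on the packet socket.
+ */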
+static void run_tx_test(struct vdev_info *dev, struct vq_info *vq,
+ bool delayed, int bufs)
+{
+ long long spurious = 0;
+ struct scatterlist sl;
+ unsigned int len;
+ int r;
+
+ for (;;) {
+ long started_before = vq->started;
+ long completed_before = vq->completed;
+
+ virtqueue_disable_cb(vq->vq);
+ do {
+ while (vq->started < bufs &&
+ (vq->started - vq->completed) < 1) {
+ sg_init_one(&sl, dev->test_buf, HDR_LEN + TEST_BUF_LEN);
+ r = virtqueue_add_outbuf(vq->vq, &sl, 1,
+ dev->test_buf + vq->started,
+ GFP_ATOMIC);
+ if (unlikely(r != 0))
+ break;
+
+ ++vq->started;
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (vq->started >= bufs)
+ r = -1;
+
+ /* Flush out completed bufs if any */
+ while (virtqueue_get_buf(vq->vq, &len)) {
+ int n;
+
+ n = recvfrom(dev->sock, dev->res_buf, TEST_BUF_LEN, 0, NULL, NULL);
+ assert(n == TEST_BUF_LEN);
+ verify_res_buf(dev->res_buf);
+
+ ++vq->completed;
+ r = 0;
+ }
+ } while (r == 0);
+
+ if (vq->completed == completed_before && vq->started == started_before)
+ ++spurious;
+
+ assert(vq->completed <= bufs);
+ assert(vq->started <= bufs);
+ if (vq->completed == bufs)
+ break;
+
+ if (delayed) {
+ if (virtqueue_enable_cb_delayed(vq->vq))
+ wait_for_interrupt(vq);
+ } else {
+ if (virtqueue_enable_cb(vq->vq))
+ wait_for_interrupt(vq);
+ }
+ }
+ printf("TX spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, vq->started, vq->completed);
+}
+
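+/* RX test: post receive buffers, inject frames through the packet
+ * socket and expect vhost-net to complete them on the RX virtqueue.
+ */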
+static void run_rx_test(struct vdev_info *dev, struct vq_info *vq,
+ bool delayed, int bufs)
+{
+ long long spurious = 0;
+ struct scatterlist sl;
+ unsigned int len;
+ int r;
+
+ for (;;) {
+ long started_before = vq->started;
+ long completed_before = vq->completed;
+
+ do {
+ while (vq->started < bufs &&
+ (vq->started - vq->completed) < 1) {
+ sg_init_one(&sl, dev->res_buf, HDR_LEN + TEST_BUF_LEN);
+
+ r = virtqueue_add_inbuf(vq->vq, &sl, 1,
+ dev->res_buf + vq->started,
+ GFP_ATOMIC);
+ if (unlikely(r != 0))
+ break;
+
+ ++vq->started;
+
+ vdev_send_packet(dev);
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (vq->started >= bufs)
+ r = -1;
+
+ /* Flush out completed bufs if any */
+ while (virtqueue_get_buf(vq->vq, &len)) {
+ struct ether_header *eh;
+
+ eh = (struct ether_header *)(dev->res_buf + HDR_LEN);
+
+ /* The tun netdev is up and running, so unrelated traffic
+ * may arrive too; only verify packets of type TEST_PTYPE.
+ */
+ if (eh->ether_type == htons(TEST_PTYPE)) {
+ assert(len == TEST_BUF_LEN + HDR_LEN);
+ verify_res_buf(dev->res_buf + HDR_LEN);
+ }
+
+ ++vq->completed;
+ r = 0;
+ }
+ } while (r == 0);
+
+ if (vq->completed == completed_before && vq->started == started_before)
+ ++spurious;
+
+ assert(vq->completed <= bufs);
+ assert(vq->started <= bufs);
+ if (vq->completed == bufs)
+ break;
+ }
+
+ printf("RX spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, vq->started, vq->completed);
+}
+
+static const char optstring[] = "h";
+static const struct option longopts[] = {
+ {
+ .name = "help",
+ .val = 'h',
+ },
+ {
+ .name = "event-idx",
+ .val = 'E',
+ },
+ {
+ .name = "no-event-idx",
+ .val = 'e',
+ },
+ {
+ .name = "indirect",
+ .val = 'I',
+ },
+ {
+ .name = "no-indirect",
+ .val = 'i',
+ },
+ {
+ .name = "virtio-1",
+ .val = '1',
+ },
+ {
+ .name = "no-virtio-1",
+ .val = '0',
+ },
+ {
+ .name = "delayed-interrupt",
+ .val = 'D',
+ },
+ {
+ .name = "no-delayed-interrupt",
+ .val = 'd',
+ },
+ {
+ .name = "buf-num",
+ .val = 'n',
+ .has_arg = required_argument,
+ },
+ {
+ .name = "batch",
+ .val = 'b',
+ .has_arg = required_argument,
+ },
+ {
+ }
+};
+
+static void help(int status)
+{
+ fprintf(stderr, "Usage: vhost_net_test [--help]"
+ " [--no-indirect]"
+ " [--no-event-idx]"
+ " [--no-virtio-1]"
+ " [--delayed-interrupt]"
+ " [--buf-num]"
+ "\n");
+
+ exit(status);
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+ char tun_name[IFNAMSIZ];
+ long nbufs = 0x100000;
+ struct vdev_info dev;
+ bool delayed = false;
+ int o, fd;
+
+ for (;;) {
+ o = getopt_long(argc, argv, optstring, longopts, NULL);
+ switch (o) {
+ case -1:
+ goto done;
+ case '?':
+ help(2);
+ case 'e':
+ features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);
+ break;
+ case 'h':
+ help(0);
+ case 'i':
+ features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
+ break;
+ case '0':
+ features &= ~(1ULL << VIRTIO_F_VERSION_1);
+ break;
+ case 'D':
+ delayed = true;
+ break;
+ case 'n':
+ nbufs = strtol(optarg, NULL, 10);
+ assert(nbufs > 0);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+done:
+ memset(&dev, 0, sizeof(dev));
+ snprintf(tun_name, IFNAMSIZ, "tun_%d", getpid());
+
+ fd = tun_alloc(&dev, tun_name);
+ assert(fd >= 0);
+
+ vdev_info_init(&dev, features);
+ vq_info_add(&dev, 0, DESC_NUM, fd);
+ vq_info_add(&dev, 1, DESC_NUM, fd);
+ vdev_create_socket(&dev, tun_name);
+
+ run_rx_test(&dev, &dev.vqs[0], delayed, nbufs);
+ run_tx_test(&dev, &dev.vqs[1], delayed, nbufs);
+
+ return 0;
+}