author    Linus Torvalds <torvalds@linux-foundation.org>    2024-03-12 17:44:08 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-03-12 17:44:08 -0700
commit    9187210eee7d87eea37b45ea93454a88681894a4 (patch)
tree      31b4610e62cdd5e1dfb700014aa619e41145d7d3 /include/net
parent    1f440397665f4241346e4cc6d93f8b73880815d1 (diff)
parent    ed1f164038b50c5864aa85389f3ffd456f050cca (diff)
Merge tag 'net-next-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core & protocols:

   - Large effort by Eric to lower rtnl_lock pressure and remove locks:

      - Make commonly used parts of rtnetlink (address, route dumps etc)
        lockless, protected by RCU instead of rtnl_lock.

      - Add a netns exit callback which already holds rtnl_lock, allowing
        netns exit to take rtnl_lock once in the core instead of once for
        each driver / callback.

      - Remove locks / serialization in the socket diag interface.

      - Remove 6 calls to synchronize_rcu() while holding rtnl_lock.

      - Remove the dev_base_lock, depend on RCU where necessary.

   - Support busy polling on a per-epoll context basis. Poll length and
     budget parameters can be set independently of system defaults.

   - Introduce struct net_hotdata, to make sure read-mostly global config
     variables fit in as few cache lines as possible.

   - Add optional per-nexthop statistics to ease monitoring / debug of
     ECMP imbalance problems.

   - Support TCP_NOTSENT_LOWAT in MPTCP.

   - Ensure that IPv6 temporary addresses' preferred lifetimes are long
     enough, compared to other configured lifetimes, and at least 2 sec.

   - Support forwarding of ICMP Error messages in IPSec, per RFC 4301.

   - Add support for the independent control state machine for bonding
     per IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled
     control state machine.

   - Add "network ID" to MCTP socket APIs to support hosts with multiple
     disjoint MCTP networks.

   - Re-use the mono_delivery_time skbuff bit for packets which user
     space wants to be sent at a specified time. Maintain the timing
     information while traversing veth links, bridge etc.

   - Take advantage of MSG_SPLICE_PAGES for RxRPC DATA and ACK packets.

   - Simplify many places iterating over netdevs by using an xarray
     instead of a hash table walk (hash table remains in place, for use
     on fastpaths).

   - Speed up scanning for expired routes by keeping a dedicated list.

   - Speed up "generic" XDP by trying harder to avoid large allocations.

   - Support attaching arbitrary metadata to netconsole messages.

  Things we sprinkled into general kernel code:

   - Enforce VM_IOREMAP flag and range in ioremap_page_range and
     introduce VM_SPARSE kind and vm_area_[un]map_pages (used by
     bpf_arena).

   - Rework selftest harness to enable the use of the full range of ksft
     exit code (pass, fail, skip, xfail, xpass).

  Netfilter:

   - Allow userspace to define a table that is exclusively owned by a
     daemon (via netlink socket aliveness) without auto-removing this
     table when the userspace program exits. Such table gets marked as
     orphaned and a restarting management daemon can re-attach/regain
     ownership.

   - Speed up element insertions to nftables' concatenated-ranges set
     type. Compact a few related data structures.

  BPF:

   - Add BPF token support for delegating a subset of BPF subsystem
     functionality from privileged system-wide daemons such as systemd
     through special mount options for userns-bound BPF fs to a trusted
     & unprivileged application.

   - Introduce bpf_arena which is sparse shared memory region between BPF
     program and user space where structures inside the arena can have
     pointers to other areas of the arena, and pointers work seamlessly
     for both user-space programs and BPF programs.

   - Introduce may_goto instruction that is a contract between the
     verifier and the program. The verifier allows the program to loop
     assuming it's behaving well, but reserves the right to terminate it.

   - Extend the BPF verifier to enable static subprog calls in spin lock
     critical sections.

   - Support registration of struct_ops types from modules which helps
     projects like fuse-bpf that seek to implement a new struct_ops type.

   - Add support for retrieval of cookies for perf/kprobe multi links.

   - Support arbitrary TCP SYN cookie generation / validation in the TC
     layer with BPF to allow creating SYN flood handling in BPF
     firewalls.

   - Add code generation to inline the bpf_kptr_xchg() helper which
     improves performance when stashing/popping the allocated BPF
     objects.

  Wireless:

   - Add SPP (signaling and payload protected) AMSDU support.

   - Support wider bandwidth OFDMA, as required for EHT operation.

  Driver API:

   - Major overhaul of the Energy Efficient Ethernet internals to support
     new link modes (2.5GE, 5GE), share more code between drivers
     (especially those using phylib), and encourage more uniform
     behavior. Convert and clean up drivers.

   - Define an API for querying per netdev queue statistics from drivers.

   - IPSec: account in global stats for fully offloaded sessions.

   - Create a concept of Ethernet PHY Packages at the Device Tree level,
     to allow parameterizing the existing PHY package code.

   - Enable Rx hashing (RSS) on GTP protocol fields.

  Misc:

   - Improvements and refactoring all over networking selftests.

   - Create uniform module aliases for TC classifiers, actions, and
     packet schedulers to simplify creating modprobe policies.

   - Address all missing MODULE_DESCRIPTION() warnings in networking.

   - Extend the Netlink descriptions in YAML to cover message
     encapsulation or "Netlink polymorphism", where interpretation of
     nested attributes depends on link type, classifier type or some
     other "class type".

  Drivers:

   - Ethernet high-speed NICs:

      - Add a new driver for Marvell's Octeon PCI Endpoint NIC VF.

      - Intel (100G, ice, idpf):
         - support E825-C devices

      - nVidia/Mellanox:
         - support devices with one port and multiple PCIe links

      - Broadcom (bnxt):
         - support n-tuple filters
         - support configuring the RSS key

      - Wangxun (ngbe/txgbe):
         - implement irq_domain for TXGBE's sub-interrupts

      - Pensando/AMD:
         - support XDP
         - optimize queue submission and wakeup handling (+17% bps)
         - optimize struct layout, saving 28% of memory on queues

   - Ethernet NICs embedded and virtual:

      - Google cloud vNIC:
         - refactor driver to perform memory allocations for new queue
           config before stopping and freeing the old queue memory

      - Synopsys (stmmac):
         - obey queueMaxSDU and implement counters required by 802.1Qbv

      - Renesas (ravb):
         - support packet checksum offload
         - suspend to RAM and runtime PM support

   - Ethernet switches:

      - nVidia/Mellanox:
         - support for nexthop group statistics

      - Microchip:
         - ksz8: implement PHY loopback
         - add support for KSZ8567, a 7-port 10/100Mbps switch

   - PTP:

      - New driver for RENESAS FemtoClock3 Wireless clock generator.

      - Support OCP PTP cards designed and built by Adva.

   - CAN:

      - Support recvmsg() flags for own, local and remote traffic on CAN
        BCM sockets.

      - Support for esd GmbH PCIe/402 CAN device family.

      - m_can:
         - Rx/Tx submission coalescing
         - wake on frame Rx

   - WiFi:

      - Intel (iwlwifi):
         - enable signaling and payload protected A-MSDUs
         - support wider-bandwidth OFDMA
         - support for new devices
         - bump FW API to 89 for AX devices; 90 for BZ/SC devices

      - MediaTek (mt76):
         - mt7915: newer ADIE version support
         - mt7925: radio temperature sensor support

      - Qualcomm (ath11k):
         - support 6 GHz station power modes: Low Power Indoor (LPI),
           Standard Power (SP) and Very Low Power (VLP)
         - QCA6390 & WCN6855: support 2 concurrent station interfaces
         - QCA2066 support

      - Qualcomm (ath12k):
         - refactoring in preparation for Multi-Link Operation (MLO)
           support
         - 1024 Block Ack window size support
         - firmware-2.bin support
         - support having multiple identical PCI devices (firmware needs
           to have ATH12K_FW_FEATURE_MULTI_QRTR_ID)
         - QCN9274: support split-PHY devices
         - WCN7850: enable Power Save Mode in station mode
         - WCN7850: P2P support

      - RealTek:
         - rtw88: support for more rtw8811cu and rtw8821cu devices
         - rtw89: support SCAN_RANDOM_SN and SET_SCAN_DWELL
         - rtlwifi: speed up USB firmware initialization
         - rtl8xxxu:
            - RTL8188F: concurrent interface support
            - Channel Switch Announcement (CSA) support in AP mode

      - Broadcom (brcmfmac):
         - per-vendor feature support
         - per-vendor SAE password setup
         - DMI nvram filename quirk for ACEPC W5 Pro"

* tag 'net-next-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2255 commits)
  nexthop: Fix splat with CONFIG_DEBUG_PREEMPT=y
  nexthop: Fix out-of-bounds access during attribute validation
  nexthop: Only parse NHA_OP_FLAGS for dump messages that require it
  nexthop: Only parse NHA_OP_FLAGS for get messages that require it
  bpf: move sleepable flag from bpf_prog_aux to bpf_prog
  bpf: hardcode BPF_PROG_PACK_SIZE to 2MB * num_possible_nodes()
  selftests/bpf: Add kprobe multi triggering benchmarks
  ptp: Move from simple ida to xarray
  vxlan: Remove generic .ndo_get_stats64
  vxlan: Do not alloc tstats manually
  devlink: Add comments to use netlink gen tool
  nfp: flower: handle acti_netdevs allocation failure
  net/packet: Add getsockopt support for PACKET_COPY_THRESH
  net/netlink: Add getsockopt support for NETLINK_LISTEN_ALL_NSID
  selftests/bpf: Add bpf_arena_htab test.
  selftests/bpf: Add bpf_arena_list test.
  selftests/bpf: Add unit tests for bpf_arena_alloc/free_pages
  bpf: Add helper macro bpf_addr_space_cast()
  libbpf: Recognize __arena global variables.
  bpftool: Recognize arena map type
  ...
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/act_api.h | 2
-rw-r--r--  include/net/addrconf.h | 7
-rw-r--r--  include/net/af_unix.h | 22
-rw-r--r--  include/net/bluetooth/bluetooth.h | 2
-rw-r--r--  include/net/bluetooth/hci.h | 19
-rw-r--r--  include/net/bluetooth/hci_core.h | 37
-rw-r--r--  include/net/bluetooth/hci_sync.h | 22
-rw-r--r--  include/net/bluetooth/l2cap.h | 44
-rw-r--r--  include/net/bond_3ad.h | 2
-rw-r--r--  include/net/bond_options.h | 1
-rw-r--r--  include/net/bonding.h | 23
-rw-r--r--  include/net/cfg80211.h | 138
-rw-r--r--  include/net/cfg802154.h | 1
-rw-r--r--  include/net/dropreason-core.h | 26
-rw-r--r--  include/net/dsa.h | 4
-rw-r--r--  include/net/dst.h | 1
-rw-r--r--  include/net/eee.h | 38
-rw-r--r--  include/net/genetlink.h | 6
-rw-r--r--  include/net/gro.h | 46
-rw-r--r--  include/net/hotdata.h | 52
-rw-r--r--  include/net/if_inet6.h | 4
-rw-r--r--  include/net/inet_sock.h | 1
-rw-r--r--  include/net/ioam6.h | 4
-rw-r--r--  include/net/ip6_fib.h | 52
-rw-r--r--  include/net/ip6_route.h | 5
-rw-r--r--  include/net/ip_fib.h | 1
-rw-r--r--  include/net/ip_tunnels.h | 3
-rw-r--r--  include/net/ipv6.h | 8
-rw-r--r--  include/net/mac80211.h | 163
-rw-r--r--  include/net/mctp.h | 6
-rw-r--r--  include/net/net_namespace.h | 5
-rw-r--r--  include/net/netdev_queues.h | 56
-rw-r--r--  include/net/netfilter/nf_queue.h | 1
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/netlabel.h | 7
-rw-r--r--  include/net/netmem.h | 41
-rw-r--r--  include/net/nexthop.h | 34
-rw-r--r--  include/net/nfc/nfc.h | 2
-rw-r--r--  include/net/page_pool/types.h | 13
-rw-r--r--  include/net/pkt_cls.h | 2
-rw-r--r--  include/net/pkt_sched.h | 2
-rw-r--r--  include/net/protocol.h | 3
-rw-r--r--  include/net/request_sock.h | 39
-rw-r--r--  include/net/route.h | 7
-rw-r--r--  include/net/rps.h | 125
-rw-r--r--  include/net/rtnetlink.h | 1
-rw-r--r--  include/net/scm.h | 1
-rw-r--r--  include/net/sctp/structs.h | 5
-rw-r--r--  include/net/sock.h | 173
-rw-r--r--  include/net/tcp.h | 51
-rw-r--r--  include/net/xfrm.h | 14
51 files changed, 1006 insertions, 322 deletions
diff --git a/include/net/act_api.h b/include/net/act_api.h
index e1e5e72b901e..77ee0c657e2c 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -201,6 +201,8 @@ int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
+#define NET_ACT_ALIAS_PREFIX "net-act-"
+#define MODULE_ALIAS_NET_ACT(kind) MODULE_ALIAS(NET_ACT_ALIAS_PREFIX kind)
int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
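[Editor's note] The two lines added above give TC action modules a uniform request_module() alias under the "net-act-" prefix (the "uniform module aliases" item in the pull message). A minimal sketch of how an action module might use it; the "example" kind is hypothetical:

#include <linux/module.h>
#include <net/act_api.h>

/* "modprobe net-act-example", as issued for an unknown action kind,
 * now resolves to this module.
 */
MODULE_ALIAS_NET_ACT("example");
MODULE_DESCRIPTION("Example TC action, shown only to illustrate the alias macro");
MODULE_LICENSE("GPL");
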
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 61ebe723ee4d..9d06eb945509 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -8,8 +8,9 @@
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
-#define TEMP_VALID_LIFETIME (7*86400)
-#define TEMP_PREFERRED_LIFETIME (86400)
+#define TEMP_VALID_LIFETIME (7*86400) /* 1 week */
+#define TEMP_PREFERRED_LIFETIME (86400) /* 24 hours */
+#define REGEN_MIN_ADVANCE (2) /* 2 seconds */
#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
@@ -416,7 +417,7 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
if (unlikely(!idev))
return true;
- return !!idev->cnf.ignore_routes_with_linkdown;
+ return !!READ_ONCE(idev->cnf.ignore_routes_with_linkdown);
}
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
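[Editor's note] The READ_ONCE() above lets ip6_ignore_linkdown() run without rtnl_lock; the convention assumed here is that the writer stores the value with a matching WRITE_ONCE(). A hedged sketch of such a writer (illustrative helper, not part of the patch):

#include <linux/compiler.h>
#include <net/if_inet6.h>

/* Hypothetical writer: the configuration path stores the new value with
 * WRITE_ONCE() so lockless readers such as ip6_ignore_linkdown() cannot
 * observe a torn or re-fetched value.
 */
static inline void example_set_ignore_linkdown(struct inet6_dev *idev, int val)
{
	WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, val);
}
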
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index afd40dce40f3..627ea8e2d915 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -8,21 +8,29 @@
#include <linux/refcount.h>
#include <net/sock.h>
+#if IS_ENABLED(CONFIG_UNIX)
+struct unix_sock *unix_get_socket(struct file *filp);
+#else
+static inline struct unix_sock *unix_get_socket(struct file *filp)
+{
+ return NULL;
+}
+#endif
+
+extern spinlock_t unix_gc_lock;
+extern unsigned int unix_tot_inflight;
+
void unix_inflight(struct user_struct *user, struct file *fp);
void unix_notinflight(struct user_struct *user, struct file *fp);
-void unix_destruct_scm(struct sk_buff *skb);
-void io_uring_destruct_scm(struct sk_buff *skb);
void unix_gc(void);
-void wait_for_unix_gc(void);
-struct sock *unix_get_socket(struct file *filp);
+void wait_for_unix_gc(struct scm_fp_list *fpl);
+
struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_MOD (256 - 1)
#define UNIX_HASH_SIZE (256 * 2)
#define UNIX_HASH_BITS 8
-extern unsigned int unix_tot_inflight;
-
struct unix_address {
refcount_t refcnt;
int len;
@@ -55,7 +63,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
- atomic_long_t inflight;
+ unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 7ffa8c192c3f..9fe95a22abeb 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -164,6 +164,8 @@ struct bt_voice {
#define BT_ISO_QOS_BIG_UNSET 0xff
#define BT_ISO_QOS_BIS_UNSET 0xff
+#define BT_ISO_SYNC_TIMEOUT 0x07d0 /* 20 secs */
+
struct bt_iso_io_qos {
__u32 interval;
__u16 latency;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index bdee5d649cc6..8701ca5f31ee 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -330,6 +330,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_BROKEN_LE_CODED,
+
+ /*
+ * When this quirk is set, the HCI_OP_READ_ENC_KEY_SIZE command is
+ * skipped during an HCI_EV_ENCRYPT_CHANGE event. This is required
+ * for Actions Semiconductor ATS2851 based controllers, which erroneously
+ * claim to support it.
+ */
+ HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
};
/* HCI device flags */
@@ -372,6 +380,7 @@ enum {
HCI_SETUP,
HCI_CONFIG,
HCI_DEBUGFS_CREATED,
+ HCI_POWERING_DOWN,
HCI_AUTO_OFF,
HCI_RFKILLED,
HCI_MGMT,
@@ -393,7 +402,6 @@ enum {
HCI_LIMITED_PRIVACY,
HCI_RPA_EXPIRED,
HCI_RPA_RESOLVING,
- HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
HCI_ADVERTISING_CONNECTABLE,
@@ -437,7 +445,7 @@ enum {
#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
-#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
+#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
@@ -653,6 +661,7 @@ enum {
#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_COMMAND_DISALLOWED 0x0c
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
#define HCI_ERROR_INVALID_PARAMETERS 0x12
@@ -661,6 +670,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
@@ -2035,6 +2045,7 @@ struct hci_cp_le_set_per_adv_params {
} __packed;
#define HCI_MAX_PER_AD_LENGTH 252
+#define HCI_MAX_PER_AD_TOT_LEN 1650
#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f
struct hci_cp_le_set_per_adv_data {
@@ -2795,6 +2806,10 @@ struct hci_ev_le_per_adv_report {
__u8 data[];
} __packed;
+#define LE_PA_DATA_COMPLETE 0x00
+#define LE_PA_DATA_MORE_TO_COME 0x01
+#define LE_PA_DATA_TRUNCATED 0x02
+
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f8dd9173714..56fb42df44a3 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -552,6 +552,7 @@ struct hci_dev {
__u32 req_status;
__u32 req_result;
struct sk_buff *req_skb;
+ struct sk_buff *req_rsp;
void *smp_data;
void *smp_bredr_data;
@@ -734,8 +735,9 @@ struct hci_conn {
__u16 le_supv_timeout;
__u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 le_adv_data_len;
- __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
- __u8 le_per_adv_data_len;
+ __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
+ __u16 le_per_adv_data_len;
+ __u16 le_per_adv_data_offset;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -1083,6 +1085,24 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
}
+static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c == conn) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1480,7 +1500,6 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
bdaddr_t *dst, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
-void hci_conn_check_pending(struct hci_dev *hdev);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
@@ -1494,11 +1513,13 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
u16 conn_timeout, u8 role);
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason);
+ enum conn_reasons conn_reason, u16 timeout);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting, struct bt_codec *codec);
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
@@ -1509,8 +1530,8 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos,
__u8 data_len, __u8 *data);
-int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
- __u8 sid, struct bt_iso_qos *qos);
+struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[]);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 6efbc2152146..6a9d063e9f47 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -42,12 +42,24 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry);
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func, void *data,
+ hci_cmd_sync_work_destroy_t destroy);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
@@ -127,8 +139,6 @@ struct hci_conn;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
-
int hci_le_create_cis_sync(struct hci_dev *hdev);
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
@@ -138,3 +148,9 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index cf393e72d6ed..a4278aa618ab 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -59,8 +59,6 @@
#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
-#define L2CAP_A2MP_DEFAULT_MTU 670
-
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
@@ -109,12 +107,6 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
-#define L2CAP_CREATE_CHAN_REQ 0x0c
-#define L2CAP_CREATE_CHAN_RSP 0x0d
-#define L2CAP_MOVE_CHAN_REQ 0x0e
-#define L2CAP_MOVE_CHAN_RSP 0x0f
-#define L2CAP_MOVE_CHAN_CFM 0x10
-#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
#define L2CAP_LE_CONN_REQ 0x14
@@ -144,7 +136,6 @@ struct l2cap_conninfo {
/* L2CAP fixed channels */
#define L2CAP_FC_SIG_BREDR 0x02
#define L2CAP_FC_CONNLESS 0x04
-#define L2CAP_FC_A2MP 0x08
#define L2CAP_FC_ATT 0x10
#define L2CAP_FC_SIG_LE 0x20
#define L2CAP_FC_SMP_LE 0x40
@@ -267,7 +258,6 @@ struct l2cap_conn_rsp {
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
-#define L2CAP_CID_A2MP 0x0003
#define L2CAP_CID_ATT 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
@@ -282,7 +272,6 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
-#define L2CAP_CR_BAD_AMP 0x0005
#define L2CAP_CR_INVALID_SCID 0x0006
#define L2CAP_CR_SCID_IN_USE 0x0007
@@ -404,29 +393,6 @@ struct l2cap_info_rsp {
__u8 data[];
} __packed;
-struct l2cap_create_chan_req {
- __le16 psm;
- __le16 scid;
- __u8 amp_id;
-} __packed;
-
-struct l2cap_create_chan_rsp {
- __le16 dcid;
- __le16 scid;
- __le16 result;
- __le16 status;
-} __packed;
-
-struct l2cap_move_chan_req {
- __le16 icid;
- __u8 dest_amp_id;
-} __packed;
-
-struct l2cap_move_chan_rsp {
- __le16 icid;
- __le16 result;
-} __packed;
-
#define L2CAP_MR_SUCCESS 0x0000
#define L2CAP_MR_PEND 0x0001
#define L2CAP_MR_BAD_ID 0x0002
@@ -539,8 +505,6 @@ struct l2cap_seq_list {
struct l2cap_chan {
struct l2cap_conn *conn;
- struct hci_conn *hs_hcon;
- struct hci_chan *hs_hchan;
struct kref kref;
atomic_t nesting;
@@ -591,12 +555,6 @@ struct l2cap_chan {
unsigned long conn_state;
unsigned long flags;
- __u8 remote_amp_id;
- __u8 local_amp_id;
- __u8 move_id;
- __u8 move_state;
- __u8 move_role;
-
__u16 next_tx_seq;
__u16 expected_ack_seq;
__u16 expected_tx_seq;
@@ -981,7 +939,7 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type);
+ bdaddr_t *dst, u8 dst_type, u16 timeout);
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index c5e57c6bd873..9ce5ac2bfbad 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -54,6 +54,8 @@ typedef enum {
AD_MUX_DETACHED, /* mux machine */
AD_MUX_WAITING, /* mux machine */
AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING, /* mux machine */
+ AD_MUX_DISTRIBUTING, /* mux machine */
AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 69292ecc0325..473a0147769e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -76,6 +76,7 @@ enum {
BOND_OPT_MISSED_MAX,
BOND_OPT_NS_TARGETS,
BOND_OPT_PRIO,
+ BOND_OPT_COUPLED_CONTROL,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 5b8b1b644a2d..b61fb1aa3a56 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -148,6 +148,7 @@ struct bond_params {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
#endif
+ int coupled_control;
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
@@ -167,6 +168,7 @@ struct slave {
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */
+ rx_disabled:1, /* indicates whether slave's Rx is disabled */
should_notify:1, /* indicates whether the state changed */
should_notify_link:1; /* indicates whether the link changed */
u8 duplex;
@@ -568,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave,
bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 1;
+}
+
+static inline void bond_set_slave_tx_disabled_flags(struct slave *slave,
+ bool notify)
+{
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
}
static inline void bond_set_slave_active_flags(struct slave *slave,
@@ -575,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave,
{
bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 0;
+}
+
+static inline void bond_set_slave_rx_enabled_flags(struct slave *slave,
+ bool notify)
+{
+ slave->rx_disabled = 0;
}
static inline bool bond_is_slave_inactive(struct slave *slave)
@@ -582,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+static inline bool bond_is_slave_rx_disabled(struct slave *slave)
+{
+ return slave->rx_disabled;
+}
+
static inline void bond_propose_link_state(struct slave *slave, int state)
{
slave->link_new_state = state;
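[Editor's note] The new rx_disabled bit separates "may receive" from the active/backup state so the independent control state machine (see the coupled_control option above) can toggle collecting on its own. A hedged sketch of an RX-path check using the new helper (illustrative only, not taken from the patch):

#include <net/bonding.h>

/* Illustrative: in 802.3ad mode, traffic arriving on a slave whose
 * rx_disabled bit is set can be dropped even though the slave is not
 * otherwise marked inactive.
 */
static bool example_should_drop_rx(struct slave *slave)
{
	return BOND_MODE(slave->bond) == BOND_MODE_8023AD &&
	       bond_is_slave_rx_disabled(slave);
}
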
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2b54fdd8ca15..2e2be4fd2bb6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -118,10 +118,13 @@ struct wiphy;
* restrictions.
* @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
* @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
- * @IEEE80211_CHAN_NO_UHB_VLP_CLIENT: Client connection with VLP AP
+ * @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP
* not permitted using this channel
- * @IEEE80211_CHAN_NO_UHB_AFC_CLIENT: Client connection with AFC AP
+ * @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP
* not permitted using this channel
+ * @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
+ * mode even in the presence of other (regulatory) restrictions,
+ * even if it is otherwise disabled.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -146,8 +149,9 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_320MHZ = 1<<19,
IEEE80211_CHAN_NO_EHT = 1<<20,
IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
- IEEE80211_CHAN_NO_UHB_VLP_CLIENT= 1<<22,
- IEEE80211_CHAN_NO_UHB_AFC_CLIENT= 1<<23,
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
+ IEEE80211_CHAN_CAN_MONITOR = 1<<24,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -808,6 +812,9 @@ struct key_params {
* chan will define the primary channel and all other
* parameters are ignored.
* @freq1_offset: offset from @center_freq1, in KHz
+ * @punctured: mask of the punctured 20 MHz subchannels, with
+ * bits turned on being disabled (punctured); numbered
+ * from lower to higher frequency (like in the spec)
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -816,6 +823,7 @@ struct cfg80211_chan_def {
u32 center_freq2;
struct ieee80211_edmg edmg;
u16 freq1_offset;
+ u16 punctured;
};
/*
@@ -956,7 +964,8 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
chandef1->width == chandef2->width &&
chandef1->center_freq1 == chandef2->center_freq1 &&
chandef1->freq1_offset == chandef2->freq1_offset &&
- chandef1->center_freq2 == chandef2->center_freq2);
+ chandef1->center_freq2 == chandef2->center_freq2 &&
+ chandef1->punctured == chandef2->punctured);
}
/**
@@ -1048,6 +1057,20 @@ cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
/**
+ * cfg80211_chandef_primary - calculate primary 40/80/160 MHz freq
+ * @chandef: chandef to calculate for
+ * @primary_chan_width: primary channel width to calculate center for
+ * @punctured: punctured sub-channel bitmap, will be recalculated
+ * according to the new bandwidth, can be %NULL
+ *
+ * Returns: the primary 40/80/160 MHz channel center frequency, or -1
+ * for errors, updating the punctured bitmap
+ */
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured);
+
+/**
* nl80211_send_chandef - sends the channel definition.
* @msg: the msg to send channel definition
* @chandef: the channel definition to check
@@ -1457,9 +1480,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
- * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
- * a 20 MHz channel, lowest bit corresponding to the lowest channel.
- * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1494,7 +1514,6 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
- u16 punct_bitmap;
};
@@ -1528,9 +1547,8 @@ struct cfg80211_ap_update {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
- * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
- * a 20 MHz channel, lowest bit corresponding to the lowest channel.
- * Bit set to 1 indicates that the channel is punctured.
+ * @link_id: defines the link on which channel switch is expected during
+ * MLO. 0 in case of non-MLO.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1543,7 +1561,7 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
- u16 punct_bitmap;
+ u8 link_id;
};
/**
@@ -1766,11 +1784,15 @@ struct station_parameters {
* @subtype: Management frame subtype to use for indicating removal
* (10 = Disassociation, 12 = Deauthentication)
* @reason_code: Reason code for the Disassociation/Deauthentication frame
+ * @link_id: Link ID indicating a link that stations to be flushed must be
+ * using; valid only for MLO, but can also be -1 for MLO to really
+ * remove all stations.
*/
struct station_del_parameters {
const u8 *mac;
u8 subtype;
u16 reason_code;
+ int link_id;
};
/**
@@ -2695,19 +2717,11 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
* @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
- * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
- * for filtering out scan results received. Drivers advertise this support
- * of band specific rssi based filtering through the feature capability
- * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
- * specific rssi thresholds take precedence over rssi_thold, if specified.
- * If not specified for any band, it will be assigned with rssi_thold of
- * corresponding matchset.
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
s32 rssi_thold;
- s32 per_band_rssi_thold[NUM_NL80211_BANDS];
};
/**
@@ -3063,6 +3077,7 @@ struct cfg80211_assoc_link {
* @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
* Drivers shall disable MLO features for the current association if this
* flag is not set.
+ * @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
@@ -3072,6 +3087,7 @@ enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HE = BIT(4),
ASSOC_REQ_DISABLE_EHT = BIT(5),
CONNECT_REQ_MLO_SUPPORT = BIT(6),
+ ASSOC_REQ_SPP_AMSDU = BIT(7),
};
/**
@@ -3596,12 +3612,15 @@ struct cfg80211_wowlan_nd_info {
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
* @net_detect: if not %NULL, woke up because of net detect
+ * @unprot_deauth_disassoc: woke up due to unprotected deauth or
+ * disassoc frame (in MFP).
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release, packet_80211,
- tcp_match, tcp_connlost, tcp_nomoretokens;
+ tcp_match, tcp_connlost, tcp_nomoretokens,
+ unprot_deauth_disassoc;
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
@@ -4923,7 +4942,7 @@ struct cfg80211_ops {
* enum wiphy_flags - wiphy capability flags
*
* @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
- * into two, first for legacy bands and second for UHB.
+ * into two, first for legacy bands and second for 6 GHz.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -6204,7 +6223,7 @@ struct wireless_dev {
int beacon_interval;
struct cfg80211_chan_def preset_chandef;
struct cfg80211_chan_def chandef;
- u8 id[IEEE80211_MAX_SSID_LEN];
+ u8 id[IEEE80211_MAX_MESH_ID_LEN];
u8 id_len, id_up_len;
} mesh;
struct {
@@ -6852,13 +6871,45 @@ cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
}
/**
+ * enum cfg80211_rnr_iter_ret - reduced neighbor report iteration state
+ * @RNR_ITER_CONTINUE: continue iterating with the next entry
+ * @RNR_ITER_BREAK: break iteration and return success
+ * @RNR_ITER_ERROR: break iteration and return error
+ */
+enum cfg80211_rnr_iter_ret {
+ RNR_ITER_CONTINUE,
+ RNR_ITER_BREAK,
+ RNR_ITER_ERROR,
+};
+
+/**
+ * cfg80211_iter_rnr - iterate reduced neighbor report entries
+ * @elems: the frame elements to iterate RNR elements and then
+ * their entries in
+ * @elems_len: length of the elements
+ * @iter: iteration function, see also &enum cfg80211_rnr_iter_ret
+ * for the return value
+ * @iter_data: additional data passed to the iteration function
+ * Return: %true on success (after successfully iterating all entries
+ * or if the iteration function returned %RNR_ITER_BREAK),
+ * %false on error (iteration function returned %RNR_ITER_ERROR
+ * or elements were malformed.)
+ */
+bool cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data);
+
+/**
* cfg80211_defragment_element - Defrag the given element data into a buffer
*
* @elem: the element to defragment
* @ies: elements where @elem is contained
* @ieslen: length of @ies
- * @data: buffer to store element data
- * @data_len: length of @data
+ * @data: buffer to store element data, or %NULL to just determine size
+ * @data_len: length of @data, or 0
* @frag_id: the element ID of fragments
*
* Return: length of @data, or -EINVAL on error
@@ -7156,11 +7207,13 @@ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
* from a beacon or probe response
* @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
* @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ * @CFG80211_BSS_FTYPE_S1G_BEACON: data comes from an S1G beacon
*/
enum cfg80211_bss_frame_type {
CFG80211_BSS_FTYPE_UNKNOWN,
CFG80211_BSS_FTYPE_BEACON,
CFG80211_BSS_FTYPE_PRESP,
+ CFG80211_BSS_FTYPE_S1G_BEACON,
};
/**
@@ -8733,14 +8786,13 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
- * @punct_bitmap: the new puncturing bitmap
*
* Caller must hold wiphy mutex, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap);
+ unsigned int link_id);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
@@ -8749,7 +8801,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
- * @punct_bitmap: the future puncturing bitmap
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -8758,7 +8809,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap);
+ bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -8772,6 +8823,19 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
enum nl80211_band *band);
/**
+ * ieee80211_operating_class_to_chandef - convert operating class to chandef
+ *
+ * @operating_class: the operating class to convert
+ * @chan: the ieee80211_channel to convert
+ * @chandef: a pointer to the resulting chandef
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef);
+
+/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
*
* @chandef: the chandef to convert
@@ -9377,18 +9441,6 @@ static inline int cfg80211_color_change_notify(struct net_device *dev)
}
/**
- * cfg80211_valid_disable_subchannel_bitmap - validate puncturing bitmap
- * @bitmap: bitmap to be validated
- * @chandef: channel definition
- *
- * Validate the puncturing bitmap.
- *
- * Return: %true if the bitmap is valid. %false otherwise.
- */
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef);
-
-/**
* cfg80211_links_removed - Notify about removed STA MLD setup links.
* @dev: network device.
* @link_mask: BIT mask of removed STA MLD setup link IDs.
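[Editor's note] With puncturing now carried in the chandef itself (the new @punctured field above) instead of per-operation punct_bitmap arguments, callers simply read it from the chandef. A small illustrative helper (not a cfg80211 API) showing how the documented bit numbering is meant to be read:

#include <linux/bits.h>
#include <net/cfg80211.h>

/* Bit N of chandef->punctured covers the Nth 20 MHz subchannel, counted
 * from the lowest frequency; a set bit means that subchannel is punctured.
 */
static bool example_subchan_punctured(const struct cfg80211_chan_def *chandef,
				      unsigned int subchan)
{
	return chandef->punctured & BIT(subchan);
}
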
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index cd95711b12b8..76d2cd2e2b30 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -401,6 +401,7 @@ struct ieee802154_llsec_key {
struct ieee802154_llsec_key_entry {
struct list_head list;
+ struct rcu_head rcu;
struct ieee802154_llsec_key_id id;
struct ieee802154_llsec_key *key;
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 6d3a20163260..9707ab54fdd5 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -30,6 +30,7 @@
FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
+ FN(TCP_ABORT_ON_DATA) \
FN(TCP_ZEROWINDOW) \
FN(TCP_OLD_DATA) \
FN(TCP_OVERWINDOW) \
@@ -37,6 +38,7 @@
FN(TCP_RFC7323_PAWS) \
FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_INVALID_ACK_SEQUENCE) \
FN(TCP_RESET) \
FN(TCP_INVALID_SYN) \
FN(TCP_CLOSE) \
@@ -54,6 +56,7 @@
FN(NEIGH_QUEUEFULL) \
FN(NEIGH_DEAD) \
FN(TC_EGRESS) \
+ FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
FN(CPU_BACKLOG) \
FN(XDP) \
@@ -105,7 +108,13 @@ enum skb_drop_reason {
SKB_CONSUMED,
/** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
SKB_DROP_REASON_NOT_SPECIFIED,
- /** @SKB_DROP_REASON_NO_SOCKET: socket not found */
+ /**
+ * @SKB_DROP_REASON_NO_SOCKET: no valid socket that can be used.
+ * Reason could be one of three cases:
+ * 1) no established/listening socket found during lookup process
+ * 2) no valid request socket during 3WHS process
+ * 3) no valid child socket during 3WHS process
+ */
SKB_DROP_REASON_NO_SOCKET,
/** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
SKB_DROP_REASON_PKT_TOO_SMALL,
@@ -198,6 +207,11 @@ enum skb_drop_reason {
/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
SKB_DROP_REASON_TCP_FLAGS,
/**
+ * @SKB_DROP_REASON_TCP_ABORT_ON_DATA: abort on data, corresponding to
+ * LINUX_MIB_TCPABORTONDATA
+ */
+ SKB_DROP_REASON_TCP_ABORT_ON_DATA,
+ /**
* @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
* see LINUX_MIB_TCPZEROWINDOWDROP
*/
@@ -221,13 +235,19 @@ enum skb_drop_reason {
SKB_DROP_REASON_TCP_OFOMERGE,
/**
* @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
- * LINUX_MIB_PAWSESTABREJECTED
+ * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
*/
SKB_DROP_REASON_TCP_RFC7323_PAWS,
/** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
SKB_DROP_REASON_TCP_OLD_SEQUENCE,
/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
+ * field because ack sequence is not in the window between snd_una
+ * and snd_nxt
+ */
+ SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE,
/** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
SKB_DROP_REASON_TCP_RESET,
/**
@@ -271,6 +291,8 @@ enum skb_drop_reason {
SKB_DROP_REASON_NEIGH_DEAD,
/** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
SKB_DROP_REASON_TC_EGRESS,
+ /** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
+ SKB_DROP_REASON_SECURITY_HOOK,
/**
* @SKB_DROP_REASON_QDISC_DROP: dropped by qdisc when packet outputting (
* failed to enqueue to current qdisc)
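[Editor's note] New reasons such as TCP_ABORT_ON_DATA and SECURITY_HOOK are consumed like the existing ones: the drop site hands them to kfree_skb_reason() so the kfree_skb tracepoint and drop monitors can attribute the free. A minimal illustrative call site (not taken from the patch):

#include <linux/skbuff.h>

static void example_abort_on_data(struct sk_buff *skb)
{
	/* Frees the skb and records why it was dropped. */
	kfree_skb_reason(skb, SKB_DROP_REASON_TCP_ABORT_ON_DATA);
}
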
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 82135fbdb1e6..7c0da9effe4e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -991,9 +991,9 @@ struct dsa_switch_ops {
* Port's MAC EEE settings
*/
int (*set_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
int (*get_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
diff --git a/include/net/dst.h b/include/net/dst.h
index f5dfc8fb7b37..0aa331bd2fdb 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -390,7 +390,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
struct net_device *dev, int initial_obsolete,
unsigned short flags);
-struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);
static inline void dst_confirm(struct dst_entry *dst)
diff --git a/include/net/eee.h b/include/net/eee.h
new file mode 100644
index 000000000000..84837aba3cd9
--- /dev/null
+++ b/include/net/eee.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _EEE_H
+#define _EEE_H
+
+#include <linux/types.h>
+
+struct eee_config {
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_enabled;
+};
+
+static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
+{
+ /* eee_enabled is the master on/off */
+ if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
+ return false;
+
+ return true;
+}
+
+static inline void eeecfg_to_eee(struct ethtool_keee *eee,
+ const struct eee_config *eeecfg)
+{
+ eee->tx_lpi_timer = eeecfg->tx_lpi_timer;
+ eee->tx_lpi_enabled = eeecfg->tx_lpi_enabled;
+ eee->eee_enabled = eeecfg->eee_enabled;
+}
+
+static inline void eee_to_eeecfg(struct eee_config *eeecfg,
+ const struct ethtool_keee *eee)
+{
+ eeecfg->tx_lpi_timer = eee->tx_lpi_timer;
+ eeecfg->tx_lpi_enabled = eee->tx_lpi_enabled;
+ eeecfg->eee_enabled = eee->eee_enabled;
+}
+
+#endif
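[Editor's note] The new header gives MAC drivers a small, driver-agnostic place to keep EEE configuration and convert it to and from struct ethtool_keee. A hedged sketch of a driver using it; the private struct and callbacks are hypothetical:

#include <linux/ethtool.h>
#include <net/eee.h>

struct example_priv {
	struct eee_config eee_cfg;	/* driver's stored EEE settings */
	bool tx_lpi_active;		/* what would be programmed into the MAC */
};

static int example_set_eee(struct example_priv *priv,
			   const struct ethtool_keee *keee)
{
	/* Store the new configuration ... */
	eee_to_eeecfg(&priv->eee_cfg, keee);

	/* ... and only let the MAC signal LPI if both knobs allow it. */
	priv->tx_lpi_active = eeecfg_mac_can_tx_lpi(&priv->eee_cfg);

	return 0;
}

static void example_get_eee(struct example_priv *priv, struct ethtool_keee *keee)
{
	/* Report the stored configuration in the ethtool representation. */
	eeecfg_to_eee(keee, &priv->eee_cfg);
}
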
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index e61469129402..9ece6e5a3ea8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -41,6 +41,8 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @bind: called when family multicast group is added to a netlink socket
+ * @unbind: called when family multicast group is removed from a netlink socket
* @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
@@ -84,6 +86,8 @@ struct genl_family {
void (*post_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*bind)(int mcgrp);
+ void (*unbind)(int mcgrp);
const struct genl_ops * ops;
const struct genl_small_ops *small_ops;
const struct genl_split_ops *split_ops;
@@ -149,7 +153,7 @@ static inline void *genl_info_userhdr(const struct genl_info *info)
/* Report that a root attribute is missing */
#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
- struct genl_info *__info = (info); \
+ const struct genl_info *__info = (info); \
\
NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
})
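[Editor's note] The new per-family bind()/unbind() hooks let a family learn when one of its multicast groups gains or loses a listening netlink socket (the argument is the group index within the family's mcgrps array). A hedged sketch of wiring them up; the family name and handlers are hypothetical:

#include <linux/printk.h>
#include <net/genetlink.h>

static int example_bind(int mcgrp)
{
	pr_debug("example: mcast group %d gained a listener\n", mcgrp);
	return 0;	/* nothing to set up in this sketch */
}

static void example_unbind(int mcgrp)
{
	pr_debug("example: mcast group %d lost a listener\n", mcgrp);
}

static struct genl_family example_family __ro_after_init = {
	.name		= "example",
	.version	= 1,
	.module		= THIS_MODULE,
	.bind		= example_bind,
	.unbind		= example_unbind,
};
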
diff --git a/include/net/gro.h b/include/net/gro.h
index b435f0ddbf64..50f1e403dbbb 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_IPV6_GRO_H
-#define _NET_IPV6_GRO_H
+#ifndef _NET_GRO_H
+#define _NET_GRO_H
#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
@@ -9,6 +9,7 @@
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>
+#include <net/hotdata.h>
struct napi_gro_cb {
union {
@@ -139,21 +140,16 @@ static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
NAPI_GRO_CB(skb)->data_offset += len;
}
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
+static inline void *skb_gro_header_fast(const struct sk_buff *skb,
unsigned int offset)
{
return NAPI_GRO_CB(skb)->frag0 + offset;
}
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+static inline bool skb_gro_may_pull(const struct sk_buff *skb,
+ unsigned int hlen)
{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
}
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
@@ -162,28 +158,30 @@ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
if (!pskb_may_pull(skb, hlen))
return NULL;
- skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
-static inline void *skb_gro_header(struct sk_buff *skb,
- unsigned int hlen, unsigned int offset)
+static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
{
void *ptr;
ptr = skb_gro_header_fast(skb, offset);
- if (skb_gro_header_hard(skb, hlen))
+ if (!skb_gro_may_pull(skb, hlen))
ptr = skb_gro_header_slow(skb, hlen, offset);
return ptr;
}
-static inline void *skb_gro_network_header(struct sk_buff *skb)
+static inline void *skb_gro_network_header(const struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
+ if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
+ return skb_gro_header_fast(skb, skb_network_offset(skb));
+
+ return skb_network_header(skb);
}
-static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
{
const struct iphdr *iph = skb_gro_network_header(skb);
@@ -421,7 +419,8 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
return uh;
}
-static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
{
const struct ipv6hdr *iph = skb_gro_network_header(skb);
@@ -448,7 +447,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
{
list_add_tail(&skb->list, &napi->rx_list);
napi->rx_count += segs;
- if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+ if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
gro_normal_list(napi);
}
@@ -495,6 +494,7 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
#endif
}
-extern struct list_head offload_base;
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
-#endif /* _NET_IPV6_GRO_H */
+#endif /* _NET_GRO_H */
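[Editor's note] skb_gro_header_hard() is replaced by skb_gro_may_pull(), which answers the opposite question ("is the header already available in frag0?"), and skb_gro_header() now uses it before falling back to the slow path. A hedged sketch of the usual pattern in a GRO receive handler; the protocol header is hypothetical:

#include <net/gro.h>

struct example_hdr {
	__be16 id;
	__be16 len;
};

static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct example_hdr);
	struct example_hdr *hdr;

	/* Fast path reads from frag0 when skb_gro_may_pull() is true,
	 * otherwise skb_gro_header() falls back to pskb_may_pull().
	 */
	hdr = skb_gro_header(skb, hlen, off);
	if (unlikely(!hdr))
		return NULL;

	/* ... match @hdr against packets already held on @head ... */
	return NULL;
}
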
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
new file mode 100644
index 000000000000..003667a1efd6
--- /dev/null
+++ b/include/net/hotdata.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_HOTDATA_H
+#define _NET_HOTDATA_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <net/protocol.h>
+
+/* Read mostly data used in network fast paths. */
+struct net_hotdata {
+#if IS_ENABLED(CONFIG_INET)
+ struct packet_offload ip_packet_offload;
+ struct net_offload tcpv4_offload;
+ struct net_protocol tcp_protocol;
+ struct net_offload udpv4_offload;
+ struct net_protocol udp_protocol;
+ struct packet_offload ipv6_packet_offload;
+ struct net_offload tcpv6_offload;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol tcpv6_protocol;
+ struct inet6_protocol udpv6_protocol;
+#endif
+ struct net_offload udpv6_offload;
+#endif
+ struct list_head offload_base;
+ struct list_head ptype_all;
+ struct kmem_cache *skbuff_cache;
+ struct kmem_cache *skbuff_fclone_cache;
+ struct kmem_cache *skb_small_head_cache;
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+ u32 rps_cpu_mask;
+#endif
+ int gro_normal_batch;
+ int netdev_budget;
+ int netdev_budget_usecs;
+ int tstamp_prequeue;
+ int max_backlog;
+ int dev_tx_weight;
+ int dev_rx_weight;
+};
+
+#define inet_ehash_secret net_hotdata.tcp_protocol.secret
+#define udp_ehash_secret net_hotdata.udp_protocol.secret
+#define inet6_ehash_secret net_hotdata.tcpv6_protocol.secret
+#define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
+#define udp6_ehash_secret net_hotdata.udpv6_protocol.secret
+#define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
+
+extern struct net_hotdata net_hotdata;
+
+#endif /* _NET_HOTDATA_H */
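[Editor's note] Fast-path consumers read these formerly scattered globals through the single cache-hot structure, typically with READ_ONCE() since the values can still change via sysctl (exactly what the gro.h change above does for gro_normal_batch). Minimal illustrative read:

#include <net/hotdata.h>

static inline int example_netdev_budget(void)
{
	/* All of these fast-path knobs now share a few cache lines. */
	return READ_ONCE(net_hotdata.netdev_budget);
}
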
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f07642264c1e..238ad3349456 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -144,7 +144,7 @@ struct ipv6_ac_socklist {
struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
- struct ifacaddr6 *aca_next;
+ struct ifacaddr6 __rcu *aca_next;
struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
@@ -196,7 +196,7 @@ struct inet6_dev {
spinlock_t mc_report_lock; /* mld query report lock */
struct mutex mc_lock; /* mld global lock */
- struct ifacaddr6 *ac_list;
+ struct ifacaddr6 __rcu *ac_list;
rwlock_t lock;
refcount_t refcnt;
__u32 if_flags;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index d94c242eb3ed..f9ddd47dc4f8 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -274,6 +274,7 @@ enum {
INET_FLAGS_REPFLOW = 27,
INET_FLAGS_RTALERT_ISOLATE = 28,
INET_FLAGS_SNDFLOW = 29,
+ INET_FLAGS_RTALERT = 30,
};
/* cmsg flags for inet */
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
index 781d2d8b2f29..2cbbee6e806a 100644
--- a/include/net/ioam6.h
+++ b/include/net/ioam6.h
@@ -12,6 +12,7 @@
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <linux/rhashtable-types.h>
struct ioam6_namespace {
@@ -65,4 +66,7 @@ void ioam6_exit(void);
int ioam6_iptunnel_init(void);
void ioam6_iptunnel_exit(void);
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len);
+
#endif /* _NET_IOAM6_H */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9ba6413fd2e3..323c94f1845b 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -30,12 +30,6 @@
#define RT6_DEBUG 2
-#if RT6_DEBUG >= 3
-#define RT6_TRACE(x...) pr_debug(x)
-#else
-#define RT6_TRACE(x...) do { ; } while (0)
-#endif
-
struct rt6_info;
struct fib6_info;
@@ -179,6 +173,9 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
+
+ struct hlist_node gc_link;
+
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -247,12 +244,18 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
+/* The callers should hold f6i->fib6_table->tb6_lock if a route has ever
+ * been added to a table before.
+ */
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~RTF_EXPIRES;
f6i->expires = 0;
}
+/* The callers should hold f6i->fib6_table->tb6_lock if a route has ever
+ * been added to a table before.
+ */
static inline void fib6_set_expires(struct fib6_info *f6i,
unsigned long expires)
{
@@ -333,8 +336,10 @@ static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
static inline void fib6_info_release(struct fib6_info *f6i)
{
- if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
+ if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) {
+ DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link));
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
+ }
}
enum fib6_walk_state {
@@ -388,6 +393,7 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
+ struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -504,6 +510,38 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
+/* Add the route to the gc list if it is not already there
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_add_gc_list(struct fib6_info *f6i)
+{
+ /* If fib6_node is null, the f6i is not in (or removed from) the
+ * table.
+ *
+ * There is a gap between finding the f6i from the table and
+ * calling this function without the protection of the tb6_lock.
+ * This check makes sure the f6i is not added to the gc list when
+ * it is not on the table.
+ */
+ if (!rcu_dereference_protected(f6i->fib6_node,
+ lockdep_is_held(&f6i->fib6_table->tb6_lock)))
+ return;
+
+ if (hlist_unhashed(&f6i->gc_link))
+ hlist_add_head(&f6i->gc_link, &f6i->fib6_table->tb6_gc_hlist);
+}
+
+/* Remove the route from the gc list if it is on the list.
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_remove_gc_list(struct fib6_info *f6i)
+{
+ if (!hlist_unhashed(&f6i->gc_link))
+ hlist_del_init(&f6i->gc_link);
+}
+
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
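
Together with fib6_set_expires()/fib6_clean_expires() above, these helpers encode one rule: gc_link is only touched under the owning table's tb6_lock, and only while the route is still linked into the table. A hypothetical caller that arms an expiry could look like the sketch below; the function name and context are invented:

#include <net/ip6_fib.h>

/* Mark a route as expiring and register it as a GC candidate, honouring the
 * tb6_lock requirement documented above. */
static void my_rt6_arm_expiry(struct fib6_info *f6i, unsigned long expires)
{
	struct fib6_table *table = f6i->fib6_table;

	spin_lock_bh(&table->tb6_lock);
	fib6_set_expires(f6i, expires);	/* sets RTF_EXPIRES */
	fib6_add_gc_list(f6i);		/* no-op if already listed or unhashed */
	spin_unlock_bh(&table->tb6_lock);
}
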
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 28b065790261..a30c6aa9e5cf 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -170,7 +170,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev, unsigned int pref,
- u32 defrtr_usr_metric);
+ u32 defrtr_usr_metric,
+ int lifetime);
void rt6_purge_dflt_routers(struct net *net);
@@ -331,7 +332,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
out:
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index d4667b7797e3..9b2f69ba5e49 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -264,6 +264,7 @@ struct fib_dump_filter {
bool filter_set;
bool dump_routes;
bool dump_exceptions;
+ bool rtnl_held;
unsigned char protocol;
unsigned char rt_type;
unsigned int flags;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 2d746f4c9a0a..5cd64bb2104d 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -284,7 +284,8 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
- struct rtnl_link_ops *ops);
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index cf25ea21d770..88a8e554f7a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -534,13 +534,15 @@ static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
return 0;
}
-static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
{
+ s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);
+
/* If forwarding is enabled, RA are not accepted unless the special
* hybrid mode (accept_ra=2) is enabled.
*/
- return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
- idev->cnf.accept_ra;
+ return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 :
+ accept_ra;
}
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d400fe2e8668..353488ab94a2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#ifndef MAC80211_H
@@ -214,6 +214,10 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
* @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
+ * bandwidth) OFDMA settings need to be changed
+ * @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
+ * was changed.
*/
enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
@@ -221,6 +225,19 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
+ IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
+};
+
+/**
+ * struct ieee80211_chan_req - A channel "request"
+ * @oper: channel definition to use for operation
+ * @ap: the channel definition of the AP, if any
+ * (otherwise the chan member is %NULL)
+ */
+struct ieee80211_chan_req {
+ struct cfg80211_chan_def oper;
+ struct cfg80211_chan_def ap;
};
/**
@@ -231,6 +248,8 @@ enum ieee80211_chanctx_change {
*
* @def: the channel definition
* @min_def: the minimum channel definition currently required.
+ * @ap: the channel definition the AP actually is operating as,
+ * for use with (wider bandwidth) OFDMA
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -243,6 +262,7 @@ enum ieee80211_chanctx_change {
struct ieee80211_chanctx_conf {
struct cfg80211_chan_def def;
struct cfg80211_chan_def min_def;
+ struct cfg80211_chan_def ap;
u8 rx_chains_static, rx_chains_dynamic;
@@ -340,8 +360,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
- * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
+ * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -376,8 +396,8 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
- BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32),
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
+ BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -537,6 +557,10 @@ struct ieee80211_fils_discovery {
* to that BSS) that can change during the lifetime of the BSS.
*
* @vif: reference to owning VIF
+ * @bss: the cfg80211 bss descriptor. Valid only for a station, and only
+ * when associated. Note: This contains information which is not
+ * necessarily authenticated. For example, information coming from probe
+ * responses.
* @addr: (link) address used locally
* @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
@@ -581,7 +605,7 @@ struct ieee80211_fils_discovery {
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
* @bssid: The BSSID for this BSS
* @enable_beacon: whether beaconing should be enabled or not
- * @chandef: Channel definition for this BSS -- the hardware might be
+ * @chanreq: Channel request for this BSS -- the hardware might be
* configured to a higher bandwidth than this BSS uses, for example.
* @mu_group: VHT MU-MIMO group membership data
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
@@ -642,9 +666,7 @@ struct ieee80211_fils_discovery {
* @tx_pwr_env_num: number of @tx_pwr_env.
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
- * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
* @csa_active: marks whether a channel switch is going on.
- * @csa_punct_bitmap: new puncturing bitmap for channel switch
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
* when it is not assigned. This pointer is RCU-protected due to the TX
@@ -682,6 +704,7 @@ struct ieee80211_fils_discovery {
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
+ struct cfg80211_bss *bss;
const u8 *bssid;
unsigned int link_id;
@@ -714,7 +737,7 @@ struct ieee80211_bss_conf {
u32 cqm_rssi_hyst;
s32 cqm_rssi_low;
s32 cqm_rssi_high;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
struct ieee80211_mu_group_data mu_group;
bool qos;
bool hidden_ssid;
@@ -747,10 +770,8 @@ struct ieee80211_bss_conf {
u8 tx_pwr_env_num;
u8 pwr_reduction;
bool eht_support;
- u16 eht_puncturing;
bool csa_active;
- u16 csa_punct_bitmap;
bool mu_mimo_owner;
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
@@ -1742,8 +1763,9 @@ struct ieee80211_conf {
* @chandef: the new channel to switch to
* @count: the number of TBTT's until the channel switch event
* @delay: maximum delay between the time the AP transmitted the last beacon in
- * current channel and the expected time of the first beacon in the new
- * channel, expressed in TU.
+ * current channel and the expected time of the first beacon in the new
+ * channel, expressed in TU.
+ * @link_id: the link ID of the link doing the channel switch, 0 for non-MLO
*/
struct ieee80211_channel_switch {
u64 timestamp;
@@ -1751,6 +1773,7 @@ struct ieee80211_channel_switch {
bool block_tx;
struct cfg80211_chan_def chandef;
u8 count;
+ u8 link_id;
u32 delay;
};
@@ -1772,6 +1795,10 @@ struct ieee80211_channel_switch {
* this is not pure P2P vif.
* @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is
* enabled for the interface.
+ * @IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW: Ignore wider bandwidth OFDMA
+ * operation on this interface and request a channel context without
+ * the AP definition. Use this e.g. because the device is able to
+ * handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
@@ -1779,6 +1806,7 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
IEEE80211_VIF_EML_ACTIVE = BIT(4),
+ IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
};
@@ -1808,9 +1836,11 @@ enum ieee80211_offload_flags {
* @ps: power-save mode (STA only). This flag is NOT affected by
* offchannel/dynamic_ps operations.
* @aid: association ID number, valid only when @assoc is true
- * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k.
+ * @eml_cap: EML capabilities as described in P802.11be_D4.1 Figure 9-1001j.
* @eml_med_sync_delay: Medium Synchronization delay as described in
- * P802.11be_D2.2 Figure 9-1002j.
+ * P802.11be_D4.1 Figure 9-1001i.
+ * @mld_capa_op: MLD Capabilities and Operations per P802.11be_D4.1
+ * Figure 9-1001k
* @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
* hardware may filter ARP queries targeted for other addresses than listed here.
* The driver must allow ARP queries targeted for all addresses listed here
@@ -1835,6 +1865,7 @@ struct ieee80211_vif_cfg {
u16 aid;
u16 eml_cap;
u16 eml_med_sync_delay;
+ u16 mld_capa_op;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
int arp_addr_cnt;
@@ -1845,6 +1876,35 @@ struct ieee80211_vif_cfg {
u8 ap_addr[ETH_ALEN] __aligned(2);
};
+#define IEEE80211_TTLM_NUM_TIDS 8
+
+/**
+ * struct ieee80211_neg_ttlm - negotiated TID to link map info
+ *
+ * @downlink: bitmap of active links per TID for downlink, or 0 if mapping for
+ * this TID is not included.
+ * @uplink: bitmap of active links per TID for uplink, or 0 if mapping for this
+ * TID is not included.
+ * @valid: info is valid or not.
+ */
+struct ieee80211_neg_ttlm {
+ u16 downlink[IEEE80211_TTLM_NUM_TIDS];
+ u16 uplink[IEEE80211_TTLM_NUM_TIDS];
+ bool valid;
+};
+
+/**
+ * enum ieee80211_neg_ttlm_res - return value for negotiated TTLM handling
+ * @NEG_TTLM_RES_ACCEPT: accept the request
+ * @NEG_TTLM_RES_REJECT: reject the request
+ * @NEG_TTLM_RES_SUGGEST_PREFERRED: reject and suggest a new mapping
+ */
+enum ieee80211_neg_ttlm_res {
+ NEG_TTLM_RES_ACCEPT,
+ NEG_TTLM_RES_REJECT,
+ NEG_TTLM_RES_SUGGEST_PREFERRED
+};
+
/**
* struct ieee80211_vif - per-interface data
*
@@ -1863,6 +1923,11 @@ struct ieee80211_vif_cfg {
* API calls meant for that purpose.
* @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
* Must be a subset of valid_links.
+ * @suspended_links: subset of dormant_links representing links that are
+ * suspended.
+ * 0 for non-MLO.
+ * @neg_ttlm: negotiated TID to link mapping info.
+ * see &struct ieee80211_neg_ttlm.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
@@ -1900,7 +1965,8 @@ struct ieee80211_vif {
struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
- u16 valid_links, active_links, dormant_links;
+ u16 valid_links, active_links, dormant_links, suspended_links;
+ struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
@@ -1947,6 +2013,21 @@ static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif)
return vif->valid_links != 0;
}
+/**
+ * ieee80211_vif_link_active - check if a given link is active
+ * @vif: the vif
+ * @link_id: the link ID to check
+ * Return: %true if the vif is an MLD and the link is active, or if
+ * the vif is not an MLD and the link ID is 0; %false otherwise.
+ */
+static inline bool ieee80211_vif_link_active(const struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif))
+ return link_id == 0;
+ return vif->active_links & BIT(link_id);
+}
+
#define for_each_vif_active_link(vif, link, link_id) \
for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
if ((!(vif)->active_links || \
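
ieee80211_vif_link_active() folds the non-MLO case (a single implicit link with ID 0) into one test. A hypothetical driver-side check, shown only to illustrate the intended use:

#include <net/mac80211.h>

/* Invented driver helper: refuse to program per-link state for links that
 * are not currently active (or do not exist on a non-MLO vif). */
static int my_drv_set_link_power(struct ieee80211_vif *vif,
				 unsigned int link_id, int dbm)
{
	if (!ieee80211_vif_link_active(vif, link_id))
		return -ENOLINK;

	/* ... program the hardware for this link ... */
	return 0;
}
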
@@ -2041,6 +2122,8 @@ static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
* @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver
* for an AES_CMAC key to indicate that it requires sequence number
* generation only
+ * @IEEE80211_KEY_FLAG_SPP_AMSDU: SPP A-MSDUs can be used with this key
+ * (set by mac80211 from the sta->spp_amsdu flag)
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -2054,6 +2137,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9),
IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10),
+ IEEE80211_KEY_FLAG_SPP_AMSDU = BIT(11),
};
/**
@@ -2352,6 +2436,7 @@ struct ieee80211_link_sta {
* would be assigned to link[link_id] where link_id is the id assigned
* by the AP.
* @valid_links: bitmap of valid links, or 0 for non-MLO
+ * @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
u8 addr[ETH_ALEN];
@@ -2365,6 +2450,7 @@ struct ieee80211_sta {
bool tdls_initiator;
bool mfp;
bool mlo;
+ bool spp_amsdu;
u8 max_amsdu_subframes;
struct ieee80211_sta_aggregates *cur;
@@ -2692,6 +2778,11 @@ struct ieee80211_txq {
* @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
* and connecting with a lower bandwidth instead
*
+ * @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so
+ * no need to stop queues. This really should be set by a driver that
+ * implements MLO, so operation can continue on other links when one
+ * link is switching.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2750,6 +2841,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
IEEE80211_HW_DISALLOW_PUNCTURING,
+ IEEE80211_HW_HANDLES_QUIET_CSA,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -4182,7 +4274,7 @@ struct ieee80211_prep_tx_info {
* after a channel switch procedure is completed, allowing the
* driver to go back to a normal configuration.
* @abort_channel_switch: This is an optional callback that is called
- * when channel switch procedure was completed, allowing the
+ * when channel switch procedure was aborted, allowing the
* driver to go back to a normal configuration.
* @channel_switch_rx_beacon: This is an optional callback that is called
* when channel switch procedure is in progress and additional beacon with
@@ -4293,6 +4385,10 @@ struct ieee80211_prep_tx_info {
* flow offloading for flows originating from the vif.
* Note that the driver must not assume that the vif driver_data is valid
* at this point, since the callback can be called during netdev teardown.
+ * @can_neg_ttlm: for managed interface, requests the driver to determine
+ * if the requested TID-To-Link mapping can be accepted or not.
+ * If it's not accepted the driver may suggest a preferred mapping and
+ * modify @ttlm parameter with the suggested TID-to-Link mapping.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4574,7 +4670,8 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
void (*abort_channel_switch)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
@@ -4673,6 +4770,9 @@ struct ieee80211_ops {
struct net_device *dev,
enum tc_setup_type type,
void *type_data);
+ enum ieee80211_neg_ttlm_res
+ (*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *ttlm);
};
/**
@@ -5455,6 +5555,7 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
/**
* ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* The beacon counter should be updated after each beacon transmission.
* This function is called implicitly when
@@ -5464,7 +5565,8 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
*
* Return: new countdown value
*/
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
@@ -5482,20 +5584,23 @@ void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* After a channel switch announcement was scheduled and the counter in this
* announcement hits 1, this function must be called by the driver to
* notify mac80211 that the channel can be changed.
*/
-void ieee80211_csa_finish(struct ieee80211_vif *vif);
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id);
/**
* ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* This function returns whether the countdown reached zero.
*/
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_color_change_finish - notify mac80211 about color change
@@ -5831,6 +5936,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
* @vif: the virtual interface to add the key on
* @keyconf: new key data
+ * @link_id: the link id of the key or -1 for non-MLO
*
* When GTK rekeying was done while the system was suspended, (a) new
* key(s) will be available. These will be needed by mac80211 for proper
@@ -5858,7 +5964,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
*/
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf);
+ struct ieee80211_key_conf *keyconf,
+ int link_id);
/**
* ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
@@ -7416,11 +7523,10 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
* aware of.
- * @gfp: allocation flags
*/
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp);
+ u64 color_bitmap);
/**
* ieee80211_is_tx_data - check if frame is a data frame
@@ -7480,4 +7586,17 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
u16 active_links);
+/* for older drivers - let's not document these ... */
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+
#endif /* MAC80211_H */
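
The new can_neg_ttlm op gives drivers a veto over negotiated TID-to-link mappings. A minimal sketch of a driver implementation, assuming a policy that only accepts mappings matching the currently active links; everything here is illustrative, not a reference implementation:

#include <net/mac80211.h>

static enum ieee80211_neg_ttlm_res
my_drv_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    struct ieee80211_neg_ttlm *ttlm)
{
	int tid;

	for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++)
		if (ttlm->downlink[tid] != vif->active_links ||
		    ttlm->uplink[tid] != vif->active_links)
			goto suggest;

	return NEG_TTLM_RES_ACCEPT;

suggest:
	/* reject, but propose mapping every TID to the active links */
	for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
		ttlm->downlink[tid] = vif->active_links;
		ttlm->uplink[tid] = vif->active_links;
	}
	return NEG_TTLM_RES_SUGGEST_PREFERRED;
}

The driver would then point the .can_neg_ttlm member of its ieee80211_ops at this function.
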
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 2bff5f47ce82..7b17c52e8ce2 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -87,7 +87,7 @@ struct mctp_sock {
};
/* Key for matching incoming packets to sockets or reassembly contexts.
- * Packets are matched on (src,dest,tag).
+ * Packets are matched on (peer EID, local EID, tag).
*
* Lifetime / locking requirements:
*
@@ -133,6 +133,7 @@ struct mctp_sock {
* - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
+ unsigned int net;
mctp_eid_t peer_addr;
mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */
__u8 tag; /* incoming tag match; invert TO for local */
@@ -255,7 +256,8 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
void mctp_key_unref(struct mctp_sk_key *key);
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp);
/* routing <--> device interface */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 13b3a4e29fdb..20c34bd7a077 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -67,8 +67,6 @@ struct net {
*/
spinlock_t rules_mod_lock;
- atomic_t dev_unreg_count;
-
unsigned int dev_base_seq; /* protected by rtnl_mutex */
u32 ifindex;
@@ -450,6 +448,9 @@ struct pernet_operations {
void (*pre_exit)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
+ /* Following method is called with RTNL held. */
+ void (*exit_batch_rtnl)(struct list_head *net_exit_list,
+ struct list_head *dev_kill_list);
unsigned int *id;
size_t size;
};
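
Because exit_batch_rtnl is invoked with rtnl_lock already held, a tunnel-style driver can queue its per-namespace devices on dev_kill_list and let the core issue a single batched unregister. A hedged sketch; all my_tunnel_* names are invented:

#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>

static struct rtnl_link_ops my_tunnel_link_ops;	/* assumed to exist */

static void my_tunnel_exit_batch_rtnl(struct list_head *net_exit_list,
				      struct list_head *dev_kill_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		struct net_device *dev, *aux;

		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &my_tunnel_link_ops)
				unregister_netdevice_queue(dev, dev_kill_list);
	}
}

static struct pernet_operations my_tunnel_net_ops = {
	.exit_batch_rtnl = my_tunnel_exit_batch_rtnl,
};

Registered with register_pernet_device(), this replaces a per-netns rtnl_lock()/unregister loop in the driver's own exit path.
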
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 8b8ed4e13d74..1ec408585373 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -4,6 +4,62 @@
#include <linux/netdevice.h>
+/* See the netdev.yaml spec for definition of each statistic */
+struct netdev_queue_stats_rx {
+ u64 bytes;
+ u64 packets;
+ u64 alloc_fail;
+};
+
+struct netdev_queue_stats_tx {
+ u64 bytes;
+ u64 packets;
+};
+
+/**
+ * struct netdev_stat_ops - netdev ops for fine grained stats
+ * @get_queue_stats_rx: get stats for a given Rx queue
+ * @get_queue_stats_tx: get stats for a given Tx queue
+ * @get_base_stats: get base stats (not belonging to any live instance)
+ *
+ * Query stats for a given object. The values of the statistics are undefined
+ * on entry (specifically they are *not* zero-initialized). Drivers should
+ * assign values only to the statistics they collect. Statistics which are not
+ * collected must be left undefined.
+ *
+ * Queue objects are not necessarily persistent, and only currently active
+ * queues are queried by the per-queue callbacks. This means that per-queue
+ * statistics will not generally add up to the total number of events for
+ * the device. The @get_base_stats callback allows filling in the delta
+ * between events for currently live queues and overall device history.
+ * When the statistics for the entire device are queried, first @get_base_stats
+ * is issued to collect the delta, and then a series of per-queue callbacks.
+ * Only statistics which are set in @get_base_stats will be reported
+ * at the device level, meaning that unlike in queue callbacks, setting
+ * a statistic to zero in @get_base_stats is a legitimate thing to do.
+ * This is because @get_base_stats has a second function of designating which
+ * statistics are in fact correct for the entire device (e.g. when history
+ * for some of the events is not maintained, and reliable "total" cannot
+ * be provided).
+ *
+ * Device drivers can assume that when collecting total device stats,
+ * the @get_base_stats and subsequent per-queue calls are performed
+ * "atomically" (without releasing the rtnl_lock).
+ *
+ * Device drivers are encouraged to reset the per-queue statistics when
+ * the number of queues changes. This is because the primary use case for
+ * per-queue statistics is currently to detect traffic imbalance.
+ */
+struct netdev_stat_ops {
+ void (*get_queue_stats_rx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *stats);
+ void (*get_queue_stats_tx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *stats);
+ void (*get_base_stats)(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx);
+};
+
/**
* DOC: Lockless queue stopping / waking helpers.
*
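
A hedged driver-side sketch for the stat ops above; the myd_* names and the private ring counters are invented, and only struct netdev_stat_ops itself comes from this patch (the driver is assumed to point its net_device stat_ops pointer at the table below):

#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct myd_ring_stats {
	u64 packets;
	u64 bytes;
	u64 alloc_fail;
};

struct myd_priv {
	struct myd_ring_stats *rx;	/* one entry per RX queue */
	struct myd_ring_stats *tx;	/* one entry per TX queue */
};

static void myd_get_queue_stats_rx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats)
{
	struct myd_priv *priv = netdev_priv(dev);

	/* fill only what the driver really counts; leave the rest undefined */
	stats->packets = priv->rx[idx].packets;
	stats->bytes = priv->rx[idx].bytes;
	stats->alloc_fail = priv->rx[idx].alloc_fail;
}

static void myd_get_queue_stats_tx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats)
{
	struct myd_priv *priv = netdev_priv(dev);

	stats->packets = priv->tx[idx].packets;
	stats->bytes = priv->tx[idx].bytes;
}

static void myd_get_base_stats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	/* this sketch keeps no history for removed queues, so the delta is
	 * zero; unlike the per-queue hooks, zero here is meaningful */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops myd_stat_ops = {
	.get_queue_stats_rx	= myd_get_queue_stats_rx,
	.get_queue_stats_tx	= myd_get_queue_stats_tx,
	.get_base_stats		= myd_get_base_stats,
};
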
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index c81021ab07aa..4aeffddb7586 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -35,7 +35,6 @@ struct nf_queue_handler {
void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 510244cc0f8f..e27c28b612e4 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1277,6 +1277,12 @@ static inline bool nft_table_has_owner(const struct nft_table *table)
return table->flags & NFT_TABLE_F_OWNER;
}
+static inline bool nft_table_is_orphan(const struct nft_table *table)
+{
+ return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
+ NFT_TABLE_F_PERSIST;
+}
+
static inline bool nft_base_chain_netdev(int family, u32 hooknum)
{
return family == NFPROTO_NETDEV ||
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 43ae50337685..f3ab0b8a4b18 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -145,15 +145,14 @@ struct netlbl_lsm_cache {
* processing.
*
*/
-#define NETLBL_CATMAP_MAPTYPE u64
#define NETLBL_CATMAP_MAPCNT 4
-#define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+#define NETLBL_CATMAP_MAPSIZE (sizeof(u64) * 8)
#define NETLBL_CATMAP_SIZE (NETLBL_CATMAP_MAPSIZE * \
NETLBL_CATMAP_MAPCNT)
-#define NETLBL_CATMAP_BIT (NETLBL_CATMAP_MAPTYPE)0x01
+#define NETLBL_CATMAP_BIT ((u64)0x01)
struct netlbl_lsm_catmap {
u32 startbit;
- NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+ u64 bitmap[NETLBL_CATMAP_MAPCNT];
struct netlbl_lsm_catmap *next;
};
diff --git a/include/net/netmem.h b/include/net/netmem.h
new file mode 100644
index 000000000000..d8b810245c1d
--- /dev/null
+++ b/include/net/netmem.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Network memory
+ *
+ * Author: Mina Almasry <almasrymina@google.com>
+ */
+
+#ifndef _NET_NETMEM_H
+#define _NET_NETMEM_H
+
+/**
+ * typedef netmem_ref - a nonexistent type marking a reference to generic
+ * network memory.
+ *
+ * A netmem_ref currently is always a reference to a struct page. This
+ * abstraction is introduced so support for new memory types can be added.
+ *
+ * Use the supplied helpers to obtain the underlying memory pointer and fields.
+ */
+typedef unsigned long __bitwise netmem_ref;
+
+/* This conversion fails (returns NULL) if the netmem_ref is not struct page
+ * backed.
+ *
+ * Currently struct page is the only possible netmem, and this helper never
+ * fails.
+ */
+static inline struct page *netmem_to_page(netmem_ref netmem)
+{
+ return (__force struct page *)netmem;
+}
+
+/* Converting from page to netmem is always safe, because a page can always be
+ * a netmem.
+ */
+static inline netmem_ref page_to_netmem(struct page *page)
+{
+ return (__force netmem_ref)page;
+}
+
+#endif /* _NET_NETMEM_H */
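
A short, hypothetical round trip through the helpers shows the intended migration path: keep netmem_ref in generic code and convert only at page-specific boundaries. put_page() here stands in for whatever page-level operation is still needed:

#include <linux/mm.h>
#include <net/netmem.h>

static netmem_ref my_frag_from_page(struct page *page)
{
	return page_to_netmem(page);	/* always safe */
}

static void my_frag_release(netmem_ref netmem)
{
	struct page *page = netmem_to_page(netmem);	/* never NULL today */

	put_page(page);
}
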
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index d92046a4a078..7ca315ad500e 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -47,6 +47,8 @@ struct nh_config {
bool nh_grp_res_has_idle_timer;
bool nh_grp_res_has_unbalanced_timer;
+ bool nh_hw_stats;
+
struct nlattr *nh_encap;
u16 nh_encap_type;
@@ -95,8 +97,14 @@ struct nh_res_table {
struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets);
};
+struct nh_grp_entry_stats {
+ u64_stats_t packets;
+ struct u64_stats_sync syncp;
+};
+
struct nh_grp_entry {
struct nexthop *nh;
+ struct nh_grp_entry_stats __percpu *stats;
u8 weight;
union {
@@ -114,6 +122,7 @@ struct nh_grp_entry {
struct list_head nh_list;
struct nexthop *nh_parent; /* nexthop of group with this entry */
+ u64 packets_hw;
};
struct nh_group {
@@ -124,6 +133,7 @@ struct nh_group {
bool resilient;
bool fdb_nh;
bool has_v4;
+ bool hw_stats;
struct nh_res_table __rcu *res_table;
struct nh_grp_entry nh_entries[] __counted_by(num_nh);
@@ -157,6 +167,7 @@ enum nexthop_event_type {
NEXTHOP_EVENT_REPLACE,
NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
NEXTHOP_EVENT_BUCKET_REPLACE,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
};
enum nh_notifier_info_type {
@@ -164,6 +175,7 @@ enum nh_notifier_info_type {
NH_NOTIFIER_INFO_TYPE_GRP,
NH_NOTIFIER_INFO_TYPE_RES_TABLE,
NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
+ NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS,
};
struct nh_notifier_single_info {
@@ -173,6 +185,7 @@ struct nh_notifier_single_info {
__be32 ipv4;
struct in6_addr ipv6;
};
+ u32 id;
u8 is_reject:1,
is_fdb:1,
has_encap:1;
@@ -180,13 +193,13 @@ struct nh_notifier_single_info {
struct nh_notifier_grp_entry_info {
u8 weight;
- u32 id;
struct nh_notifier_single_info nh;
};
struct nh_notifier_grp_info {
u16 num_nh;
bool is_fdb;
+ bool hw_stats;
struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh);
};
@@ -200,9 +213,21 @@ struct nh_notifier_res_bucket_info {
struct nh_notifier_res_table_info {
u16 num_nh_buckets;
+ bool hw_stats;
struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets);
};
+struct nh_notifier_grp_hw_stats_entry_info {
+ u32 id;
+ u64 packets;
+};
+
+struct nh_notifier_grp_hw_stats_info {
+ u16 num_nh;
+ bool hw_stats_used;
+ struct nh_notifier_grp_hw_stats_entry_info stats[] __counted_by(num_nh);
+};
+
struct nh_notifier_info {
struct net *net;
struct netlink_ext_ack *extack;
@@ -213,17 +238,22 @@ struct nh_notifier_info {
struct nh_notifier_grp_info *nh_grp;
struct nh_notifier_res_table_info *nh_res_table;
struct nh_notifier_res_bucket_info *nh_res_bucket;
+ struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats;
};
};
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
bool offload, bool trap);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
unsigned long *activity);
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets);
/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
@@ -316,7 +346,7 @@ static inline
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
u8 rt_family)
{
- struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
int i;
for (i = 0; i < nhg->num_nh; i++) {
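
The per-entry counters added above (nh_grp_entry_stats) follow the usual per-CPU u64_stats pattern. A sketch of the writer and reader sides under that assumption; the function names are invented and the in-tree code may differ in detail:

#include <linux/u64_stats_sync.h>
#include <net/nexthop.h>

static void my_nh_count_packet(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *stats = this_cpu_ptr(nhge->stats);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->packets);
	u64_stats_update_end(&stats->syncp);
}

static u64 my_nh_read_packets(struct nh_grp_entry *nhge)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct nh_grp_entry_stats *stats = per_cpu_ptr(nhge->stats, cpu);
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			packets = u64_stats_read(&stats->packets);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += packets;
	}
	return total;
}

Hardware-offloaded groups additionally feed their deltas back through nh_grp_hw_stats_report_delta() while handling the NEXTHOP_EVENT_HW_STATS_REPORT_DELTA notification.
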
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5dee575fbe86..3d07abacf08b 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -196,7 +196,7 @@ struct nfc_dev {
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
-extern struct class nfc_class;
+extern const struct class nfc_class;
struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
u32 supported_protocols,
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 76481c465375..5e43a08d3231 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -18,8 +18,9 @@
* Please note DMA-sync-for-CPU is still
* device driver responsibility
*/
-#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
- PP_FLAG_DMA_SYNC_DEV)
+#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
+ PP_FLAG_SYSTEM_POOL)
/*
* Fast allocation side cache array/stack
@@ -128,6 +129,7 @@ struct page_pool_stats {
struct page_pool {
struct page_pool_params_fast p;
+ int cpuid;
bool has_init_callback;
long frag_users;
@@ -203,21 +205,18 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
+struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
+ int cpuid);
struct xdp_mem_info;
#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
#else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
static inline void page_pool_destroy(struct page_pool *pool)
{
}
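
page_pool_create_percpu() is the existing constructor plus an owning CPU recorded in pool->cpuid, and PP_FLAG_SYSTEM_POOL marks the new global per-CPU system pools. A hedged sketch that builds one small pool per possible CPU; the parameter values and flag usage are illustrative only:

#include <linux/err.h>
#include <linux/percpu.h>
#include <net/page_pool/types.h>

static DEFINE_PER_CPU(struct page_pool *, my_pool);

static int my_create_percpu_pools(void)
{
	struct page_pool_params params = {
		.pool_size	= 64,
		.nid		= NUMA_NO_NODE,
		.flags		= PP_FLAG_SYSTEM_POOL,	/* no DMA mapping here */
	};
	int cpu;

	for_each_possible_cpu(cpu) {
		struct page_pool *pp = page_pool_create_percpu(&params, cpu);

		if (IS_ERR(pp))
			return PTR_ERR(pp);
		per_cpu(my_pool, cpu) = pp;
	}
	return 0;
}
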
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index f308e8268651..a4ee43f493bb 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -24,6 +24,8 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+#define NET_CLS_ALIAS_PREFIX "net-cls-"
+#define MODULE_ALIAS_NET_CLS(kind) MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)
struct tcf_block_ext_info {
enum flow_block_binder_type binder_type;
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 1e200d9a066d..d7b7b6cd4aa1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -100,6 +100,8 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
int register_qdisc(struct Qdisc_ops *qops);
void unregister_qdisc(struct Qdisc_ops *qops);
+#define NET_SCH_ALIAS_PREFIX "net-sch-"
+#define MODULE_ALIAS_NET_SCH(id) MODULE_ALIAS(NET_SCH_ALIAS_PREFIX id)
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
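
The alias macros give autoloading a fixed prefix, so a qdisc can be loaded through a "net-sch-<id>" alias rather than its module file name; MODULE_ALIAS_NET_CLS() plays the same role for classifiers. A hypothetical qdisc module would only need the following, with the ops body trimmed for brevity:

#include <linux/module.h>
#include <net/pkt_sched.h>

static struct Qdisc_ops myfifo_qdisc_ops __read_mostly = {
	.id	= "myfifo",
	/* .enqueue / .dequeue / .init callbacks omitted in this sketch */
};

static int __init myfifo_module_init(void)
{
	return register_qdisc(&myfifo_qdisc_ops);
}

static void __exit myfifo_module_exit(void)
{
	unregister_qdisc(&myfifo_qdisc_ops);
}

module_init(myfifo_module_init);
module_exit(myfifo_module_exit);
MODULE_ALIAS_NET_SCH("myfifo");
MODULE_DESCRIPTION("Hypothetical FIFO qdisc used as an alias example");
MODULE_LICENSE("GPL");
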
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 6aef8cb11cc8..b2499f88f8f8 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -46,6 +46,7 @@ struct net_protocol {
* socket lookup?
*/
icmp_strict_tag_validation:1;
+ u32 secret;
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -59,6 +60,7 @@ struct inet6_protocol {
__be32 info);
unsigned int flags; /* INET6_PROTO_xxx */
+ u32 secret;
};
#define INET6_PROTO_NOPOLICY 0x1
@@ -68,6 +70,7 @@ struct inet6_protocol {
struct net_offload {
struct offload_callbacks callbacks;
unsigned int flags; /* Flags used by IPv6 for now */
+ u32 secret;
};
/* This should be set for any extension header which is compatible with GSO. */
#define INET6_PROTO_GSO_EXTHDR 0x1
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 144c39db9898..8839133d6f6b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -83,6 +83,45 @@ static inline struct sock *req_to_sk(struct request_sock *req)
return (struct sock *)req;
}
+/**
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
+ * @prefetched: is set to true if the socket was assigned from bpf
+ */
+static inline struct sock *skb_steal_sock(struct sk_buff *skb,
+ bool *refcounted, bool *prefetched)
+{
+ struct sock *sk = skb->sk;
+
+ if (!sk) {
+ *prefetched = false;
+ *refcounted = false;
+ return NULL;
+ }
+
+ *prefetched = skb_sk_is_prefetched(skb);
+ if (*prefetched) {
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ struct request_sock *req = inet_reqsk(sk);
+
+ *refcounted = false;
+ sk = req->rsk_listener;
+ req->rsk_listener = NULL;
+ return sk;
+ }
+#endif
+ *refcounted = sk_is_refcounted(sk);
+ } else {
+ *refcounted = true;
+ }
+
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ return sk;
+}
+
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
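
skb_steal_sock() now also hands back the listener behind a request socket that a BPF syncookie program attached to the skb, and in that case reports it as not reference counted. A hedged sketch of the calling convention; the surrounding function is invented:

#include <net/request_sock.h>
#include <net/sock.h>

static int my_proto_rcv(struct sk_buff *skb)
{
	bool refcounted, prefetched;
	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);

	if (!sk)
		return -ENOENT;		/* fall back to a normal lookup */

	/* ... deliver skb to sk ... */

	if (refcounted)
		sock_put(sk);	/* BPF-provided listeners are not refcounted */
	return 0;
}
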
diff --git a/include/net/route.h b/include/net/route.h
index 980ab474eabd..d4a0147942f1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -37,9 +37,6 @@
#define RTO_ONLINK 0x01
-#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE))
-#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
-
static inline __u8 ip_sock_rt_scope(const struct sock *sk)
{
if (sock_flag(sk, SOCK_LOCALROUTE))
@@ -163,8 +160,8 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
__u8 proto, __u8 tos, int oif)
{
flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
- RT_SCOPE_UNIVERSE, proto,
- sk ? inet_sk_flowi_flags(sk) : 0,
+ sk ? ip_sock_rt_scope(sk) : RT_SCOPE_UNIVERSE,
+ proto, sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
diff --git a/include/net/rps.h b/include/net/rps.h
new file mode 100644
index 000000000000..7660243e905b
--- /dev/null
+++ b/include/net/rps.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_RPS_H
+#define _NET_RPS_H
+
+#include <linux/types.h>
+#include <linux/static_key.h>
+#include <net/sock.h>
+#include <net/hotdata.h>
+
+#ifdef CONFIG_RPS
+
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
+
+/*
+ * This structure holds an RPS map which can be of variable length. The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+ unsigned int len;
+ struct rcu_head rcu;
+ u16 cpus[];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
+ */
+struct rps_dev_flow {
+ u16 cpu;
+ u16 filter;
+ unsigned int last_qtail;
+};
+#define RPS_NO_FILTER 0xffff
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct rcu_head rcu;
+ struct rps_dev_flow flows[];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+ ((_num) * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32bit value. Upper part is the high-order bits
+ * of flow hash, lower part is CPU number.
+ * rps_cpu_mask is used to partition the space, depending on number of
+ * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32-6=26 bits for the hash.
+ */
+struct rps_sock_flow_table {
+ u32 mask;
+
+ u32 ents[] ____cacheline_aligned_in_smp;
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~net_hotdata.rps_cpu_mask;
+
+ /* We only give a hint, preemption can change CPU under us */
+ val |= raw_smp_processor_id();
+
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in get_rps_cpu().
+ */
+ if (READ_ONCE(table->ents[index]) != val)
+ WRITE_ONCE(table->ents[index], val);
+}
+
+#endif /* CONFIG_RPS */
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ if (!hash)
+ return;
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
+ if (sock_flow_table)
+ rps_record_sock_flow(sock_flow_table, hash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (static_branch_unlikely(&rfs_needed)) {
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
+ }
+#endif
+}
+
+#endif /* _NET_RPS_H */
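
The record helpers are meant to be called from a protocol's recvmsg path so RFS learns which CPU consumes the flow; the table write is only a hint and deliberately tolerates races. A hedged call-site sketch with an invented protocol:

#include <net/rps.h>

static int my_proto_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int flags, int *addr_len)
{
	sock_rps_record_flow(sk);

	/* ... normal receive processing ... */
	return 0;
}
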
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 6506221c5fe3..3bfb80bad173 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -12,6 +12,7 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
+ RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
};
enum rtnl_kinds {
diff --git a/include/net/scm.h b/include/net/scm.h
index cf68acec4d70..92276a2c5543 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -25,6 +25,7 @@ struct scm_creds {
struct scm_fp_list {
short count;
+ short count_unix;
short max;
struct user_struct *user;
struct file *fp[SCM_MAX_FD];
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5a24d6d8522a..f24a1bbcb3ef 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -242,10 +242,7 @@ struct sctp_sock {
int do_auto_asconf;
};
-static inline struct sctp_sock *sctp_sk(const struct sock *sk)
-{
- return (struct sctp_sock *)sk;
-}
+#define sctp_sk(ptr) container_of_const(ptr, struct sctp_sock, inet.sk)
static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
{
diff --git a/include/net/sock.h b/include/net/sock.h
index 54ca8dcbfb43..b5e00702acc1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -378,14 +378,10 @@ struct sock {
#define sk_flags __sk_common.skc_flags
#define sk_rxhash __sk_common.skc_rxhash
- /* early demux fields */
- struct dst_entry __rcu *sk_rx_dst;
- int sk_rx_dst_ifindex;
- u32 sk_rx_dst_cookie;
+ __cacheline_group_begin(sock_write_rx);
- socket_lock_t sk_lock;
atomic_t sk_drops;
- int sk_rcvlowat;
+ __s32 sk_peek_off;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
/*
@@ -402,18 +398,24 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
-
#define sk_rmem_alloc sk_backlog.rmem_alloc
- int sk_forward_alloc;
- u32 sk_reserved_mem;
+ __cacheline_group_end(sock_write_rx);
+
+ __cacheline_group_begin(sock_read_rx);
+ /* early demux fields */
+ struct dst_entry __rcu *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
- /* ===== mostly read cache line ===== */
unsigned int sk_napi_id;
+ u16 sk_busy_poll_budget;
+ u8 sk_prefer_busy_poll;
#endif
+ u8 sk_userlocks;
int sk_rcvbuf;
- int sk_disconnects;
struct sk_filter __rcu *sk_filter;
union {
@@ -422,15 +424,33 @@ struct sock {
struct socket_wq *sk_wq_raw;
/* public: */
};
+
+ void (*sk_data_ready)(struct sock *sk);
+ long sk_rcvtimeo;
+ int sk_rcvlowat;
+ __cacheline_group_end(sock_read_rx);
+
+ __cacheline_group_begin(sock_read_rxtx);
+ int sk_err;
+ struct socket *sk_socket;
+ struct mem_cgroup *sk_memcg;
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
+ __cacheline_group_end(sock_read_rxtx);
- struct dst_entry __rcu *sk_dst_cache;
+ __cacheline_group_begin(sock_write_rxtx);
+ socket_lock_t sk_lock;
+ u32 sk_reserved_mem;
+ int sk_forward_alloc;
+ u32 sk_tsflags;
+ __cacheline_group_end(sock_write_rxtx);
+
+ __cacheline_group_begin(sock_write_tx);
+ int sk_write_pending;
atomic_t sk_omem_alloc;
int sk_sndbuf;
- /* ===== cache line for TX ===== */
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
@@ -439,22 +459,36 @@ struct sock {
struct rb_root tcp_rtx_queue;
};
struct sk_buff_head sk_write_queue;
- __s32 sk_peek_off;
- int sk_write_pending;
- __u32 sk_dst_pending_confirm;
+ u32 sk_dst_pending_confirm;
u32 sk_pacing_status; /* see enum sk_pacing */
- long sk_sndtimeo;
+ struct page_frag sk_frag;
struct timer_list sk_timer;
- __u32 sk_priority;
- __u32 sk_mark;
+
unsigned long sk_pacing_rate; /* bytes per second */
+ atomic_t sk_zckey;
+ atomic_t sk_tskey;
+ __cacheline_group_end(sock_write_tx);
+
+ __cacheline_group_begin(sock_read_tx);
unsigned long sk_max_pacing_rate;
- struct page_frag sk_frag;
+ long sk_sndtimeo;
+ u32 sk_priority;
+ u32 sk_mark;
+ struct dst_entry __rcu *sk_dst_cache;
netdev_features_t sk_route_caps;
- int sk_gso_type;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
+ u16 sk_gso_type;
+ u16 sk_gso_max_segs;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
- __u32 sk_txhash;
+ u32 sk_txhash;
+ u8 sk_pacing_shift;
+ bool sk_use_task_frag;
+ __cacheline_group_end(sock_read_tx);
/*
* Because of non atomicity rules, all
@@ -463,64 +497,44 @@ struct sock {
u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
- sk_no_check_rx : 1,
- sk_userlocks : 4;
- u8 sk_pacing_shift;
+ sk_no_check_rx : 1;
+ u8 sk_shutdown;
u16 sk_type;
u16 sk_protocol;
- u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
- int sk_err,
- sk_err_soft;
+ int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
- u8 sk_txrehash;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- u8 sk_prefer_busy_poll;
- u16 sk_busy_poll_budget;
-#endif
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
- long sk_rcvtimeo;
ktime_t sk_stamp;
#if BITS_PER_LONG==32
seqlock_t sk_stamp_seq;
#endif
- atomic_t sk_tskey;
- atomic_t sk_zckey;
- u32 sk_tsflags;
- u8 sk_shutdown;
+ int sk_disconnects;
+ u8 sk_txrehash;
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;
- bool sk_use_task_frag;
- struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
struct sock_cgroup_data sk_cgrp_data;
- struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *sk);
- void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-#endif
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
@@ -1103,41 +1117,6 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
-static inline void sock_rps_record_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
- struct rps_sock_flow_table *sock_flow_table;
-
- rcu_read_lock();
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_record_sock_flow(sock_flow_table, hash);
- rcu_read_unlock();
-#endif
-}
-
-static inline void sock_rps_record_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
- if (static_branch_unlikely(&rfs_needed)) {
- /* Reading sk->sk_rxhash might incur an expensive cache line
- * miss.
- *
- * TCP_ESTABLISHED does cover almost all states where RFS
- * might be useful, and is cheaper [1] than testing :
- * IPv4: inet_sk(sk)->inet_daddr
- * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
- * OR an additional socket flag
- * [1] : sk_state and sk_prot are in the same cache line.
- */
- if (sk->sk_state == TCP_ESTABLISHED) {
- /* This READ_ONCE() is paired with the WRITE_ONCE()
- * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
- */
- sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
- }
- }
-#endif
-}
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
@@ -1429,6 +1408,7 @@ sk_memory_allocated(const struct sock *sk)
/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
@@ -1437,7 +1417,7 @@ sk_memory_allocated_add(struct sock *sk, int amt)
preempt_disable();
local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+ if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
@@ -1451,7 +1431,7 @@ sk_memory_allocated_sub(struct sock *sk, int amt)
preempt_disable();
local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+ if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
@@ -2830,31 +2810,6 @@ sk_is_refcounted(struct sock *sk)
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
-/**
- * skb_steal_sock - steal a socket from an sk_buff
- * @skb: sk_buff to steal the socket from
- * @refcounted: is set to true if the socket is reference-counted
- * @prefetched: is set to true if the socket was assigned from bpf
- */
-static inline struct sock *
-skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched)
-{
- if (skb->sk) {
- struct sock *sk = skb->sk;
-
- *refcounted = true;
- *prefetched = skb_sk_is_prefetched(skb);
- if (*prefetched)
- *refcounted = sk_is_refcounted(sk);
- skb->destructor = NULL;
- skb->sk = NULL;
- return sk;
- }
- *prefetched = false;
- *refcounted = false;
- return NULL;
-}
-
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f6eba9652d01..6ae35199d3b3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -348,7 +348,7 @@ void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -396,8 +396,8 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
@@ -498,6 +498,22 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct tcp_options_received *tcp_opt,
int mss, u32 tsoff);
+#if IS_ENABLED(CONFIG_BPF)
+struct bpf_tcp_req_attrs {
+ u32 rcv_tsval;
+ u32 rcv_tsecr;
+ u16 mss;
+ u8 rcv_wscale;
+ u8 snd_wscale;
+ u8 ecn_ok;
+ u8 wscale_ok;
+ u8 sack_ok;
+ u8 tstamp_ok;
+ u8 usec_ts_ok;
+ u8 reserved[3];
+};
+#endif
+
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -577,6 +593,15 @@ static inline u32 tcp_cookie_time(void)
return val;
}
+/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
+static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
+{
+ if (usec_ts)
+ return div_u64(val, NSEC_PER_USEC);
+
+ return div_u64(val, NSEC_PER_MSEC);
+}
+
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
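
tcp_ns_to_ts() is a plain unit conversion; two worked values make the resolution switch concrete (NSEC_PER_USEC = 1000, NSEC_PER_MSEC = 1000000):

/* tcp_ns_to_ts(false, 2500000000ULL) == 2500     -> 2.5 s as a ms timestamp
 * tcp_ns_to_ts(true,  2500000000ULL) == 2500000  -> 2.5 s as a usec timestamp
 */
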
@@ -590,6 +615,26 @@ static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *
dst_feature(dst, RTAX_FEATURE_ECN);
}
+#if IS_ENABLED(CONFIG_BPF)
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return skb->sk;
+}
+
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
+#else
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1d107241b901..57c743b7e4fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -51,8 +51,10 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
+#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
@@ -1577,22 +1579,20 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
struct net_device *dev = xdo->dev;
- if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
- return;
-
if (dev && dev->xfrmdev_ops &&
- dev->xfrmdev_ops->xdo_dev_state_update_curlft)
- dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+ dev->xfrmdev_ops->xdo_dev_state_update_stats)
+ dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}
#else
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);