Diffstat (limited to 'include/net')
-rw-r--r--  include/net/act_api.h | 2
-rw-r--r--  include/net/af_rxrpc.h | 2
-rw-r--r--  include/net/bluetooth/hci_core.h | 6
-rw-r--r--  include/net/bluetooth/l2cap.h | 2
-rw-r--r--  include/net/bluetooth/mgmt.h | 18
-rw-r--r--  include/net/bpf_sk_storage.h | 12
-rw-r--r--  include/net/caif/caif_spi.h | 155
-rw-r--r--  include/net/cfg80211.h | 113
-rw-r--r--  include/net/checksum.h | 22
-rw-r--r--  include/net/devlink.h | 228
-rw-r--r--  include/net/drop_monitor.h | 36
-rw-r--r--  include/net/dsa.h | 86
-rw-r--r--  include/net/dst.h | 2
-rw-r--r--  include/net/flow.h | 1
-rw-r--r--  include/net/genetlink.h | 76
-rw-r--r--  include/net/inet_connection_sock.h | 10
-rw-r--r--  include/net/inet_sock.h | 7
-rw-r--r--  include/net/ip.h | 8
-rw-r--r--  include/net/ip_vs.h | 3
-rw-r--r--  include/net/ipv6_stubs.h | 3
-rw-r--r--  include/net/mac80211.h | 149
-rw-r--r--  include/net/mptcp.h | 6
-rw-r--r--  include/net/ndisc.h | 2
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/netfilter/nf_log.h | 1
-rw-r--r--  include/net/netfilter/nf_tables.h | 31
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 11
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h | 33
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h | 46
-rw-r--r--  include/net/netlink.h | 106
-rw-r--r--  include/net/netns/can.h | 1
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/netns/nexthop.h | 2
-rw-r--r--  include/net/netns/nftables.h | 1
-rw-r--r--  include/net/nexthop.h | 4
-rw-r--r--  include/net/pkt_sched.h | 5
-rw-r--r--  include/net/request_sock.h | 9
-rw-r--r--  include/net/sch_generic.h | 11
-rw-r--r--  include/net/sctp/structs.h | 8
-rw-r--r--  include/net/smc.h | 4
-rw-r--r--  include/net/sock.h | 10
-rw-r--r--  include/net/switchdev.h | 1
-rw-r--r--  include/net/tc_act/tc_tunnel_key.h | 5
-rw-r--r--  include/net/tc_act/tc_vlan.h | 2
-rw-r--r--  include/net/tcp.h | 40
-rw-r--r--  include/net/tls.h | 4
-rw-r--r--  include/net/udp_tunnel.h | 24
-rw-r--r--  include/net/vxlan.h | 3
-rw-r--r--  include/net/xdp_sock.h | 30
-rw-r--r--  include/net/xdp_sock_drv.h | 122
-rw-r--r--  include/net/xfrm.h | 49
-rw-r--r--  include/net/xsk_buff_pool.h | 53
52 files changed, 1088 insertions, 480 deletions
diff --git a/include/net/act_api.h b/include/net/act_api.h
index cb382a89ea58..87214927314a 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -166,8 +166,6 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int bind,
u32 flags);
-void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
-
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 91eacbdcf33d..f6abcc0bbd6e 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
-u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
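For illustration, a minimal caller sketch of the changed rxrpc_kernel_get_srtt() convention follows (the function name and parameters come from the diff above; foo_calc_timeout and the timeout math are assumptions): the boolean return presumably reports whether any RTT samples exist, with the smoothed RTT delivered through the new out-parameter.

#include <linux/jiffies.h>
#include <net/af_rxrpc.h>

/* Hypothetical caller: derive an RPC timeout from the smoothed RTT if
 * the call has any samples, otherwise fall back to a fixed default.
 */
static unsigned long foo_calc_timeout(struct socket *sock,
				      struct rxrpc_call *call,
				      unsigned long dflt)
{
	u32 srtt_us;

	if (rxrpc_kernel_get_srtt(sock, call, &srtt_us))
		return usecs_to_jiffies(srtt_us + srtt_us / 2);

	return dflt;	/* no RTT samples yet */
}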
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8caac20556b4..9873e1c8cd16 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -484,6 +484,9 @@ struct hci_dev {
enum suspended_state suspend_state;
bool scanning_paused;
bool suspended;
+ u8 wake_reason;
+ bdaddr_t wake_addr;
+ u8 wake_addr_type;
wait_queue_head_t suspend_wait_q;
DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
@@ -1750,6 +1753,9 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_suspending(struct hci_dev *hdev, u8 state);
+void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
+ u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 8f1e6a7a2df8..1d1232917de7 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -665,6 +665,8 @@ struct l2cap_ops {
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
+ int (*filter) (struct l2cap_chan * chan,
+ struct sk_buff *skb);
};
struct l2cap_conn {
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index beae5c3980f0..6b55155e05e9 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -572,6 +572,8 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+#define MGMT_ADV_FLAG_CAN_SET_TX_POWER BIT(10)
+#define MGMT_ADV_FLAG_HW_OFFLOAD BIT(11)
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
MGMT_ADV_FLAG_SEC_CODED)
@@ -840,6 +842,7 @@ struct mgmt_ev_device_connected {
#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
#define MGMT_DEV_DISCONN_REMOTE 0x03
#define MGMT_DEV_DISCONN_AUTH_FAILURE 0x04
+#define MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND 0x05
#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
struct mgmt_ev_device_disconnected {
@@ -1028,3 +1031,18 @@ struct mgmt_ev_adv_monitor_added {
struct mgmt_ev_adv_monitor_removed {
__le16 monitor_handle;
} __packed;
+
+#define MGMT_EV_CONTROLLER_SUSPEND 0x002d
+struct mgmt_ev_controller_suspend {
+ __u8 suspend_state;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_RESUME 0x002e
+struct mgmt_ev_controller_resume {
+ __u8 wake_reason;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_WAKE_REASON_NON_BT_WAKE 0x0
+#define MGMT_WAKE_REASON_UNEXPECTED 0x1
+#define MGMT_WAKE_REASON_REMOTE_WAKE 0x2
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 5036c94c0503..3c516dd07caf 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -3,6 +3,17 @@
#ifndef _BPF_SK_STORAGE_H
#define _BPF_SK_STORAGE_H
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_local_storage.h>
+
struct sock;
void bpf_sk_storage_free(struct sock *sk);
@@ -10,6 +21,7 @@ void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+struct bpf_local_storage_elem;
struct bpf_sk_storage_diag;
struct sk_buff;
struct nlattr;
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
deleted file mode 100644
index a0bf4cbce71b..000000000000
--- a/include/net/caif/caif_spi.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
- */
-
-#ifndef CAIF_SPI_H_
-#define CAIF_SPI_H_
-
-#include <net/caif/caif_device.h>
-
-#define SPI_CMD_WR 0x00
-#define SPI_CMD_RD 0x01
-#define SPI_CMD_EOT 0x02
-#define SPI_CMD_IND 0x04
-
-#define SPI_DMA_BUF_LEN 8192
-
-#define WL_SZ 2 /* 16 bits. */
-#define SPI_CMD_SZ 4 /* 32 bits. */
-#define SPI_IND_SZ 4 /* 32 bits. */
-
-#define SPI_XFER 0
-#define SPI_SS_ON 1
-#define SPI_SS_OFF 2
-#define SPI_TERMINATE 3
-
-/* Minimum time between different levels is 50 microseconds. */
-#define MIN_TRANSITION_TIME_USEC 50
-
-/* Defines for calculating duration of SPI transfers for a particular
- * number of bytes.
- */
-#define SPI_MASTER_CLK_MHZ 13
-#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
-
-/* Normally this should be aligned on the modem in order to benefit from full
- * duplex transfers. However a size of 8188 provokes errors when running with
- * the modem. These errors occur when packet sizes approaches 4 kB of data.
- */
-#define CAIF_MAX_SPI_FRAME 4092
-
-/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
- * This number should correspond with the modem setting. The application side
- * CAIF accepts any number of embedded downlink CAIF frames.
- */
-#define CAIF_MAX_SPI_PKTS 9
-
-/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFSPI_DBG_PREFILL 0
-
-/* Structure describing a SPI transfer. */
-struct cfspi_xfer {
- u16 tx_dma_len;
- u16 rx_dma_len;
- void *va_tx[2];
- dma_addr_t pa_tx[2];
- void *va_rx;
- dma_addr_t pa_rx;
-};
-
-/* Structure implemented by the SPI interface. */
-struct cfspi_ifc {
- void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
- void (*xfer_done_cb) (struct cfspi_ifc *ifc);
- void *priv;
-};
-
-/* Structure implemented by SPI clients. */
-struct cfspi_dev {
- int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
- void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
- struct cfspi_ifc *ifc;
- char *name;
- u32 clk_mhz;
- void *priv;
-};
-
-/* Enumeration describing the CAIF SPI state. */
-enum cfspi_state {
- CFSPI_STATE_WAITING = 0,
- CFSPI_STATE_AWAKE,
- CFSPI_STATE_FETCH_PKT,
- CFSPI_STATE_GET_NEXT,
- CFSPI_STATE_INIT_XFER,
- CFSPI_STATE_WAIT_ACTIVE,
- CFSPI_STATE_SIG_ACTIVE,
- CFSPI_STATE_WAIT_XFER_DONE,
- CFSPI_STATE_XFER_DONE,
- CFSPI_STATE_WAIT_INACTIVE,
- CFSPI_STATE_SIG_INACTIVE,
- CFSPI_STATE_DELIVER_PKT,
- CFSPI_STATE_MAX,
-};
-
-/* Structure implemented by SPI physical interfaces. */
-struct cfspi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead;
- struct sk_buff_head chead;
- u16 cmd;
- u16 tx_cpck_len;
- u16 tx_npck_len;
- u16 rx_cpck_len;
- u16 rx_npck_len;
- struct cfspi_ifc ifc;
- struct cfspi_xfer xfer;
- struct cfspi_dev *dev;
- unsigned long state;
- struct work_struct work;
- struct workqueue_struct *wq;
- struct list_head list;
- int flow_off_sent;
- u32 qd_low_mark;
- u32 qd_high_mark;
- struct completion comp;
- wait_queue_head_t wait;
- spinlock_t lock;
- bool flow_stop;
- bool slave;
- bool slave_talked;
-#ifdef CONFIG_DEBUG_FS
- enum cfspi_state dbg_state;
- u16 pcmd;
- u16 tx_ppck_len;
- u16 rx_ppck_len;
- struct dentry *dbgfs_dir;
- struct dentry *dbgfs_state;
- struct dentry *dbgfs_frame;
-#endif /* CONFIG_DEBUG_FS */
-};
-
-extern int spi_frm_align;
-extern int spi_up_head_align;
-extern int spi_up_tail_align;
-extern int spi_down_head_align;
-extern int spi_down_tail_align;
-extern struct platform_driver cfspi_spi_driver;
-
-void cfspi_dbg_state(struct cfspi *cfspi, int state);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_spi_remove(struct platform_device *pdev);
-int cfspi_spi_probe(struct platform_device *pdev);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-void cfspi_xfer(struct work_struct *work);
-
-#endif /* CAIF_SPI_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index d9e6b9fbd95b..661edfc8722e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,16 @@ struct wiphy;
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
+ * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
+ * on this channel.
*
*/
enum ieee80211_channel_flags {
@@ -113,6 +123,11 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
IEEE80211_CHAN_NO_HE = 1<<13,
+ IEEE80211_CHAN_1MHZ = 1<<14,
+ IEEE80211_CHAN_2MHZ = 1<<15,
+ IEEE80211_CHAN_4MHZ = 1<<16,
+ IEEE80211_CHAN_8MHZ = 1<<17,
+ IEEE80211_CHAN_16MHZ = 1<<18,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -254,13 +269,23 @@ struct ieee80211_rate {
* struct ieee80211_he_obss_pd - AP settings for spatial reuse
*
* @enable: is the feature enabled.
+ * @sr_ctrl: The SR Control field of SRP element.
+ * @non_srg_max_offset: non-SRG maximum tx power offset
* @min_offset: minimal tx power offset an associated station shall use
* @max_offset: maximum tx power offset an associated station shall use
+ * @bss_color_bitmap: bitmap that indicates the BSS color values used by
+ * members of the SRG
+ * @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
+ * used by members of the SRG
*/
struct ieee80211_he_obss_pd {
bool enable;
+ u8 sr_ctrl;
+ u8 non_srg_max_offset;
u8 min_offset;
u8 max_offset;
+ u8 bss_color_bitmap[8];
+ u8 partial_bssid_bitmap[8];
};
/**
@@ -449,7 +474,9 @@ struct ieee80211_sta_s1g_cap {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @s1g_cap: S1G capabilities in this band
* @edmg_cap: EDMG capabilities in this band
+ * @s1g_cap: S1G capabilities in this band (S1B band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -678,7 +705,10 @@ struct cfg80211_bitrate_mask {
u32 legacy;
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ u16 he_mcs[NL80211_HE_NSS_MAX];
enum nl80211_txrate_gi gi;
+ enum nl80211_he_gi he_gi;
+ enum nl80211_he_ltf he_ltf;
} control[NUM_NL80211_BANDS];
};
@@ -1065,6 +1095,39 @@ struct cfg80211_acl_data {
};
/**
+ * struct cfg80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ * @tmpl_len: Template length
+ * @tmpl: Template data for FILS discovery frame including the action
+ * frame headers.
+ */
+struct cfg80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
+ * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
+ * response parameters in 6GHz.
+ *
+ * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
+ * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
+ * scanning
+ * @tmpl_len: Template length
+ * @tmpl: Template data for probe response
+ */
+struct cfg80211_unsol_bcast_probe_resp {
+ u32 interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
* enum cfg80211_ap_settings_flags - AP settings flags
*
* Used by cfg80211_ap_settings
@@ -1111,6 +1174,8 @@ enum cfg80211_ap_settings_flags {
* @he_obss_pd: OBSS Packet Detection settings
* @he_bss_color: BSS Color settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1141,6 +1206,8 @@ struct cfg80211_ap_settings {
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
};
/**
@@ -1784,6 +1851,7 @@ struct mpath_info {
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
+ * (0 = no, 1 = yes, -1 = do not change)
* @ht_opmode: HT Operation mode
* (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
@@ -2039,6 +2107,27 @@ struct cfg80211_scan_info {
};
/**
+ * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only
+ *
+ * @short_ssid: short SSID to scan for
+ * @bssid: bssid to scan for
+ * @channel_idx: idx of the channel in the channel array in the scan request
+ * which the above info is relevant to
+ * @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
+ * 20 TUs before starting to send probe requests.
+ */
+struct cfg80211_scan_6ghz_params {
+ u32 short_ssid;
+ u32 channel_idx;
+ u8 bssid[ETH_ALEN];
+ bool unsolicited_probe;
+ bool short_ssid_valid;
+ bool psc_no_listen;
+};
+
+/**
* struct cfg80211_scan_request - scan request description
*
* @ssids: SSIDs to scan for (active scan only)
@@ -2065,6 +2154,10 @@ struct cfg80211_scan_info {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @scan_6ghz: relevant for split scan request only,
+ * true if this is the second scan request
+ * @n_6ghz_params: number of 6 GHz params
+ * @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
*/
struct cfg80211_scan_request {
@@ -2092,6 +2185,9 @@ struct cfg80211_scan_request {
struct cfg80211_scan_info info;
bool notified;
bool no_cck;
+ bool scan_6ghz;
+ u32 n_6ghz_params;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
/* keep last */
struct ieee80211_channel *channels[];
@@ -2471,6 +2567,8 @@ enum cfg80211_assoc_req_flags {
* @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
* Request/Response frame or %NULL if FILS is not used. This field starts
* with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
+ * @s1g_capa: S1G capability override
+ * @s1g_capa_mask: S1G capability override mask
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -2485,6 +2583,7 @@ struct cfg80211_assoc_request {
const u8 *fils_kek;
size_t fils_kek_len;
const u8 *fils_nonces;
+ struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
};
/**
@@ -4160,6 +4259,8 @@ struct cfg80211_ops {
/**
* enum wiphy_flags - wiphy capability flags
*
+ * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
+ * into two, first for legacy bands and second for UHB.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -4203,7 +4304,7 @@ struct cfg80211_ops {
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
/* use hole at 1 */
- /* use hole at 2 */
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
@@ -5276,6 +5377,16 @@ ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
}
/**
+ * ieee80211_s1g_channel_width - get allowed channel width from @chan
+ *
+ * Only allowed for band NL80211_BAND_S1GHZ
+ * @chan: channel
+ * Return: The allowed channel width for this center_freq
+ */
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
+
+/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
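As a hedged illustration of how a driver might consume the new AP parameters above, here is a sketch of a .start_ap() handler (foo_start_ap and the pr_debug reporting are illustrative, not part of the diff): cfg80211 fills struct cfg80211_ap_settings from nl80211, and the driver reads the new fils_discovery and unsol_bcast_probe_resp members.

#include <linux/printk.h>
#include <net/cfg80211.h>

/* Hypothetical driver callback: pick up the new FILS discovery and
 * unsolicited broadcast probe response settings from the AP config.
 */
static int foo_start_ap(struct wiphy *wiphy, struct net_device *dev,
			struct cfg80211_ap_settings *params)
{
	const struct cfg80211_fils_discovery *fd = &params->fils_discovery;

	if (fd->max_interval)	/* zero interval: feature not requested */
		pr_debug("FILS discovery: %u-%u TU, template %zu bytes\n",
			 fd->min_interval, fd->max_interval, fd->tmpl_len);

	if (params->unsol_bcast_probe_resp.interval)
		pr_debug("unsolicited bcast probe resp every %u TU\n",
			 params->unsol_bcast_probe_resp.interval);

	/* program the templates into the device here */
	return 0;
}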
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 46754ba9d7b7..0d05b9e8690b 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -24,26 +24,32 @@
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
if (copy_from_user(dst, src, len))
- *err_ptr = -EFAULT;
- return csum_partial(dst, len, sum);
+ return 0;
+ return csum_partial(dst, len, ~0U);
}
#endif
#ifndef HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user
-(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
+(const void *src, void __user *dst, int len)
{
- sum = csum_partial(src, len, sum);
+ __wsum sum = csum_partial(src, len, ~0U);
if (copy_to_user(dst, src, len) == 0)
return sum;
- if (len)
- *err_ptr = -EFAULT;
+ return 0;
+}
+#endif
- return (__force __wsum)-1; /* invalid checksum */
+#ifndef _HAVE_ARCH_CSUM_AND_COPY
+static inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, 0);
}
#endif
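A minimal caller sketch of the new checksum-copy convention (foo_copy_and_csum_from_user is an illustrative wrapper, not from the diff): the err_ptr argument is gone, and because the helpers now seed csum_partial() with ~0U a successful copy should never produce a zero sum, so zero doubles as the fault indication.

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <net/checksum.h>

/* Hypothetical wrapper mapping the new convention back onto an
 * errno-style result for older call sites.
 */
static __wsum foo_copy_and_csum_from_user(const void __user *src, void *dst,
					  int len, int *err)
{
	__wsum csum = csum_and_copy_from_user(src, dst, len);

	*err = csum ? 0 : -EFAULT;
	return csum;
}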
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 8f3c8a443238..b01bb9bca5a2 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -20,6 +20,14 @@
#include <uapi/linux/devlink.h>
#include <linux/xarray.h>
+#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
+ (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
+
+struct devlink_dev_stats {
+ u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+ u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+};
+
struct devlink_ops;
struct devlink {
@@ -38,6 +46,7 @@ struct devlink {
struct list_head trap_policer_list;
const struct devlink_ops *ops;
struct xarray snapshot_ids;
+ struct devlink_dev_stats stats;
struct device *dev;
possible_net_t _net;
struct mutex lock; /* Serializes access to devlink instance specific objects such as
@@ -57,13 +66,30 @@ struct devlink_port_phys_attrs {
u32 split_subport_number; /* If the port is split, this is the number of subport. */
};
+/**
+ * struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
struct devlink_port_pci_pf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u8 external:1;
};
+/**
+ * struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @vf: Associated PCI VF of the PCI PF for this port.
+ * @external: when set, indicates if a port is for an external controller
+ */
struct devlink_port_pci_vf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
- u16 vf; /* Associated PCI VF for of the PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u16 vf;
+ u8 external:1;
};
/**
@@ -73,6 +99,9 @@ struct devlink_port_pci_vf_attrs {
* @splittable: indicates if the port can be split.
* @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
+ * @phys: physical port attributes
+ * @pci_pf: PCI PF port attributes
+ * @pci_vf: PCI VF port attributes
*/
struct devlink_port_attrs {
u8 split:1,
@@ -90,6 +119,7 @@ struct devlink_port_attrs {
struct devlink_port {
struct list_head list;
struct list_head param_list;
+ struct list_head region_list;
struct devlink *devlink;
unsigned int index;
bool registered;
@@ -372,6 +402,25 @@ struct devlink_param_gset_ctx {
};
/**
+ * struct devlink_flash_notify - devlink dev flash notify data
+ * @status_msg: current status string
+ * @component: firmware component being updated
+ * @done: amount of work completed of total amount
+ * @total: amount of work expected to be done
+ * @timeout: expected max timeout in seconds
+ *
+ * These are values to be given to userland to be displayed in order
+ * to show current activity in a firmware update process.
+ */
+struct devlink_flash_notify {
+ const char *status_msg;
+ const char *component;
+ unsigned long done;
+ unsigned long total;
+ unsigned long timeout;
+};
+
+/**
* struct devlink_param - devlink configuration parameter data
* @name: name of the parameter
* @generic: indicates if the parameter is generic or driver specific
@@ -420,6 +469,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -457,6 +507,9 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME "enable_remote_dev_reset"
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -511,6 +564,24 @@ enum devlink_param_generic_id {
/* Firmware bundle identifier */
#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+/**
+ * struct devlink_flash_update_params - Flash Update parameters
+ * @file_name: the name of the flash firmware file to update from
+ * @component: the flash component to update
+ *
+ * With the exception of file_name, drivers must opt-in to parameters by
+ * setting the appropriate bit in the supported_flash_update_params field in
+ * their devlink_ops structure.
+ */
+struct devlink_flash_update_params {
+ const char *file_name;
+ const char *component;
+ u32 overwrite_mask;
+};
+
+#define DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT BIT(0)
+#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1)
+
struct devlink_region;
struct devlink_info_req;
@@ -522,12 +593,36 @@ struct devlink_info_req;
* the data variable must be updated to point to the snapshot data.
* The function will be called while the devlink instance lock is
* held.
+ * @priv: Pointer to driver private data for the region operation
*/
struct devlink_region_ops {
const char *name;
void (*destructor)(const void *data);
- int (*snapshot)(struct devlink *devlink, struct netlink_ext_ack *extack,
+ int (*snapshot)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data);
+ void *priv;
+};
+
+/**
+ * struct devlink_port_region_ops - Region operations for a port
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_port_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ void *priv;
};
struct devlink_fmsg;
@@ -546,6 +641,7 @@ enum devlink_health_reporter_state {
* @dump: callback to dump an object
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
+ * @test: callback to trigger a test event
*/
struct devlink_health_reporter_ops {
@@ -558,6 +654,24 @@ struct devlink_health_reporter_ops {
int (*diagnose)(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack);
+ int (*test)(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_trap_metadata - Packet trap metadata.
+ * @trap_name: Trap name.
+ * @trap_group_name: Trap group name.
+ * @input_dev: Input netdevice.
+ * @fa_cookie: Flow action user cookie.
+ * @trap_type: Trap type.
+ */
+struct devlink_trap_metadata {
+ const char *trap_name;
+ const char *trap_group_name;
+ struct net_device *input_dev;
+ const struct flow_action_cookie *fa_cookie;
+ enum devlink_trap_type trap_type;
};
/**
@@ -704,6 +818,22 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE,
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP,
DEVLINK_TRAP_GENERIC_ID_EARLY_DROP,
+ DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ARP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GRE_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_UDP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_TCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ESP_PARSING,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -739,6 +869,7 @@ enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE,
DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -894,6 +1025,39 @@ enum devlink_trap_group_generic_id {
"flow_action_trap"
#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \
"early_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_VXLAN_PARSING \
+ "vxlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_LLC_SNAP_PARSING \
+ "llc_snap_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_VLAN_PARSING \
+ "vlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_PPPOE_PPP_PARSING \
+ "pppoe_ppp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_MPLS_PARSING \
+ "mpls_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_PARSING \
+ "arp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_1_PARSING \
+ "ip_1_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_N_PARSING \
+ "ip_n_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GRE_PARSING \
+ "gre_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_UDP_PARSING \
+ "udp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_TCP_PARSING \
+ "tcp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IPSEC_PARSING \
+ "ipsec_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_SCTP_PARSING \
+ "sctp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_DCCP_PARSING \
+ "dccp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GTP_PARSING \
+ "gtp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ESP_PARSING \
+ "esp_parsing"
+
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -945,6 +1109,8 @@ enum devlink_trap_group_generic_id {
"acl_sample"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_TRAP \
"acl_trap"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PARSER_ERROR_DROPS \
+ "parser_error_drops"
#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \
_metadata_cap) \
@@ -991,9 +1157,20 @@ enum devlink_trap_group_generic_id {
}
struct devlink_ops {
+ /**
+ * @supported_flash_update_params:
+ * mask of parameters supported by the driver's .flash_update
+ * implementation.
+ */
+ u32 supported_flash_update_params;
+ unsigned long reload_actions;
+ unsigned long reload_limits;
int (*reload_down)(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack);
- int (*reload_up)(struct devlink *devlink,
+ int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
@@ -1051,8 +1228,15 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
- int (*flash_update)(struct devlink *devlink, const char *file_name,
- const char *component,
+ /**
+ * @flash_update: Device flash update function
+ *
+ * Used to perform a flash update for the device. The set of
+ * parameters supported by the driver should be set in
+ * supported_flash_update_params.
+ */
+ int (*flash_update)(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
/**
* @trap_init: Trap initialization function.
@@ -1098,6 +1282,16 @@ struct devlink_ops {
const struct devlink_trap_policer *policer,
struct netlink_ext_ack *extack);
/**
+ * @trap_group_action_set: Trap group action set function.
+ *
+ * If this callback is populated, it will take precedence over looping
+ * over all traps in a group and calling .trap_action_set().
+ */
+ int (*trap_group_action_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ /**
* @trap_policer_init: Trap policer initialization function.
*
* Should be used by device drivers to initialize the trap policer in
@@ -1203,9 +1397,10 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
struct devlink_port_attrs *devlink_port_attrs);
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf);
-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
- u16 pf, u16 vf);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external);
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
@@ -1289,7 +1484,13 @@ struct devlink_region *
devlink_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
void devlink_region_destroy(struct devlink_region *region);
+void devlink_port_region_destroy(struct devlink_region *region);
+
int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
int devlink_region_snapshot_create(struct devlink_region *region,
@@ -1371,6 +1572,9 @@ void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
bool devlink_is_reload_failed(const struct devlink *devlink);
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed);
void devlink_flash_update_begin_notify(struct devlink *devlink);
void devlink_flash_update_end_notify(struct devlink *devlink);
@@ -1379,6 +1583,10 @@ void devlink_flash_update_status_notify(struct devlink *devlink,
const char *component,
unsigned long done,
unsigned long total);
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout);
int devlink_traps_register(struct devlink *devlink,
const struct devlink_trap *traps,
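To illustrate the reworked flash interface above, a driver sketch follows (the foo_* names are assumptions): the driver advertises which optional parameters it understands via supported_flash_update_params and receives them through the new struct devlink_flash_update_params.

#include <net/devlink.h>

/* Hypothetical flash handler: params->file_name is always provided;
 * params->component only arrives because the COMPONENT bit is
 * advertised in supported_flash_update_params below.
 */
static int foo_flash_update(struct devlink *devlink,
			    struct devlink_flash_update_params *params,
			    struct netlink_ext_ack *extack)
{
	devlink_flash_update_status_notify(devlink, "Flashing",
					   params->component, 0, 0);

	/* ... request_firmware(params->file_name) and program it ... */

	devlink_flash_update_status_notify(devlink, "Done",
					   params->component, 0, 0);
	return 0;
}

static const struct devlink_ops foo_devlink_ops = {
	.supported_flash_update_params	= DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT,
	.flash_update			= foo_flash_update,
};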
diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h
deleted file mode 100644
index 3f5b6ddb3179..000000000000
--- a/include/net/drop_monitor.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef _NET_DROP_MONITOR_H_
-#define _NET_DROP_MONITOR_H_
-
-#include <linux/ktime.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <net/flow_offload.h>
-
-/**
- * struct net_dm_hw_metadata - Hardware-supplied packet metadata.
- * @trap_group_name: Hardware trap group name.
- * @trap_name: Hardware trap name.
- * @input_dev: Input netdevice.
- * @fa_cookie: Flow action user cookie.
- */
-struct net_dm_hw_metadata {
- const char *trap_group_name;
- const char *trap_name;
- struct net_device *input_dev;
- const struct flow_action_cookie *fa_cookie;
-};
-
-#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR)
-void net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata);
-#else
-static inline void
-net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
-{
-}
-#endif
-
-#endif /* _NET_DROP_MONITOR_H_ */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 75c8fac82017..35429a140dfa 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -74,8 +74,8 @@ struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt);
- int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
- int *offset);
+ void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
+ int *offset);
/* Used to determine which traffic should match the DSA filter in
* eth_type_trans, and which, if any, should bypass it and be processed
* as regular on the master net device.
@@ -84,6 +84,13 @@ struct dsa_device_ops {
unsigned int overhead;
const char *name;
enum dsa_tag_protocol proto;
+ /* Some tagging protocols either mangle or shift the destination MAC
+ * address, in which case the DSA master would drop packets on ingress
+ * if what it understands out of the destination MAC address is not in
+ * its RX filter.
+ */
+ bool promisc_on_master;
+ bool tail_tag;
};
/* This structure defines the control interfaces that are overlayed by the
@@ -208,6 +215,7 @@ struct dsa_port {
u8 stp_state;
struct net_device *bridge_dev;
struct devlink_port devlink_port;
+ bool devlink_port_setup;
struct phylink *pl;
struct phylink_config pl_config;
@@ -301,6 +309,14 @@ struct dsa_switch {
*/
bool configure_vlan_while_not_filtering;
+ /* If the switch driver always programs the CPU port as egress tagged
+ * despite the VLAN configuration indicating otherwise, then setting
+ * @untag_bridge_pvid will force the DSA receive path to pop the bridge's
+ * default_pvid VLAN tagged frames to offer a consistent behavior
+ * between a vlan_filtering=0 and vlan_filtering=1 bridge device.
+ */
+ bool untag_bridge_pvid;
+
/* In case vlan_filtering_is_global is set, the VLAN awareness state
* should be retrieved from here and not from the per-port settings.
*/
@@ -536,7 +552,8 @@ struct dsa_switch_ops {
* VLAN support
*/
int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
- bool vlan_filtering);
+ bool vlan_filtering,
+ struct switchdev_trans *trans);
int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void (*port_vlan_add)(struct dsa_switch *ds, int port,
@@ -612,11 +629,14 @@ struct dsa_switch_ops {
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /* Devlink parameters */
+ /* Devlink parameters, etc */
int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
+ int (*devlink_info_get)(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
/*
* MTU change functionality. Switches can also adjust their MRU through
@@ -658,12 +678,44 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void dsa_devlink_region_destroy(struct devlink_region *region);
+
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
struct dsa_devlink_priv {
struct dsa_switch *ds;
};
+static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
+{
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline
+struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
+{
+ struct devlink *dl = port->devlink;
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline int dsa_devlink_port_to_port(struct devlink_port *port)
+{
+ return port->index;
+}
+
struct dsa_switch_driver {
struct list_head list;
const struct dsa_switch_ops *ops;
@@ -689,6 +741,32 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
return false;
}
+/* All DSA tags that push the EtherType to the right (basically all except tail
+ * tags, which don't break dissection) can be treated the same from the
+ * perspective of the flow dissector.
+ *
+ * We need to return:
+ * - offset: the (B - A) difference between:
+ * A. the position of the real EtherType and
+ * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
+ * after the normal EtherType was supposed to be)
+ * The offset in bytes is exactly equal to the tagger overhead (and half of
+ * that, in __be16 shorts).
+ *
+ * - proto: the value of the real EtherType.
+ */
+static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
+ __be16 *proto, int *offset)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
+ int tag_len = ops->overhead;
+
+ *offset = tag_len;
+ *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
+#endif
+}
+
#if IS_ENABLED(CONFIG_NET_DSA)
static inline int __dsa_netdevice_ops_check(struct net_device *dev)
{
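As a sketch of how a tagging driver might use the new helper and fields above (the "foo" tagger, its 4-byte overhead and the stub xmit/rcv bodies are illustrative): taggers that shift the EtherType can point .flow_dissect at dsa_tag_generic_flow_dissect, and taggers that mangle the destination MAC can set promisc_on_master.

#include <net/dsa.h>

/* Hypothetical tagger stubs; a real tagger would insert/strip its tag. */
static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	return skb;
}

static struct sk_buff *foo_tag_rcv(struct sk_buff *skb,
				   struct net_device *dev,
				   struct packet_type *pt)
{
	return skb;
}

static const struct dsa_device_ops foo_netdev_ops = {
	.name		= "foo",
	.proto		= DSA_TAG_PROTO_NONE,	/* placeholder */
	.xmit		= foo_tag_xmit,
	.rcv		= foo_tag_rcv,
	.flow_dissect	= dsa_tag_generic_flow_dissect,
	.overhead	= 4,
	/* the tag rewrites the DA, so the master must be promiscuous */
	.promisc_on_master = true,
};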
diff --git a/include/net/dst.h b/include/net/dst.h
index 6ae2e625050d..8ea8812b0b41 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -214,7 +214,7 @@ dst_allfrag(const struct dst_entry *dst)
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
- return dst_metric(dst, RTAX_LOCK) & (1<<metric);
+ return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}
static inline void dst_hold(struct dst_entry *dst)
diff --git a/include/net/flow.h b/include/net/flow.h
index 929d3ca614d0..b2531df3f65f 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_multipath_hash = 0;
}
/* Reset some input parameters after previous lookup */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 6e5f1e1aa822..e55ec1597ce7 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -41,6 +41,8 @@ struct genl_info;
* (private)
* @ops: the operations supported by this family
* @n_ops: number of operations supported by this family
+ * @small_ops: the small-struct operations supported by this family
+ * @n_small_ops: number of small-struct operations supported by this family
*/
struct genl_family {
int id; /* private */
@@ -48,8 +50,12 @@ struct genl_family {
char name[GENL_NAMSIZ];
unsigned int version;
unsigned int maxattr;
- bool netnsok;
- bool parallel_ops;
+ unsigned int mcgrp_offset; /* private */
+ u8 netnsok:1;
+ u8 parallel_ops:1;
+ u8 n_ops;
+ u8 n_small_ops;
+ u8 n_mcgrps;
const struct nla_policy *policy;
int (*pre_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
@@ -58,10 +64,8 @@ struct genl_family {
struct sk_buff *skb,
struct genl_info *info);
const struct genl_ops * ops;
+ const struct genl_small_ops *small_ops;
const struct genl_multicast_group *mcgrps;
- unsigned int n_ops;
- unsigned int n_mcgrps;
- unsigned int mcgrp_offset; /* private */
struct module *module;
};
@@ -101,14 +105,6 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
-static inline int genl_err_attr(struct genl_info *info, int err,
- const struct nlattr *attr)
-{
- info->extack->bad_attr = attr;
-
- return err;
-}
-
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
GENL_DONT_VALIDATE_DUMP = BIT(1),
@@ -116,28 +112,34 @@ enum genl_validate_flags {
};
/**
- * struct genl_info - info that is available during dumpit op call
- * @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
- * @attrs: netlink attributes
+ * struct genl_small_ops - generic netlink operations (small version)
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: flags
+ * @validate: validation flags from enum genl_validate_flags
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ *
+ * This is a cut-down version of struct genl_ops for users who don't need
+ * most of the ancillary infra and want to save space.
*/
-struct genl_dumpit_info {
- const struct genl_family *family;
- const struct genl_ops *ops;
- struct nlattr **attrs;
+struct genl_small_ops {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
};
-static inline const struct genl_dumpit_info *
-genl_dumpit_info(struct netlink_callback *cb)
-{
- return cb->data;
-}
-
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
* @flags: flags
+ * @maxattr: maximum number of attributes supported
+ * @policy: netlink policy (takes precedence over family policy)
+ * @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @start: start callback for dumps
* @dumpit: callback for dumpers
@@ -150,12 +152,32 @@ struct genl_ops {
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
+ const struct nla_policy *policy;
+ unsigned int maxattr;
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
+/**
+ * struct genl_info - info that is available during dumpit op call
+ * @family: generic netlink family - for internal genl code usage
+ * @ops: generic netlink ops - for internal genl code usage
+ * @attrs: netlink attributes
+ */
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ struct genl_ops op;
+ struct nlattr **attrs;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
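A family sketch using the new split ops arrays (the "foo" family, its command number and callback are assumptions): commands that need no per-op policy or attribute limit can live in the smaller genl_small_ops table, registered through small_ops/n_small_ops.

#include <linux/module.h>
#include <net/genetlink.h>

/* Hypothetical doit callback for a single command. */
static int foo_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_small_ops foo_small_ops[] = {
	{
		.cmd		= 1,	/* FOO_CMD_NOOP, assumed */
		.validate	= GENL_DONT_VALIDATE_STRICT,
		.doit		= foo_cmd_noop,
	},
};

static struct genl_family foo_genl_family __ro_after_init = {
	.name		= "foo",
	.version	= 1,
	.maxattr	= 0,
	.module		= THIS_MODULE,
	.small_ops	= foo_small_ops,
	.n_small_ops	= ARRAY_SIZE(foo_small_ops),
};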
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index aa8893c68c50..7338b3865a2a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -86,6 +86,8 @@ struct inet_connection_sock {
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
+ __u32 icsk_rto_min;
+ __u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
@@ -94,7 +96,8 @@ struct inet_connection_sock {
void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state:6,
+ __u8 icsk_ca_state:5,
+ icsk_ca_initialized:1,
icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
@@ -107,7 +110,7 @@ struct inet_connection_sock {
__u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
- __u8 blocked; /* Delayed ACK was blocked by socket lock */
+ __u8 retry; /* Number of attempts */
__u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
@@ -195,7 +198,8 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a3702d1d4875..89163ef8cf4b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -296,13 +296,6 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
-#if !(IS_ENABLED(CONFIG_IPV6))
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- __inet_sk_copy_descendant(sk_to, sk_from, sizeof(struct inet_sock));
-}
-#endif
int inet_sk_rebuild_header(struct sock *sk);
diff --git a/include/net/ip.h b/include/net/ip.h
index b09c48d862cc..2d6b985d11cc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -151,7 +151,7 @@ int igmp_mc_init(void);
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr,
- struct ip_options_rcu *opt);
+ struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
@@ -436,12 +436,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
struct net *net = dev_net(dst->dev);
+ unsigned int mtu;
if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
ip_mtu_locked(dst) ||
!forwarding)
return dst_mtu(dst);
+ /* 'forwarding = true' case should always honour route mtu */
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ return mtu;
+
return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9a59a33787cb..d609e957a3ec 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -25,9 +25,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h> /* for struct ipv6hdr */
#include <net/ipv6.h>
-#if IS_ENABLED(CONFIG_IP_VS_IPV6)
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index d7a7f7c81e7b..8fce558b5fea 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -63,6 +63,9 @@ struct ipv6_stub {
int encap_type);
#endif
struct neigh_table *nd_tbl;
+
+ int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 66e2bfd165e8..e8e295dae744 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -317,6 +317,9 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
* @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
+ * @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
+ * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
+ * status changed.
*
*/
enum ieee80211_bss_change {
@@ -350,6 +353,8 @@ enum ieee80211_bss_change {
BSS_CHANGED_TWT = 1<<27,
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
+ BSS_CHANGED_FILS_DISCOVERY = 1<<30,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -491,6 +496,18 @@ struct ieee80211_ftm_responder_params {
};
/**
+ * struct ieee80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ */
+struct ieee80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -607,6 +624,12 @@ struct ieee80211_ftm_responder_params {
* @he_oper: HE operation information of the AP we are connected to
* @he_obss_pd: OBSS Packet Detection parameters.
* @he_bss_color: BSS coloring settings, if BSS supports HE
+ * @fils_discovery: FILS discovery configuration
+ * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response
+ * interval.
+ * @s1g: BSS is S1G BSS (affects Association Request format).
+ * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
+ * to driver when rate control is offloaded to firmware.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -674,6 +697,10 @@ struct ieee80211_bss_conf {
} he_oper;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_fils_discovery fils_discovery;
+ u32 unsol_bcast_probe_resp_interval;
+ bool s1g;
+ struct cfg80211_bitrate_mask beacon_tx_rate;
};
/**
@@ -720,9 +747,8 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
* that a frame can be transmitted while the queues are stopped for
* off-channel operation.
- * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
- * used to indicate that a pending frame requires TX processing before
- * it can be sent out.
+ * @IEEE80211_TX_CTL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ * (header conversion)
* @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211,
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
@@ -791,7 +817,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
- IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
@@ -812,6 +838,8 @@ enum mac80211_tx_info_flags {
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+#define IEEE80211_TX_RC_S1G_MCS IEEE80211_TX_RC_VHT_MCS
+
/**
* enum mac80211_tx_control_flags - flags to describe transmit control
*
@@ -823,8 +851,9 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
* @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
* @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
- * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation
- * (header conversion)
+ * @IEEE80211_TX_INTCFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
* @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
* has already been assigned to this frame.
*
@@ -837,7 +866,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
- IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6),
+ IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6),
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
};
@@ -1002,7 +1031,8 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @status.ampdu_ack_len: AMPDU ack length
* @status.ampdu_len: AMPDU length
* @status.antenna: (legacy, kept only for iwlegacy)
- * @status.tx_time: airtime consumed for transmission
+ * @status.tx_time: airtime consumed for transmission; note this is only
+ * used for WMM AC, not for airtime fairness
* @status.is_valid_ack_signal: ACK signal is valid
* @status.status_driver_data: driver use area
* @ack: union part for pure ACK data
@@ -1095,12 +1125,14 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
* @info: Basic tx status information
* @skb: Packet skb (can be NULL if not provided by the driver)
* @rate: The TX rate that was used when sending the packet
+ * @free_list: list where processed skbs are stored to be free'd by the driver
*/
struct ieee80211_tx_status {
struct ieee80211_sta *sta;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
struct rate_info *rate;
+ struct list_head *free_list;
};
/**
@@ -1606,6 +1638,21 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
};
+
+/**
+ * enum ieee80211_offload_flags - virtual interface offload flags
+ *
+ * @IEEE80211_OFFLOAD_ENCAP_ENABLED: tx encapsulation offload is enabled
+ * The driver supports sending frames passed as 802.3 frames by mac80211.
+ * It must also support sending 802.11 packets for the same interface.
+ * @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload
+ */
+
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0),
+ IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1),
+};
+
/**
* struct ieee80211_vif - per-interface data
*
@@ -1626,6 +1673,11 @@ enum ieee80211_vif_flags {
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
* at runtime, mac80211 will never touch this field
+ * @offload_flags: hardware offload capabilities/flags for this interface.
+ * These are initialized by mac80211 before calling .add_interface,
+ * .change_interface or .update_vif_offload and updated by the driver
+ * within these ops, based on supported features or runtime change
+ * restrictions.
* @hw_queue: hardware queue for each AC
* @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -1645,6 +1697,8 @@ enum ieee80211_vif_flags {
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
* @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
* protected by fq->lock.
+ * @offload_flags: 802.3 -> 802.11 encapsulation offload flags, see
+ * &enum ieee80211_offload_flags.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1662,6 +1716,7 @@ struct ieee80211_vif {
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
u32 driver_flags;
+ u32 offload_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
@@ -2328,6 +2383,9 @@ struct ieee80211_txq {
* aggregating MPDUs with the same keyid, allowing mac80211 to keep Tx
* A-MPDU sessions active while rekeying with Extended Key ID.
*
+ * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation
+ * offload
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2380,6 +2438,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_MULTI_BSSID,
IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3736,7 +3795,7 @@ enum ieee80211_reconfig_type {
* decremented, and when they reach 1 the driver must call
* ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
* get the csa counter decremented by mac80211, but must check if it is
- * 1 using ieee80211_csa_is_complete() after the beacon has been
+ * 1 using ieee80211_beacon_cntdwn_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
* If the CSA count starts as zero or 1, this function will not be called,
* since there won't be any time to beacon before the switch anyway.
@@ -3814,6 +3873,10 @@ enum ieee80211_reconfig_type {
* @set_tid_config: Apply TID specific configurations. This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer.
* This callback may sleep.
+ * @update_vif_offload: Update virtual interface offload flags
+ * This callback may sleep.
+ * @sta_set_4addr: Called to notify the driver when a station starts/stops using
+ * 4-address mode
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4125,6 +4188,10 @@ struct ieee80211_ops {
int (*reset_tid_config)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 tids);
+ void (*update_vif_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
};
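A hedged sketch of how a driver might wire up the new encapsulation-offload
hooks; all demo_* names are invented, only the mac80211 symbols and flags are
from the changes above.

static void demo_update_vif_offload(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	/* keep the regular 802.11 TX path for monitor interfaces */
	if (vif->type == NL80211_IFTYPE_MONITOR)
		vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;

	/* this hypothetical hardware cannot offload 4-address frames */
	vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_4ADDR;
}

static void demo_sta_set_4addr(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta, bool enabled)
{
	/* reprogram the station entry in the device for 4-address use */
}

static void demo_setup_hw(struct ieee80211_hw *hw)
{
	/* advertised once at registration time */
	ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
}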
/**
@@ -4763,21 +4830,21 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
*/
void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
-#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
+#define IEEE80211_MAX_CNTDWN_COUNTERS_NUM 2
/**
* struct ieee80211_mutable_offsets - mutable beacon offsets
* @tim_offset: position of TIM element
* @tim_length: size of TIM element
- * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
- * to CSA counters. This array can contain zero values which
+ * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets
+ * to countdown counters. This array can contain zero values which
* should be ignored.
*/
struct ieee80211_mutable_offsets {
u16 tim_offset;
u16 tim_length;
- u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
+ u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
};
/**
@@ -4846,31 +4913,31 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
- * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
+ * ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * The csa counter should be updated after each beacon transmission.
+ * The beacon countdown should be updated after each beacon transmission.
* This function is called implicitly when
* ieee80211_beacon_get/ieee80211_beacon_get_tim are called, however if the
* beacon frames are generated by the device, the driver should call this
- * function after each beacon transmission to sync mac80211's csa counters.
+ * function after each beacon transmission to sync mac80211's beacon countdown.
*
- * Return: new csa counter value
+ * Return: new countdown value
*/
-u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_set_counter - request mac80211 to set csa counter
+ * ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @counter: the new value for the counter
*
- * The csa counter can be changed by the device, this API should be
+ * The beacon countdown can be changed by the device, this API should be
* used by the device driver to update csa counter in mac80211.
*
- * It should never be used together with ieee80211_csa_update_counter(),
+ * It should never be used together with ieee80211_beacon_update_cntdwn(),
* as it will cause a race condition around the counter value.
*/
-void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
+void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
@@ -4883,13 +4950,12 @@ void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
void ieee80211_csa_finish(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_is_complete - find out if counters reached 1
+ * ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * This function returns whether the channel switch counters reached zero.
+ * This function returns whether the countdown has reached 1.
*/
-bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
-
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
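A short sketch, assuming a device that transmits beacons in firmware, of how
the renamed countdown helpers are typically combined; demo_* is a placeholder
for the driver's beacon-done handler.

static void demo_beacon_tx_complete(struct ieee80211_vif *vif)
{
	/* keep mac80211's countdown in sync with the device's beacons */
	ieee80211_beacon_update_cntdwn(vif);

	if (ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}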
/**
* ieee80211_proberesp_get - retrieve a Probe Response template
@@ -5344,11 +5410,15 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
+ * @IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER: Skip any interfaces where SDATA
+ * is not in the driver. This may fix crashes during firmware recovery
+ * for instance.
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2),
};
/**
@@ -5648,7 +5718,7 @@ void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
* ieee80211_sta_register_airtime - register airtime usage for a sta/tid
*
- * Register airtime usage for a given sta on a given tid. The driver can call
+ * Register airtime usage for a given sta on a given tid. The driver must call
* this function to notify mac80211 that a station used a certain amount of
* airtime. This information will be used by the TXQ scheduler to schedule
* stations in a way that ensures airtime fairness.
@@ -6594,4 +6664,29 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
*/
bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
+/**
+ * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: FILS discovery template. %NULL on error.
+ */
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
+ * probe response template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: Unsolicited broadcast probe response template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
#endif /* MAC80211_H */
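A hedged example of using the new template getters above; the demo_* function
and the "program into the device" step are assumptions, the ownership rule
(driver frees the skb) is from the kernel-doc.

static int demo_set_fils_discovery(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct sk_buff *tmpl = ieee80211_get_fils_discovery_tmpl(hw, vif);

	if (!tmpl)
		return -ENOENT;

	/* ... hand tmpl->data / tmpl->len to the device ... */

	dev_kfree_skb(tmpl);	/* the driver owns the returned skb */
	return 0;
}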
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 3525d2822abe..753ba7e755d6 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -85,8 +85,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx);
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
@@ -185,8 +184,7 @@ static inline bool mptcp_established_options(struct sock *sk,
}
static inline void mptcp_incoming_options(struct sock *sk,
- struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+ struct sk_buff *skb)
{
}
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 9205a76d967a..38e4094960ce 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -494,7 +494,7 @@ int igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2ee5901bec7a..22bc07f4b043 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -230,7 +230,7 @@ extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int fd);
-u64 net_gen_cookie(struct net *net);
+u64 __net_gen_cookie(struct net *net);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 0d3920896d50..716db4a0fed8 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
unsigned int logflags);
void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
struct sock *sk);
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index bf9491b77d16..55b4cadf290a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -14,6 +14,8 @@
#include <net/netlink.h>
#include <net/flow_offload.h>
+#define NFT_MAX_HOOKS (NF_INET_INGRESS + 1)
+
struct module;
#define NFT_JUMP_STACK_SIZE 16
@@ -143,16 +145,11 @@ static inline u64 nft_reg_load64(const u32 *sreg)
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
unsigned int len)
{
+ if (len % NFT_REG32_SIZE)
+ dst[len / NFT_REG32_SIZE] = 0;
memcpy(dst, src, len);
}
-static inline void nft_data_debug(const struct nft_data *data)
-{
- pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
- data->data[0], data->data[1],
- data->data[2], data->data[3]);
-}
-
/**
* struct nft_ctx - nf_tables rule/set context
*
@@ -894,6 +891,12 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
return (struct nft_expr *)&rule->data[rule->dlen];
}
+static inline bool nft_expr_more(const struct nft_rule *rule,
+ const struct nft_expr *expr)
+{
+ return expr != nft_expr_last(rule) && expr->ops;
+}
+
static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
{
return (void *)&rule->data[rule->dlen];
@@ -950,6 +953,8 @@ struct nft_chain {
bound:1,
genmask:2;
char *name;
+ u16 udlen;
+ u8 *udata;
/* Only used during control plane commit phase: */
struct nft_rule **rules_next;
@@ -982,7 +987,7 @@ struct nft_chain_type {
int family;
struct module *owner;
unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
+ nf_hookfn *hooks[NFT_MAX_HOOKS];
int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
};
@@ -1080,8 +1085,16 @@ struct nft_table {
flags:8,
genmask:2;
char *name;
+ u16 udlen;
+ u8 *udata;
};
+static inline bool nft_base_chain_netdev(int family, u32 hooknum)
+{
+ return family == NFPROTO_NETDEV ||
+ (family == NFPROTO_INET && hooknum == NF_INET_INGRESS);
+}
+
void nft_register_chain_type(const struct nft_chain_type *);
void nft_unregister_chain_type(const struct nft_chain_type *);
@@ -1121,6 +1134,8 @@ struct nft_object {
u32 genmask:2,
use:30;
u64 handle;
+ u16 udlen;
+ u8 *udata;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 78516de14d31..8657e6815b07 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -23,10 +23,19 @@ extern struct nft_object_type nft_secmark_obj_type;
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
+struct nft_bitwise_fast_expr {
+ u32 mask;
+ u32 xor;
+ enum nft_registers sreg:8;
+ enum nft_registers dreg:8;
+};
+
struct nft_cmp_fast_expr {
u32 data;
+ u32 mask;
enum nft_registers sreg:8;
u8 len;
+ bool inv;
};
struct nft_immediate_expr {
@@ -66,6 +75,8 @@ struct nft_payload_set {
extern const struct nft_expr_ops nft_payload_fast_ops;
+extern const struct nft_expr_ops nft_bitwise_fast_ops;
+
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ed7b511f0a59..1f7bea39ad1b 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -53,4 +53,37 @@ static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
nft_set_pktinfo_unspec(pkt, skb);
}
+static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ u32 len, thoff;
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return -1;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len) {
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ } else if (len < thoff) {
+ goto inhdr_error;
+ }
+
+ pkt->tprot_set = true;
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+
+inhdr_error:
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
+ return -1;
+}
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d0f1c537b017..867de29f3f7a 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -70,4 +70,50 @@ static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
nft_set_pktinfo_unspec(pkt, skb);
}
+static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
+ unsigned short frag_off;
+ unsigned int thoff = 0;
+ struct inet6_dev *idev;
+ struct ipv6hdr *ip6h;
+ int protohdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return -1;
+
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->version != 6)
+ goto inhdr_error;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len) {
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ }
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0)
+ goto inhdr_error;
+
+ pkt->tprot_set = true;
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+
+ return 0;
+
+inhdr_error:
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INHDRERRORS);
+ return -1;
+#else
+ return -1;
+#endif
+}
+
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index c0411f14fb53..7356f41d23ba 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -181,8 +181,6 @@ enum {
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
- NLA_EXACT_LEN,
- NLA_MIN_LEN,
__NLA_TYPE_MAX,
};
@@ -199,11 +197,12 @@ struct netlink_range_validation_signed {
enum nla_policy_validation {
NLA_VALIDATE_NONE,
NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_RANGE_WARN_TOO_LONG,
NLA_VALIDATE_MIN,
NLA_VALIDATE_MAX,
+ NLA_VALIDATE_MASK,
NLA_VALIDATE_RANGE_PTR,
NLA_VALIDATE_FUNCTION,
- NLA_VALIDATE_WARN_TOO_LONG,
};
/**
@@ -222,7 +221,7 @@ enum nla_policy_validation {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_MIN_LEN Minimum length of attribute payload
+ * (but see also below with the validation type)
* NLA_NESTED,
* NLA_NESTED_ARRAY Length verification is done by checking len of
* nested header (or empty); len field is used if
@@ -237,11 +236,6 @@ enum nla_policy_validation {
* just like "All other"
* NLA_BITFIELD32 Unused
* NLA_REJECT Unused
- * NLA_EXACT_LEN Attribute should have exactly this length, otherwise
- * it is rejected or warned about, the latter happening
- * if and only if the `validation_type' is set to
- * NLA_VALIDATE_WARN_TOO_LONG.
- * NLA_MIN_LEN Minimum length of attribute payload
* All other Minimum length of attribute payload
*
* Meaning of validation union:
@@ -296,6 +290,11 @@ enum nla_policy_validation {
* pointer to a struct netlink_range_validation_signed
* that indicates the min/max values.
* Use NLA_POLICY_FULL_RANGE_SIGNED().
+ *
+ * NLA_BINARY If the validation type is like the ones for integers
+ * above, then the min/max length (not value like for
+ * integers) of the attribute is enforced.
+ *
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
@@ -309,7 +308,7 @@ enum nla_policy_validation {
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
- * [ATTR_BAZ] = { .type = NLA_EXACT_LEN, .len = sizeof(struct mystruct) },
+ * [ATTR_BAZ] = NLA_POLICY_EXACT_LEN(sizeof(struct mystruct)),
* [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags),
* };
*/
@@ -319,6 +318,7 @@ struct nla_policy {
u16 len;
union {
const u32 bitfield32_valid;
+ const u32 mask;
const char *reject_message;
const struct nla_policy *nested_policy;
struct netlink_range_validation *range;
@@ -335,9 +335,10 @@ struct nla_policy {
* nesting validation starts here.
*
* Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
- * for any types >= this, so need to use NLA_MIN_LEN to get the
- * previous pure { .len = xyz } behaviour. The advantage of this
- * is that types not specified in the policy will be rejected.
+ * for any types >= this, so need to use NLA_POLICY_MIN_LEN() to
+ * get the previous pure { .len = xyz } behaviour. The advantage
+ * of this is that types not specified in the policy will be
+ * rejected.
*
* For completely new families it should be set to 1 so that the
* validation is enforced for all attributes. For existing ones
@@ -349,12 +350,6 @@ struct nla_policy {
};
};
-#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
-#define NLA_POLICY_EXACT_LEN_WARN(_len) \
- { .type = NLA_EXACT_LEN, .len = _len, \
- .validation_type = NLA_VALIDATE_WARN_TOO_LONG, }
-#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_MIN_LEN, .len = _len }
-
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
@@ -369,20 +364,25 @@ struct nla_policy {
#define NLA_POLICY_BITFIELD32(valid) \
{ .type = NLA_BITFIELD32, .bitfield32_valid = valid }
+#define __NLA_IS_UINT_TYPE(tp) \
+ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
+#define __NLA_IS_SINT_TYPE(tp) \
+ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
+
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_U8 || tp == NLA_U16 || \
- tp == NLA_U32 || tp == NLA_U64 || \
- tp == NLA_MSECS) + tp)
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_UINT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_SINT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_S16 || \
- tp == NLA_S32 || tp == NLA_S64) + tp)
-#define NLA_ENSURE_INT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
- tp == NLA_S16 || tp == NLA_U16 || \
- tp == NLA_S32 || tp == NLA_U32 || \
- tp == NLA_S64 || tp == NLA_U64 || \
- tp == NLA_MSECS) + tp)
+ (__NLA_ENSURE(__NLA_IS_SINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ __NLA_IS_SINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
(__NLA_ENSURE(tp != NLA_BITFIELD32 && \
tp != NLA_REJECT && \
@@ -390,14 +390,14 @@ struct nla_policy {
tp != NLA_NESTED_ARRAY) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE, \
.min = _min, \
.max = _max \
}
#define NLA_POLICY_FULL_RANGE(tp, _range) { \
- .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .type = NLA_ENSURE_UINT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE_PTR, \
.range = _range, \
}
@@ -409,17 +409,23 @@ struct nla_policy {
}
#define NLA_POLICY_MIN(tp, _min) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MIN, \
.min = _min, \
}
#define NLA_POLICY_MAX(tp, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
}
+#define NLA_POLICY_MASK(tp, _mask) { \
+ .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MASK, \
+ .mask = _mask, \
+}
+
#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
.type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
.validation_type = NLA_VALIDATE_FUNCTION, \
@@ -427,6 +433,15 @@ struct nla_policy {
.len = __VA_ARGS__ + 0, \
}
+#define NLA_POLICY_EXACT_LEN(_len) NLA_POLICY_RANGE(NLA_BINARY, _len, _len)
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { \
+ .type = NLA_BINARY, \
+ .validation_type = NLA_VALIDATE_RANGE_WARN_TOO_LONG, \
+ .min = _len, \
+ .max = _len \
+}
+#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+
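A hedged sketch of how the reworked macros compose in a policy table; the
DEMO_ATTR_* attribute names are hypothetical, the macros are the ones defined
above.

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x0007),
	[DEMO_ATTR_MAC]   = NLA_POLICY_EXACT_LEN(ETH_ALEN),
	[DEMO_ATTR_BLOB]  = NLA_POLICY_RANGE(NLA_BINARY, 8, 64),
	[DEMO_ATTR_NAME]  = NLA_POLICY_MIN_LEN(2),
};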
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
@@ -726,7 +741,6 @@ static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* See nla_parse()
@@ -824,7 +838,6 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
@@ -1933,10 +1946,21 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range);
-int netlink_policy_dump_start(const struct nla_policy *policy,
- unsigned int maxtype,
- unsigned long *state);
-bool netlink_policy_dump_loop(unsigned long *state);
-int netlink_policy_dump_write(struct sk_buff *skb, unsigned long state);
+struct netlink_policy_dump_state;
+
+int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
+int netlink_policy_dump_write(struct sk_buff *skb,
+ struct netlink_policy_dump_state *state);
+int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
+int netlink_policy_dump_write_attr(struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr);
+void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
#endif
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index b6ab7d1530d7..52fbd8291a96 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -15,7 +15,6 @@ struct can_rcv_lists_stats;
struct netns_can {
#if IS_ENABLED(CONFIG_PROC_FS)
struct proc_dir_entry *proc_dir;
- struct proc_dir_entry *pde_version;
struct proc_dir_entry *pde_stats;
struct proc_dir_entry *pde_reset_stats;
struct proc_dir_entry *pde_rcvlist_all;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 9e36738c1fe1..8e4fcac4df72 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -183,6 +183,7 @@ struct netns_ipv4 {
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
+ int sysctl_tcp_reflect_tos;
int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;
diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h
index 1937476c94a0..1849e77eb68a 100644
--- a/include/net/netns/nexthop.h
+++ b/include/net/netns/nexthop.h
@@ -14,6 +14,6 @@ struct netns_nexthop {
unsigned int seq; /* protected by rtnl_mutex */
u32 last_id_allocated;
- struct atomic_notifier_head notifier_chain;
+ struct blocking_notifier_head notifier_chain;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index a1a8d45adb42..6c0806bd8d1e 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -8,6 +8,7 @@ struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
struct list_head module_list;
+ struct list_head notify_list;
struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3a4f9e3b91a5..2fd76a9b6dc8 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -105,13 +105,9 @@ struct nexthop {
};
enum nexthop_event_type {
- NEXTHOP_EVENT_ADD,
NEXTHOP_EVENT_DEL
};
-int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
- enum nexthop_event_type event_type,
- struct nexthop *nh);
int register_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ac8c890a2657..4ed32e6b0201 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -19,12 +19,9 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-#define QDISC_ALIGNTO 64
-#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
-
static inline void *qdisc_priv(struct Qdisc *q)
{
- return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
+ return &q->privdata;
}
/*
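A small sketch of what the qdisc_priv() change means for a qdisc: its private
state now lives in the Qdisc's trailing privdata[] area rather than behind a
padded allocation. The demo_* names are invented.

struct demo_sched_data {		/* hypothetical qdisc private state */
	u32 limit;
};

static int demo_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct demo_sched_data *q = qdisc_priv(sch);	/* == &sch->privdata */

	q->limit = 1000;
	return 0;
}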
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index b2eb8b4ba697..29e41ff3ec93 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -41,6 +41,13 @@ struct request_sock_ops {
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
+struct saved_syn {
+ u32 mac_hdrlen;
+ u32 network_hdrlen;
+ u32 tcp_hdrlen;
+ u8 data[];
+};
+
/* struct request_sock - mini sock to represent a connection request
*/
struct request_sock {
@@ -60,7 +67,7 @@ struct request_sock {
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
u32 secid;
u32 peer_secid;
};
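With the new struct saved_syn layout, the saved MAC, network and TCP headers
sit back-to-back in @data, so the total saved length is just the sum of the
three header lengths; a minimal illustrative helper (demo_* name invented):

static inline u32 demo_saved_syn_len(const struct saved_syn *syn)
{
	return syn->mac_hdrlen + syn->network_hdrlen + syn->tcp_hdrlen;
}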
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d60e7c39d60c..d8fd8676fc72 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -91,7 +91,7 @@ struct Qdisc {
struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- int padded;
+ int pad;
refcount_t refcnt;
/*
@@ -112,6 +112,9 @@ struct Qdisc {
/* for NOLOCK qdisc, true if there are no enqueued skbs */
bool empty;
struct rcu_head rcu;
+
+ /* private data */
+ long privdata[] ____cacheline_aligned;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -1047,12 +1050,6 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
return 0;
}
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index b33f1aefad09..0bdff38eb4bb 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -226,12 +226,14 @@ struct sctp_sock {
data_ready_signalled:1;
atomic_t pd_mode;
+
+ /* Fields after this point will be skipped on copies, like on accept
+ * and peeloff operations
+ */
+
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
- /* These must be the last fields, as they will skipped on copies,
- * like on accept and peeloff operations
- */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
diff --git a/include/net/smc.h b/include/net/smc.h
index 646feb4bc75f..e441aa97ad61 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -37,6 +37,8 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
+#define ISM_RESERVED_VLANID 0x1FFF
+
#define ISM_ERROR 0xFFFF
struct smcd_event {
@@ -63,6 +65,8 @@ struct smcd_ops {
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
+ void (*get_system_eid)(struct smcd_dev *dev, u8 **eid);
+ u16 (*get_chid)(struct smcd_dev *dev);
};
struct smcd_dev {
diff --git a/include/net/sock.h b/include/net/sock.h
index 064637d1ddf6..a5c6ae78df77 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -246,7 +246,7 @@ struct sock_common {
/* public: */
};
-struct bpf_sk_storage;
+struct bpf_local_storage;
/**
* struct sock - network layer representation of sockets
@@ -517,7 +517,7 @@ struct sock {
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
- struct bpf_sk_storage __rcu *sk_bpf_storage;
+ struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
struct rcu_head sk_rcu;
};
@@ -845,7 +845,6 @@ enum sock_flags {
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
- SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
SOCK_FASYNC, /* fasync() active */
@@ -1478,7 +1477,7 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
if (!sk_has_account(sk))
return true;
- return size<= sk->sk_forward_alloc ||
+ return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
skb_pfmemalloc(skb);
}
@@ -1526,7 +1525,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
@@ -2197,6 +2195,8 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index ff2246914301..53e8b4994296 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -203,6 +203,7 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index e1057b255f69..879fe8cff581 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
return &params->tcft_enc_metadata->u.tun_info;
#else
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 4e2502408c31..f051046ba034 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,8 @@
struct tcf_vlan_params {
int tcfv_action;
+ unsigned char tcfv_push_dst[ETH_ALEN];
+ unsigned char tcfv_push_src[ETH_ALEN];
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
u8 tcfv_push_prio;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eab6c7510b5b..d4ef5bf94168 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -394,7 +394,7 @@ void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
-void tcp_init_transfer(struct sock *sk, int bpf_op);
+void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -455,7 +455,8 @@ enum tcp_synack_type {
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -699,7 +700,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
static inline u32 tcp_rto_min(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
+ u32 rto_min = inet_csk(sk)->icsk_rto_min;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@ -941,16 +942,6 @@ INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
#endif
-static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
- return true;
-#endif
- return false;
-}
-
/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
@@ -1113,7 +1104,7 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin);
+ bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1423,6 +1414,8 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
* If 87.5 % (7/8) of the space has been consumed, we want to override
* SO_RCVLOWAT constraint, since we are receiving skbs with too small
@@ -2035,7 +2028,8 @@ struct tcp_request_sock_ops {
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
};
extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
@@ -2233,6 +2227,22 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
struct msghdr *msg, int len, int flags);
#endif /* CONFIG_NET_SOCK_MSG */
+#ifdef CONFIG_CGROUP_BPF
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+ skops->skb = skb;
+ skops->skb_data_end = skb->data + end_offset;
+}
+#else
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+}
+#endif
+
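A rough sketch of how a caller might expose a packet to a BPF_SOCK_OPS program
using the helper above; demo_prepare_skops() is invented and it assumes
skb->data points at the TCP header at this stage.

static void demo_prepare_skops(struct bpf_sock_ops_kern *sock_ops,
			       struct sk_buff *skb)
{
	/* let the program read up to the end of the TCP header */
	bpf_skops_init_skb(sock_ops, skb, tcp_hdrlen(skb));
}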
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
diff --git a/include/net/tls.h b/include/net/tls.h
index e5dac7e74e79..baf1e99d8193 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -679,10 +679,6 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
-struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-
int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 94bb7a882250..2ea453dac876 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -200,11 +200,27 @@ enum udp_tunnel_nic_info_flags {
UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
};
+struct udp_tunnel_nic;
+
+#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2)
+
+struct udp_tunnel_nic_shared {
+ struct udp_tunnel_nic *udp_tunnel_nic_info;
+
+ struct list_head devices;
+};
+
+struct udp_tunnel_nic_shared_node {
+ struct net_device *dev;
+ struct list_head list;
+};
+
/**
* struct udp_tunnel_nic_info - driver UDP tunnel offload information
* @set_port: callback for adding a new port
* @unset_port: callback for removing a port
* @sync_table: callback for syncing the entire port table at once
+ * @shared: reference to device global state (optional)
* @flags: device flags from enum udp_tunnel_nic_info_flags
* @tables: UDP port tables this device has
* @tables.n_entries: number of entries in this table
@@ -213,6 +229,12 @@ enum udp_tunnel_nic_info_flags {
* Drivers are expected to provide either @set_port and @unset_port callbacks
* or the @sync_table callback. Callbacks are invoked with rtnl lock held.
*
+ * Devices which (misguidedly) share the UDP tunnel port table across multiple
+ * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
+ * point @shared at it.
+ * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
+ * sharing a table.
+ *
* Known limitations:
* - UDP tunnel port notifications are fundamentally best-effort -
* it is likely the driver will both see skbs which use a UDP tunnel port,
@@ -234,6 +256,8 @@ struct udp_tunnel_nic_info {
/* all at once */
int (*sync_table)(struct net_device *dev, unsigned int table);
+ struct udp_tunnel_nic_shared *shared;
+
unsigned int flags;
struct udp_tunnel_nic_table_info {
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 3a41627cbdfe..08537aa14f7c 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -121,6 +121,9 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
+#define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \
+ VXLAN_GBP_ID_MASK)
+
/*
* VXLAN Generic Protocol Extension (VXLAN_F_GPE):
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index c9d87cc40c11..1a9559c0cbdd 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -18,25 +18,19 @@ struct xsk_queue;
struct xdp_buff;
struct xdp_umem {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
- struct xsk_buff_pool *pool;
+ void *addrs;
u64 size;
u32 headroom;
u32 chunk_size;
+ u32 chunks;
+ u32 npgs;
struct user_struct *user;
refcount_t users;
- struct work_struct work;
- struct page **pgs;
- u32 npgs;
- u16 queue_id;
- u8 need_wakeup;
u8 flags;
- int id;
- struct net_device *dev;
bool zc;
- spinlock_t xsk_tx_list_lock;
- struct list_head xsk_tx_list;
+ struct page **pgs;
+ int id;
+ struct list_head xsk_dma_list;
};
struct xsk_map {
@@ -48,10 +42,11 @@ struct xsk_map {
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
+ struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum {
@@ -59,10 +54,9 @@ struct xdp_sock {
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
- struct list_head list;
+ struct list_head tx_list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
* in the SKB destructor callback.
*/
@@ -77,6 +71,10 @@ struct xdp_sock {
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
+ struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+ struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
#ifdef CONFIG_XDP_SOCKETS
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index ccf848f7efa4..5b1ee8a9976d 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -11,47 +11,50 @@
#ifdef CONFIG_XDP_SOCKETS
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
+void xsk_tx_release(struct xsk_buff_pool *pool);
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id);
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
-static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
- return XDP_PACKET_HEADROOM + umem->headroom;
+ return XDP_PACKET_HEADROOM + pool->headroom;
}
-static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
- return umem->chunk_size;
+ return pool->chunk_size;
}
-static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
- return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
-static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
- xp_set_rxq_info(umem->pool, rxq);
+ xp_set_rxq_info(pool, rxq);
}
-static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
- xp_dma_unmap(umem->pool, attrs);
+ xp_dma_unmap(pool, attrs);
}
-static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
- unsigned long attrs)
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
{
- return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
+ struct xdp_umem *umem = pool->umem;
+
+ return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
@@ -68,14 +71,14 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return xp_get_frame_dma(xskb);
}
-static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
- return xp_alloc(umem->pool);
+ return xp_alloc(pool);
}
-static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
- return xp_can_alloc(umem->pool, count);
+ return xp_can_alloc(pool, count);
}
static inline void xsk_buff_free(struct xdp_buff *xdp)
@@ -85,100 +88,104 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
-static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
{
- return xp_raw_get_dma(umem->pool, addr);
+ return xp_raw_get_dma(pool, addr);
}
-static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
- return xp_raw_get_data(umem->pool, addr);
+ return xp_raw_get_data(pool, addr);
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ if (!pool->dma_need_sync)
+ return;
+
xp_dma_sync_for_cpu(xskb);
}
-static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
{
- xp_dma_sync_for_device(umem->pool, dma, size);
+ xp_dma_sync_for_device(pool, dma, size);
}
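A hedged driver-side sketch of the renamed pool-based helpers above; the
demo_* functions and the surrounding RX machinery are invented, only the
xsk_* calls are the API from this header.

static struct xdp_buff *demo_refill_one(struct xsk_buff_pool *pool,
					dma_addr_t *dma)
{
	struct xdp_buff *xdp;

	if (!xsk_buff_can_alloc(pool, 1))
		return NULL;

	xdp = xsk_buff_alloc(pool);
	if (xdp)
		*dma = xsk_buff_xdp_get_dma(xdp);	/* give this to the HW */
	return xdp;
}

static void demo_handle_rx_frame(struct xsk_buff_pool *pool,
				 struct xdp_buff *xdp, u32 len)
{
	/* sync before the CPU touches the frame, then run the XDP program */
	xsk_buff_dma_sync_for_cpu(xdp, pool);
	xdp->data_end = xdp->data + len;
	/* ... bpf_prog_run_xdp() / xsk_buff_free() as appropriate ... */
}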
#else
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
- struct xdp_desc *desc)
+static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
{
return false;
}
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
- u16 queue_id)
+static inline struct xsk_buff_pool *
+xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
return NULL;
}
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
+static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
return false;
}
-static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
}
-static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
}
-static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
- unsigned long attrs)
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
{
return 0;
}
@@ -193,12 +200,12 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return 0;
}
-static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
return NULL;
}
-static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return false;
}
@@ -207,21 +214,22 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
-static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
{
return 0;
}
-static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
return NULL;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}
-static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 2737d24ec244..b2a06f10b62c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1773,21 +1773,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es
static inline int xfrm_replay_clone(struct xfrm_state *x,
struct xfrm_state *orig)
{
- x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+
+ x->replay_esn = kmemdup(orig->replay_esn,
+ xfrm_replay_state_esn_len(orig->replay_esn),
GFP_KERNEL);
if (!x->replay_esn)
return -ENOMEM;
-
- x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
- x->replay_esn->replay_window = orig->replay_esn->replay_window;
-
- x->preplay_esn = kmemdup(x->replay_esn,
- xfrm_replay_state_esn_len(x->replay_esn),
+ x->preplay_esn = kmemdup(orig->preplay_esn,
+ xfrm_replay_state_esn_len(orig->preplay_esn),
GFP_KERNEL);
- if (!x->preplay_esn) {
- kfree(x->replay_esn);
+ if (!x->preplay_esn)
return -ENOMEM;
- }
return 0;
}
@@ -2000,6 +1996,39 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
return 0;
}
+extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
+extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
+
+struct xfrm_translator {
+ /* Allocate frag_list and put compat translation there */
+ int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
+
+ /* Allocate nlmsg with 64-bit translation of received 32-bit message */
+ struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
+ int maxtype, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
+
+ /* Translate 32-bit user_policy from sockptr */
+ int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
+
+ struct module *owner;
+};
+
+#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
+extern int xfrm_register_translator(struct xfrm_translator *xtr);
+extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
+extern struct xfrm_translator *xfrm_get_translator(void);
+extern void xfrm_put_translator(struct xfrm_translator *xtr);
+#else
+static inline struct xfrm_translator *xfrm_get_translator(void)
+{
+ return NULL;
+}
+static inline void xfrm_put_translator(struct xfrm_translator *xtr)
+{
+}
+#endif
+
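A hedged sketch of a compat module registering itself through the new
translator interface; the demo_* callbacks are placeholders and are not shown.

static struct xfrm_translator demo_translator = {
	.alloc_compat			= demo_alloc_compat,
	.rcv_msg_compat			= demo_rcv_msg_compat,
	.xlate_user_policy_sockptr	= demo_xlate_user_policy,
	.owner				= THIS_MODULE,
};

static int __init demo_compat_init(void)
{
	return xfrm_register_translator(&demo_translator);
}

static void __exit demo_compat_exit(void)
{
	xfrm_unregister_translator(&demo_translator);
}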
#if IS_ENABLED(CONFIG_IPV6)
static inline bool xfrm6_local_dontfrag(const struct sock *sk)
{
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 6842990e2712..0140d086dc84 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -13,6 +13,8 @@ struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
+struct xdp_umem;
+struct xdp_sock;
struct device;
struct page;
@@ -26,34 +28,68 @@ struct xdp_buff_xsk {
struct list_head free_list_node;
};
+struct xsk_dma_map {
+ dma_addr_t *dma_pages;
+ struct device *dev;
+ struct net_device *netdev;
+ refcount_t users;
+ struct list_head list; /* Protected by the RTNL_LOCK */
+ u32 dma_pages_cnt;
+ bool dma_need_sync;
+};
+
struct xsk_buff_pool {
- struct xsk_queue *fq;
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
+ /* For performance reasons, each buff pool has its own array of dma_pages
+ * even when they are identical.
+ */
dma_addr_t *dma_pages;
struct xdp_buff_xsk *heads;
u64 chunk_mask;
u64 addrs_cnt;
u32 free_list_cnt;
u32 dma_pages_cnt;
- u32 heads_cnt;
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
u32 frame_len;
+ u8 cached_need_wakeup;
+ bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
void *addrs;
- struct device *dev;
struct xdp_buff_xsk *free_heads[];
};
/* AF_XDP core. */
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
- u32 chunk_size, u32 headroom, u64 size,
- bool unaligned);
-void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ struct xdp_umem *umem);
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+ u16 queue_id, u16 flags);
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+ struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
+void xp_get_pool(struct xsk_buff_pool *pool);
+void xp_put_pool(struct xsk_buff_pool *pool);
+void xp_clear_dev(struct xsk_buff_pool *pool);
+void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
@@ -80,9 +116,6 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- if (!xskb->pool->dma_need_sync)
- return;
-
xp_dma_sync_for_cpu_slow(xskb);
}