Diffstat (limited to 'include/net')
-rw-r--r--  include/net/9p/client.h | 2
-rw-r--r--  include/net/act_api.h | 1
-rw-r--r--  include/net/addrconf.h | 45
-rw-r--r--  include/net/af_rxrpc.h | 56
-rw-r--r--  include/net/af_unix.h | 86
-rw-r--r--  include/net/af_vsock.h | 8
-rw-r--r--  include/net/ax25.h | 16
-rw-r--r--  include/net/bluetooth/bluetooth.h | 22
-rw-r--r--  include/net/bluetooth/hci.h | 233
-rw-r--r--  include/net/bluetooth/hci_core.h | 277
-rw-r--r--  include/net/bluetooth/hci_drv.h | 76
-rw-r--r--  include/net/bluetooth/hci_mon.h | 2
-rw-r--r--  include/net/bluetooth/hci_sock.h | 2
-rw-r--r--  include/net/bluetooth/hci_sync.h | 36
-rw-r--r--  include/net/bluetooth/l2cap.h | 49
-rw-r--r--  include/net/bluetooth/mgmt.h | 11
-rw-r--r--  include/net/bluetooth/rfcomm.h | 2
-rw-r--r--  include/net/bond_3ad.h | 5
-rw-r--r--  include/net/bond_alb.h | 2
-rw-r--r--  include/net/bond_options.h | 2
-rw-r--r--  include/net/bonding.h | 3
-rw-r--r--  include/net/busy_poll.h | 36
-rw-r--r--  include/net/caif/caif_layer.h | 6
-rw-r--r--  include/net/caif/cfpkt.h | 2
-rw-r--r--  include/net/caif/cfsrvl.h | 1
-rw-r--r--  include/net/calipso.h | 2
-rw-r--r--  include/net/cfg80211.h | 676
-rw-r--r--  include/net/checksum.h | 18
-rw-r--r--  include/net/cipso_ipv4.h | 8
-rw-r--r--  include/net/devlink.h | 73
-rw-r--r--  include/net/dropreason-core.h | 173
-rw-r--r--  include/net/dropreason.h | 6
-rw-r--r--  include/net/dsa.h | 90
-rw-r--r--  include/net/dscp.h | 76
-rw-r--r--  include/net/dst.h | 13
-rw-r--r--  include/net/dst_cache.h | 4
-rw-r--r--  include/net/dst_metadata.h | 10
-rw-r--r--  include/net/dst_ops.h | 2
-rw-r--r--  include/net/eee.h | 5
-rw-r--r--  include/net/erspan.h | 4
-rw-r--r--  include/net/espintcp.h | 2
-rw-r--r--  include/net/fib_notifier.h | 2
-rw-r--r--  include/net/fib_rules.h | 31
-rw-r--r--  include/net/flow.h | 2
-rw-r--r--  include/net/flow_dissector.h | 25
-rw-r--r--  include/net/flow_offload.h | 93
-rw-r--r--  include/net/genetlink.h | 27
-rw-r--r--  include/net/gre.h | 70
-rw-r--r--  include/net/gro.h | 132
-rw-r--r--  include/net/gtp.h | 5
-rw-r--r--  include/net/hotdata.h | 4
-rw-r--r--  include/net/hwbm.h | 4
-rw-r--r--  include/net/ieee80211_radiotap.h | 46
-rw-r--r--  include/net/ieee8021q.h | 57
-rw-r--r--  include/net/inet6_connection_sock.h | 2
-rw-r--r--  include/net/inet6_hashtables.h | 16
-rw-r--r--  include/net/inet_common.h | 4
-rw-r--r--  include/net/inet_connection_sock.h | 53
-rw-r--r--  include/net/inet_frag.h | 10
-rw-r--r--  include/net/inet_hashtables.h | 28
-rw-r--r--  include/net/inet_sock.h | 23
-rw-r--r--  include/net/inet_timewait_sock.h | 21
-rw-r--r--  include/net/inetpeer.h | 12
-rw-r--r--  include/net/ip.h | 63
-rw-r--r--  include/net/ip6_fib.h | 17
-rw-r--r--  include/net/ip6_route.h | 31
-rw-r--r--  include/net/ip6_tunnel.h | 4
-rw-r--r--  include/net/ip_fib.h | 60
-rw-r--r--  include/net/ip_tunnels.h | 184
-rw-r--r--  include/net/ipcomp.h | 13
-rw-r--r--  include/net/ipv6.h | 40
-rw-r--r--  include/net/ipv6_frag.h | 5
-rw-r--r--  include/net/ipv6_stubs.h | 5
-rw-r--r--  include/net/iucv/iucv.h | 39
-rw-r--r--  include/net/iw_handler.h | 53
-rw-r--r--  include/net/kcm.h | 1
-rw-r--r--  include/net/l3mdev.h | 31
-rw-r--r--  include/net/lapb.h | 2
-rw-r--r--  include/net/lib80211.h | 122
-rw-r--r--  include/net/libeth/cache.h | 66
-rw-r--r--  include/net/libeth/rx.h | 308
-rw-r--r--  include/net/libeth/tx.h | 129
-rw-r--r--  include/net/libeth/types.h | 25
-rw-r--r--  include/net/llc_c_st.h | 4
-rw-r--r--  include/net/llc_pdu.h | 2
-rw-r--r--  include/net/llc_s_st.h | 4
-rw-r--r--  include/net/lwtunnel.h | 1
-rw-r--r--  include/net/mac80211.h | 413
-rw-r--r--  include/net/mac802154.h | 6
-rw-r--r--  include/net/macsec.h | 4
-rw-r--r--  include/net/mana/gdma.h | 84
-rw-r--r--  include/net/mana/hw_channel.h | 9
-rw-r--r--  include/net/mana/mana.h | 58
-rw-r--r--  include/net/mctp.h | 22
-rw-r--r--  include/net/mctpdevice.h | 4
-rw-r--r--  include/net/mptcp.h | 31
-rw-r--r--  include/net/ndisc.h | 17
-rw-r--r--  include/net/neighbour.h | 33
-rw-r--r--  include/net/neighbour_tables.h | 12
-rw-r--r--  include/net/net_debug.h | 6
-rw-r--r--  include/net/net_namespace.h | 36
-rw-r--r--  include/net/net_shaper.h | 120
-rw-r--r--  include/net/netdev_lock.h | 138
-rw-r--r--  include/net/netdev_netlink.h | 12
-rw-r--r--  include/net/netdev_queues.h | 106
-rw-r--r--  include/net/netdev_rx_queue.h | 14
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 18
-rw-r--r--  include/net/netfilter/nf_conntrack_count.h | 6
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 12
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 16
-rw-r--r--  include/net/netfilter/nf_hooks_lwtunnel.h | 2
-rw-r--r--  include/net/netfilter/nf_tables.h | 320
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 1
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h | 10
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h | 5
-rw-r--r--  include/net/netfilter/nf_tproxy.h | 5
-rw-r--r--  include/net/netfilter/nft_fib.h | 34
-rw-r--r--  include/net/netfilter/nft_meta.h | 3
-rw-r--r--  include/net/netfilter/nft_reject.h | 3
-rw-r--r--  include/net/netlabel.h | 33
-rw-r--r--  include/net/netlink.h | 406
-rw-r--r--  include/net/netmem.h | 272
-rw-r--r--  include/net/netns/core.h | 1
-rw-r--r--  include/net/netns/ipv4.h | 39
-rw-r--r--  include/net/netns/ipv6.h | 1
-rw-r--r--  include/net/netns/netfilter.h | 3
-rw-r--r--  include/net/netns/sctp.h | 4
-rw-r--r--  include/net/netns/xfrm.h | 3
-rw-r--r--  include/net/nexthop.h | 8
-rw-r--r--  include/net/nfc/nci.h | 4
-rw-r--r--  include/net/nfc/nci_core.h | 4
-rw-r--r--  include/net/nfc/nfc.h | 12
-rw-r--r--  include/net/nl802154.h | 2
-rw-r--r--  include/net/p8022.h | 16
-rw-r--r--  include/net/page_pool/helpers.h | 193
-rw-r--r--  include/net/page_pool/memory_provider.h | 51
-rw-r--r--  include/net/page_pool/types.h | 88
-rw-r--r--  include/net/pfcp.h | 90
-rw-r--r--  include/net/phonet/pn_dev.h | 8
-rw-r--r--  include/net/pkt_cls.h | 25
-rw-r--r--  include/net/proto_memory.h | 83
-rw-r--r--  include/net/psample.h | 13
-rw-r--r--  include/net/red.h | 20
-rw-r--r--  include/net/regulatory.h | 4
-rw-r--r--  include/net/request_sock.h | 52
-rw-r--r--  include/net/route.h | 118
-rw-r--r--  include/net/rps.h | 59
-rw-r--r--  include/net/rstreason.h | 221
-rw-r--r--  include/net/rtnetlink.h | 92
-rw-r--r--  include/net/sch_generic.h | 38
-rw-r--r--  include/net/scm.h | 131
-rw-r--r--  include/net/sctp/checksum.h | 32
-rw-r--r--  include/net/sctp/sctp.h | 6
-rw-r--r--  include/net/sctp/sm.h | 1
-rw-r--r--  include/net/sctp/stream_sched.h | 8
-rw-r--r--  include/net/sctp/structs.h | 25
-rw-r--r--  include/net/secure_seq.h | 4
-rw-r--r--  include/net/seg6.h | 7
-rw-r--r--  include/net/seg6_hmac.h | 7
-rw-r--r--  include/net/seg6_local.h | 1
-rw-r--r--  include/net/smc.h | 24
-rw-r--r--  include/net/snmp.h | 5
-rw-r--r--  include/net/sock.h | 342
-rw-r--r--  include/net/sock_reuseport.h | 2
-rw-r--r--  include/net/strparser.h | 4
-rw-r--r--  include/net/tcp.h | 356
-rw-r--r--  include/net/tcp_ao.h | 52
-rw-r--r--  include/net/tcx.h | 18
-rw-r--r--  include/net/timewait_sock.h | 9
-rw-r--r--  include/net/tls.h | 17
-rw-r--r--  include/net/udp.h | 163
-rw-r--r--  include/net/udp_tunnel.h | 19
-rw-r--r--  include/net/vxlan.h | 6
-rw-r--r--  include/net/x25.h | 2
-rw-r--r--  include/net/xdp.h | 199
-rw-r--r--  include/net/xdp_sock.h | 27
-rw-r--r--  include/net/xdp_sock_drv.h | 108
-rw-r--r--  include/net/xfrm.h | 239
-rw-r--r--  include/net/xsk_buff_pool.h | 51
179 files changed, 6899 insertions, 2508 deletions
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 78ebcf782ce5..4f785098c67a 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -207,6 +207,8 @@ int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err
int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
int *err);
int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
+struct netfs_io_subrequest;
+void p9_client_write_subreq(struct netfs_io_subrequest *subreq);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 77ee0c657e2c..404df8557f6a 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -219,7 +219,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref, bool terse);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 62a407db1bf5..9e5e95988b9e 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -37,10 +37,14 @@ struct prefix_info {
struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
__u8 onlink : 1,
- autoconf : 1,
- reserved : 6;
+ autoconf : 1,
+ routeraddr : 1,
+ preferpd : 1,
+ reserved : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 4,
+ preferpd : 1,
+ routeraddr : 1,
autoconf : 1,
onlink : 1;
#else
@@ -84,6 +88,23 @@ struct ifa6_config {
u16 scope;
};
+enum addr_type_t {
+ UNICAST_ADDR,
+ MULTICAST_ADDR,
+ ANYCAST_ADDR,
+};
+
+struct inet6_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+ int ifindex;
+ enum addr_type_t type;
+ bool force_rt_scope_universe;
+};
+
int addrconf_init(void);
void addrconf_cleanup(void);
@@ -183,10 +204,12 @@ static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
return 0;
}
+#define INFINITY_LIFE_TIME 0xFFFFFFFF
+
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
- if (timeout == 0xffffffff)
+ if (timeout == INFINITY_LIFE_TIME)
return ~0UL;
/*
@@ -324,10 +347,15 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
return rcu_dereference_rtnl(dev->ip6_ptr);
}
+static inline struct inet6_dev *__in6_dev_get_rtnl_net(const struct net_device *dev)
+{
+ return rtnl_net_dereference(dev_net(dev), dev->ip6_ptr);
+}
+
/**
* __in6_dev_stats_get - get inet6_dev pointer for stats
* @dev: network device
- * @skb: skb for original incoming interface if neeeded
+ * @skb: skb for original incoming interface if needed
*
* Caller must hold rcu_read_lock or RTNL, because this function
* does not take a reference on the inet6_dev.
@@ -519,4 +547,11 @@ int if6_proc_init(void);
void if6_proc_exit(void);
#endif
+int inet6_fill_ifmcaddr(struct sk_buff *skb,
+ const struct ifmcaddr6 *ifmca,
+ struct inet6_fill_args *args);
+
+int inet6_fill_ifacaddr(struct sk_buff *skb,
+ const struct ifacaddr6 *ifaca,
+ struct inet6_fill_args *args);
#endif
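
The addrconf changes above give the 0xffffffff sentinel a name (INFINITY_LIFE_TIME) and export the multicast/anycast fill helpers. As a quick illustration of the fix-up semantics, here is a minimal sketch of a caller converting a router-advertised lifetime; demo_lifetime_to_jiffies() is a hypothetical name, only addrconf_timeout_fixup() and the constant come from this header:

#include <net/addrconf.h>

/* Hedged sketch: INFINITY_LIFE_TIME means "never expires" and maps to
 * ~0UL; finite values are scaled into jiffies using the given unit.
 */
static unsigned long demo_lifetime_to_jiffies(u32 lifetime)
{
	return addrconf_timeout_fixup(lifetime, HZ);
}
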
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 0754c463224a..0fb4c41c9bbf 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -16,6 +16,7 @@ struct sock;
struct socket;
struct rxrpc_call;
struct rxrpc_peer;
+struct krb5_buffer;
enum rxrpc_abort_reason;
enum rxrpc_interruptibility {
@@ -24,23 +25,33 @@ enum rxrpc_interruptibility {
RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */
};
+enum rxrpc_oob_type {
+ RXRPC_OOB_CHALLENGE, /* Security challenge for a connection */
+};
+
/*
* Debug ID counter for tracing.
*/
extern atomic_t rxrpc_debug_id;
+/*
+ * Operations table for rxrpc to call out to a kernel application (e.g. kAFS).
+ */
+struct rxrpc_kernel_ops {
+ void (*notify_new_call)(struct sock *sk, struct rxrpc_call *call,
+ unsigned long user_call_ID);
+ void (*discard_new_call)(struct rxrpc_call *call, unsigned long user_call_ID);
+ void (*user_attach_call)(struct rxrpc_call *call, unsigned long user_call_ID);
+ void (*notify_oob)(struct sock *sk, struct sk_buff *oob);
+};
+
typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
-typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
- unsigned long);
-typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
-typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
-void rxrpc_kernel_new_call_notification(struct socket *,
- rxrpc_notify_new_call_t,
- rxrpc_discard_new_call_t);
+void rxrpc_kernel_set_notifications(struct socket *sock,
+ const struct rxrpc_kernel_ops *app_ops);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
struct rxrpc_peer *peer,
struct key *key,
@@ -69,17 +80,36 @@ struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
+unsigned long rxrpc_kernel_set_peer_data(struct rxrpc_peer *peer, unsigned long app_data);
+unsigned long rxrpc_kernel_get_peer_data(const struct rxrpc_peer *peer);
unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
-int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
- rxrpc_user_attach_call_t, unsigned long, gfp_t,
- unsigned int);
+int rxrpc_kernel_charge_accept(struct socket *sock, rxrpc_notify_rx_t notify_rx,
+ unsigned long user_call_ID, gfp_t gfp,
+ unsigned int debug_id);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
-u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
- unsigned long);
int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val);
int rxrpc_sock_set_security_keyring(struct sock *, struct key *);
+int rxrpc_sock_set_manage_response(struct sock *sk, bool set);
+
+enum rxrpc_oob_type rxrpc_kernel_query_oob(struct sk_buff *oob,
+ struct rxrpc_peer **_peer,
+ unsigned long *_peer_appdata);
+struct sk_buff *rxrpc_kernel_dequeue_oob(struct socket *sock,
+ enum rxrpc_oob_type *_type);
+void rxrpc_kernel_free_oob(struct sk_buff *oob);
+void rxrpc_kernel_query_challenge(struct sk_buff *challenge,
+ struct rxrpc_peer **_peer,
+ unsigned long *_peer_appdata,
+ u16 *_service_id, u8 *_security_index);
+int rxrpc_kernel_reject_challenge(struct sk_buff *challenge, u32 abort_code,
+ int error, enum rxrpc_abort_reason why);
+int rxkad_kernel_respond_to_challenge(struct sk_buff *challenge);
+u32 rxgk_kernel_query_challenge(struct sk_buff *challenge);
+int rxgk_kernel_respond_to_challenge(struct sk_buff *challenge,
+ struct krb5_buffer *appdata);
+u8 rxrpc_kernel_query_call_security(struct rxrpc_call *call,
+ u16 *_service_id, u32 *_enctype);
#endif /* _NET_RXRPC_H */
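
The af_rxrpc.h hunks above fold the three per-callback registration typedefs into one const operations table. A minimal sketch of how a kernel consumer such as kAFS might wire itself up through the new entry point; the my_* handlers are hypothetical, only the struct and rxrpc_kernel_set_notifications() come from this header:

#include <net/af_rxrpc.h>

static void my_notify_new_call(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
	/* Queue the incoming call for the application's accept path. */
}

static void my_discard_new_call(struct rxrpc_call *call,
				unsigned long user_call_ID)
{
	/* Drop bookkeeping for a call torn down before it was accepted. */
}

static const struct rxrpc_kernel_ops my_rxrpc_ops = {
	.notify_new_call	= my_notify_new_call,
	.discard_new_call	= my_discard_new_call,
	/* .user_attach_call and .notify_oob left NULL in this sketch. */
};

static void my_setup(struct socket *sock)
{
	rxrpc_kernel_set_notifications(sock, &my_rxrpc_ops);
}
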
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 3dee0b2721aa..1af1841b7601 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -2,11 +2,15 @@
#ifndef __LINUX_NET_AFUNIX_H
#define __LINUX_NET_AFUNIX_H
-#include <linux/socket.h>
-#include <linux/un.h>
+#include <linux/atomic.h>
#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/path.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <net/sock.h>
+#include <uapi/linux/un.h>
#if IS_ENABLED(CONFIG_UNIX)
struct unix_sock *unix_get_socket(struct file *filp);
@@ -17,43 +21,17 @@ static inline struct unix_sock *unix_get_socket(struct file *filp)
}
#endif
-extern spinlock_t unix_gc_lock;
-extern unsigned int unix_tot_inflight;
-
-void unix_inflight(struct user_struct *user, struct file *fp);
-void unix_notinflight(struct user_struct *user, struct file *fp);
-void unix_gc(void);
-void wait_for_unix_gc(struct scm_fp_list *fpl);
-
-struct sock *unix_peer_get(struct sock *sk);
-
-#define UNIX_HASH_MOD (256 - 1)
-#define UNIX_HASH_SIZE (256 * 2)
-#define UNIX_HASH_BITS 8
-
struct unix_address {
refcount_t refcnt;
int len;
struct sockaddr_un name[];
};
-struct unix_skb_parms {
- struct pid *pid; /* Skb credentials */
- kuid_t uid;
- kgid_t gid;
- struct scm_fp_list *fp; /* Passed files */
-#ifdef CONFIG_SECURITY_NETWORK
- u32 secid; /* Security ID */
-#endif
- u32 consumed;
-} __randomize_layout;
-
struct scm_stat {
atomic_t nr_fds;
+ unsigned long nr_unix_fds;
};
-#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
@@ -62,13 +40,11 @@ struct unix_sock {
struct path path;
struct mutex iolock, bindlock;
struct sock *peer;
- struct list_head link;
- unsigned long inflight;
+ struct sock *listener;
+ struct unix_vertex *vertex;
spinlock_t lock;
- unsigned long gc_flags;
-#define UNIX_GC_CANDIDATE 0
-#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
+#define peer_wait peer_wq.wait
wait_queue_entry_t peer_wake;
struct scm_stat scm_stat;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
@@ -81,47 +57,5 @@ struct unix_sock {
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-enum unix_socket_lock_class {
- U_LOCK_NORMAL,
- U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
- U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
- U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
- * candidates to close a small race window.
- */
-};
-static inline void unix_state_lock_nested(struct sock *sk,
- enum unix_socket_lock_class subclass)
-{
- spin_lock_nested(&unix_sk(sk)->lock, subclass);
-}
-
-#define peer_wait peer_wq.wait
-
-long unix_inq_len(struct sock *sk);
-long unix_outq_len(struct sock *sk);
-
-int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
- int flags);
-int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
- int flags);
-#ifdef CONFIG_SYSCTL
-int unix_sysctl_register(struct net *net);
-void unix_sysctl_unregister(struct net *net);
-#else
-static inline int unix_sysctl_register(struct net *net) { return 0; }
-static inline void unix_sysctl_unregister(struct net *net) {}
-#endif
-
-#ifdef CONFIG_BPF_SYSCALL
-extern struct proto unix_dgram_proto;
-extern struct proto unix_stream_proto;
-
-int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
-int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
-void __init unix_bpf_build_proto(void);
-#else
-static inline void __init unix_bpf_build_proto(void)
-{}
-#endif
#endif
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 535701efc1e5..d56e6e135158 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -169,6 +169,9 @@ struct vsock_transport {
void (*notify_buffer_size)(struct vsock_sock *, u64 *);
int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);
+ /* SIOCOUTQ ioctl */
+ ssize_t (*unsent_bytes)(struct vsock_sock *vsk);
+
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
@@ -218,6 +221,7 @@ void vsock_for_each_connected_socket(struct vsock_transport *transport,
void (*fn)(struct sock *sk));
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
bool vsock_find_cid(unsigned int cid);
+void vsock_linger(struct sock *sk);
/**** TAP ****/
@@ -230,8 +234,12 @@ struct vsock_tap {
int vsock_add_tap(struct vsock_tap *vt);
int vsock_remove_tap(struct vsock_tap *vt);
void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
+int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
+int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
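
The new unsent_bytes() operation gives transports a hook for the SIOCOUTQ ioctl. A hedged sketch of a transport filling it in, assuming a hypothetical atomic counter that the transport's TX path maintains:

#include <linux/atomic.h>
#include <net/af_vsock.h>

static atomic64_t my_queued_bytes;	/* hypothetical TX-path counter */

static ssize_t my_unsent_bytes(struct vsock_sock *vsk)
{
	/* Bytes accepted from userspace but not yet handed to the device. */
	return atomic64_read(&my_queued_bytes);
}

static struct vsock_transport my_transport = {
	.unsent_bytes = my_unsent_bytes,
	/* ...the remaining mandatory callbacks are elided here... */
};
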
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 0d939e5aee4e..a7bba42dde15 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -139,7 +139,9 @@ enum {
AX25_VALUES_N2, /* Default N2 value */
AX25_VALUES_PACLEN, /* AX.25 MTU */
AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave, DAMA Master */
+#ifdef CONFIG_AX25_DAMA_SLAVE
AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */
+#endif
AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */
};
@@ -216,7 +218,7 @@ typedef struct {
struct ctl_table;
typedef struct ax25_dev {
- struct ax25_dev *next;
+ struct list_head list;
struct net_device *dev;
netdevice_tracker dev_tracker;
@@ -229,6 +231,7 @@ typedef struct ax25_dev {
#endif
refcount_t refcount;
bool device_up;
+ struct rcu_head rcu;
} ax25_dev;
typedef struct ax25_cb {
@@ -288,9 +291,8 @@ static inline void ax25_dev_hold(ax25_dev *ax25_dev)
static inline void ax25_dev_put(ax25_dev *ax25_dev)
{
- if (refcount_dec_and_test(&ax25_dev->refcount)) {
- kfree(ax25_dev);
- }
+ if (refcount_dec_and_test(&ax25_dev->refcount))
+ kfree_rcu(ax25_dev, rcu);
}
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
@@ -330,13 +332,12 @@ int ax25_addr_size(const ax25_digi *);
void ax25_digi_invert(const ax25_digi *, ax25_digi *);
/* ax25_dev.c */
-extern ax25_dev *ax25_dev_list;
extern spinlock_t ax25_dev_lock;
#if IS_ENABLED(CONFIG_AX25)
-static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
+static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev)
{
- return dev->ax25_ptr;
+ return rcu_dereference_rtnl(dev->ax25_ptr);
}
#endif
@@ -417,7 +418,6 @@ void ax25_rt_device_down(struct net_device *);
int ax25_rt_ioctl(unsigned int, void __user *);
extern const struct seq_operations ax25_rt_seqops;
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
-int ax25_rt_autobind(ax25_cb *, ax25_address *);
struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
ax25_address *, ax25_digi *);
void ax25_rt_free(void);
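
With ax25_dev now freed via kfree_rcu() and dev->ax25_ptr read through rcu_dereference_rtnl(), lock-free readers become straightforward. A hedged sketch of the read-side pattern; demo_ax25_is_up() is hypothetical, the helpers are from this header:

#include <net/ax25.h>

static bool demo_ax25_is_up(const struct net_device *dev)
{
	ax25_dev *adev;
	bool up;

	rcu_read_lock();
	adev = ax25_dev_ax25dev(dev);	/* rcu_dereference_rtnl() inside */
	up = adev && adev->device_up;
	rcu_read_unlock();		/* kfree_rcu() defers any free */

	return up;
}
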
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index eaec5d6caa29..114299bd8b98 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,6 +29,7 @@
#include <linux/poll.h>
#include <net/sock.h>
#include <linux/seq_file.h>
+#include <linux/ethtool.h>
#define BT_SUBSYS_VERSION 2
#define BT_SUBSYS_REVISION 22
@@ -123,6 +124,7 @@ struct bt_voice {
#define BT_VOICE_TRANSPARENT 0x0003
#define BT_VOICE_CVSD_16BIT 0x0060
+#define BT_VOICE_TRANSPARENT_16BIT 0x0063
#define BT_SNDMTU 12
#define BT_RCVMTU 13
@@ -155,6 +157,7 @@ struct bt_voice {
#define BT_PKT_STATUS 16
#define BT_SCM_PKT_STATUS 0x03
+#define BT_SCM_ERROR 0x04
#define BT_ISO_QOS 17
@@ -285,7 +288,7 @@ void bt_err_ratelimited(const char *fmt, ...);
bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
/* Connection and socket states */
-enum {
+enum bt_sock_state {
BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
BT_OPEN,
BT_BOUND,
@@ -403,6 +406,7 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+bool bt_sock_linked(struct bt_sock_list *l, struct sock *s);
struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
struct proto *prot, int proto, gfp_t prio, int kern);
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
@@ -441,6 +445,13 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb);
+
+int hci_ethtool_ts_info(unsigned int index, int sk_proto,
+ struct kernel_ethtool_ts_info *ts_info);
+
#define HCI_REQ_START BIT(0)
#define HCI_REQ_SKB BIT(1)
@@ -585,15 +596,6 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
-static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
- sockptr_t src, size_t src_size)
-{
- if (dst_size > src_size)
- return -EINVAL;
-
- return copy_from_sockptr(dst, src, dst_size);
-}
-
int bt_to_errno(u16 code);
__u8 bt_status(int err);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5c12761cbc0e..82cbd54443ac 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -29,13 +29,11 @@
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
#define HCI_MAX_ISO_SIZE 251
+#define HCI_MAX_ISO_BIS 31
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
#define HCI_LINK_KEY_SIZE 16
-#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
-
-#define HCI_MAX_AMP_ASSOC_SIZE 672
#define HCI_MAX_CPB_DATA_SIZE 252
@@ -70,26 +68,7 @@
#define HCI_I2C 8
#define HCI_SMD 9
#define HCI_VIRTIO 10
-
-/* HCI controller types */
-#define HCI_PRIMARY 0x00
-#define HCI_AMP 0x01
-
-/* First BR/EDR Controller shall have ID = 0 */
-#define AMP_ID_BREDR 0x00
-
-/* AMP controller types */
-#define AMP_TYPE_BREDR 0x00
-#define AMP_TYPE_80211 0x01
-
-/* AMP controller status */
-#define AMP_STATUS_POWERED_DOWN 0x00
-#define AMP_STATUS_BLUETOOTH_ONLY 0x01
-#define AMP_STATUS_NO_CAPACITY 0x02
-#define AMP_STATUS_LOW_CAPACITY 0x03
-#define AMP_STATUS_MEDIUM_CAPACITY 0x04
-#define AMP_STATUS_HIGH_CAPACITY 0x05
-#define AMP_STATUS_FULL_CAPACITY 0x06
+#define HCI_IPC 11
/* HCI device quirks */
enum {
@@ -229,14 +208,24 @@ enum {
*/
HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- /* When this quirk is set, the controller has validated that
- * LE states reported through the HCI_LE_READ_SUPPORTED_STATES are
- * valid. This mechanism is necessary as many controllers have
- * been seen has having trouble initiating a connectable
- * advertisement despite the state combination being reported as
- * supported.
+ /* When this quirk is set consider Sync Flow Control as supported by
+ * the driver.
+ *
+ * This quirk must be set before hci_register_dev is called.
*/
- HCI_QUIRK_VALID_LE_STATES,
+ HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED,
+
+ /* When this quirk is set, the LE states reported through the
+ * HCI_LE_READ_SUPPORTED_STATES are invalid/broken.
+ *
+ * This mechanism is necessary as many controllers have been seen as
+ * having trouble initiating a connectable advertisement despite the
+ * state combination being reported as supported.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_LE_STATES,
/* When this quirk is set, then erroneous data reporting
* is ignored. This is mainly due to the fact that the HCI
@@ -320,6 +309,20 @@ enum {
*/
HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+ /*
+ * When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
+ * disabled. This is required for the Actions Semiconductor ATS2851
+ * based controllers, which erroneously claim to support it.
+ */
+ HCI_QUIRK_BROKEN_EXT_CREATE_CONN,
+
+ /*
+ * When this quirk is set, the command WRITE_AUTH_PAYLOAD_TIMEOUT is
+ * skipped. This is required for the Actions Semiconductor ATS2851
+ * based controllers, due to a race condition in pairing process.
+ */
+ HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
+
/* When this quirk is set, MSFT extension monitor tracking by
* address filter is supported. Since tracking quantity of each
* pattern is limited, this feature supports tracking multiple
@@ -347,6 +350,33 @@ enum {
* claim to support it.
*/
HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
+
+ /*
+ * When this quirk is set, the reserved bits of Primary/Secondary_PHY
+ * inside the LE Extended Advertising Report events are discarded.
+ * This is required for some Apple/Broadcom controllers which
+ * abuse these reserved bits for unrelated flags.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+
+ /* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is
+ * skipped. This is required for a subset of the CSR controller clones
+ * which erroneously claim to support it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_READ_VOICE_SETTING,
+
+ /* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is
+ * skipped. This is required for a subset of the CSR controller clones
+ * which erroneously claim to support it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE,
};
/* HCI device flags */
@@ -425,13 +455,13 @@ enum {
HCI_WIDEBAND_SPEECH_ENABLED,
HCI_EVENT_FILTER_CONFIGURED,
HCI_PA_SYNC,
+ HCI_SCO_FLOWCTL,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
HCI_LL_RPA_RESOLUTION,
- HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
HCI_QUALITY_REPORT,
@@ -456,7 +486,6 @@ enum {
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
-#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -465,6 +494,7 @@ enum {
#define HCI_EVENT_PKT 0x04
#define HCI_ISODATA_PKT 0x05
#define HCI_DIAG_PKT 0xf0
+#define HCI_DRV_PKT 0xf1
#define HCI_VENDOR_PKT 0xff
/* HCI packet types */
@@ -528,8 +558,8 @@ enum {
#define ESCO_LINK 0x02
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
-#define AMP_LINK 0x81
-#define ISO_LINK 0x82
+#define CIS_LINK 0x82
+#define BIS_LINK 0x83
#define INVALID_LINK 0xff
/* LMP features */
@@ -679,7 +709,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
-#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1a
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
@@ -694,6 +724,7 @@ enum {
#define HCI_RSSI_INVALID 127
#define HCI_SYNC_HANDLE_INVALID 0xffff
+#define HCI_SID_INVALID 0xff
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
@@ -850,6 +881,11 @@ struct hci_cp_remote_name_req_cancel {
bdaddr_t bdaddr;
} __packed;
+struct hci_rp_remote_name_req_cancel {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
#define HCI_OP_READ_REMOTE_FEATURES 0x041b
struct hci_cp_read_remote_features {
__le16 handle;
@@ -944,56 +980,6 @@ struct hci_cp_io_capability_neg_reply {
__u8 reason;
} __packed;
-#define HCI_OP_CREATE_PHY_LINK 0x0435
-struct hci_cp_create_phy_link {
- __u8 phy_handle;
- __u8 key_len;
- __u8 key_type;
- __u8 key[HCI_AMP_LINK_KEY_SIZE];
-} __packed;
-
-#define HCI_OP_ACCEPT_PHY_LINK 0x0436
-struct hci_cp_accept_phy_link {
- __u8 phy_handle;
- __u8 key_len;
- __u8 key_type;
- __u8 key[HCI_AMP_LINK_KEY_SIZE];
-} __packed;
-
-#define HCI_OP_DISCONN_PHY_LINK 0x0437
-struct hci_cp_disconn_phy_link {
- __u8 phy_handle;
- __u8 reason;
-} __packed;
-
-struct ext_flow_spec {
- __u8 id;
- __u8 stype;
- __le16 msdu;
- __le32 sdu_itime;
- __le32 acc_lat;
- __le32 flush_to;
-} __packed;
-
-#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
-#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
-struct hci_cp_create_accept_logical_link {
- __u8 phy_handle;
- struct ext_flow_spec tx_flow_spec;
- struct ext_flow_spec rx_flow_spec;
-} __packed;
-
-#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
-struct hci_cp_disconn_logical_link {
- __le16 log_handle;
-} __packed;
-
-#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
-struct hci_cp_logical_link_cancel {
- __u8 phy_handle;
- __u8 flow_spec_id;
-} __packed;
-
#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
struct hci_coding_format {
__u8 id;
@@ -1573,6 +1559,11 @@ struct hci_rp_read_tx_power {
__s8 tx_power;
} __packed;
+#define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f
+struct hci_cp_write_sync_flowctl {
+ __u8 enable;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
struct hci_rp_read_page_scan_type {
__u8 status;
@@ -1615,46 +1606,6 @@ struct hci_rp_read_enc_key_size {
__u8 key_size;
} __packed;
-#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
-struct hci_rp_read_local_amp_info {
- __u8 status;
- __u8 amp_status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le32 max_pdu;
- __u8 amp_type;
- __le16 pal_cap;
- __le16 max_assoc_size;
- __le32 max_flush_to;
- __le32 be_flush_to;
-} __packed;
-
-#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
-struct hci_cp_read_local_amp_assoc {
- __u8 phy_handle;
- __le16 len_so_far;
- __le16 max_len;
-} __packed;
-struct hci_rp_read_local_amp_assoc {
- __u8 status;
- __u8 phy_handle;
- __le16 rem_len;
- __u8 frag[];
-} __packed;
-
-#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
-struct hci_cp_write_remote_amp_assoc {
- __u8 phy_handle;
- __le16 len_so_far;
- __le16 rem_len;
- __u8 frag[];
-} __packed;
-struct hci_rp_write_remote_amp_assoc {
- __u8 status;
- __u8 phy_handle;
-} __packed;
-
#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
#define HCI_OP_ENABLE_DUT_MODE 0x1803
@@ -1666,6 +1617,15 @@ struct hci_cp_le_set_event_mask {
__u8 mask[8];
} __packed;
+/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E
+ * 7.8.2 LE Read Buffer Size command
+ * MAX_LE_MTU is 0xffff.
+ * 0 is also valid. It means that no dedicated LE Buffer exists.
+ * It should use the HCI_Read_Buffer_Size command and mtu is shared
+ * between BR/EDR and LE.
+ */
+#define HCI_MIN_LE_MTU 0x001b
+
#define HCI_OP_LE_READ_BUFFER_SIZE 0x2002
struct hci_rp_le_read_buffer_size {
__u8 status;
@@ -1973,6 +1933,8 @@ struct hci_cp_le_pa_create_sync {
__u8 sync_cte_type;
} __packed;
+#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045
+
#define HCI_OP_LE_PA_TERM_SYNC 0x2046
struct hci_cp_le_pa_term_sync {
__le16 handle;
@@ -2026,7 +1988,7 @@ struct hci_cp_le_set_ext_adv_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[];
+ __u8 data[] __counted_by(length);
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
@@ -2035,7 +1997,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[];
+ __u8 data[] __counted_by(length);
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
@@ -2061,7 +2023,7 @@ struct hci_cp_le_set_per_adv_data {
__u8 handle;
__u8 operation;
__u8 length;
- __u8 data[];
+ __u8 data[] __counted_by(length);
} __packed;
#define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040
@@ -2144,7 +2106,7 @@ struct hci_cp_le_set_cig_params {
__le16 c_latency;
__le16 p_latency;
__u8 num_cis;
- struct hci_cis_params cis[];
+ struct hci_cis_params cis[] __counted_by(num_cis);
} __packed;
struct hci_rp_le_set_cig_params {
@@ -2162,7 +2124,7 @@ struct hci_cis {
struct hci_cp_le_create_cis {
__u8 num_cis;
- struct hci_cis cis[];
+ struct hci_cis cis[] __counted_by(num_cis);
} __packed;
#define HCI_OP_LE_REMOVE_CIG 0x2065
@@ -2216,7 +2178,7 @@ struct hci_cp_le_big_create_sync {
__u8 mse;
__le16 timeout;
__u8 num_bis;
- __u8 bis[];
+ __u8 bis[] __counted_by(num_bis);
} __packed;
#define HCI_OP_LE_BIG_TERM_SYNC 0x206c
@@ -2872,7 +2834,7 @@ struct hci_evt_le_create_big_complete {
__le16 bis_handle[];
} __packed;
-#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
+#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
struct hci_evt_le_big_sync_estabilished {
__u8 status;
__u8 handle;
@@ -2993,6 +2955,11 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
return (struct hci_sco_hdr *) skb->data;
}
+static inline struct hci_iso_hdr *hci_iso_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_iso_hdr *)skb->data;
+}
+
/* Command opcode pack/unpack */
#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
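
Several flexible arrays above gain __counted_by() annotations, which let CONFIG_FORTIFY_SOURCE/UBSAN bounds-check indexing against the named counter at run time. The practical rule is to store the count before touching the array, as in this hedged sketch (demo_build_create_cis() is hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <net/bluetooth/hci.h>

static struct hci_cp_le_create_cis *demo_build_create_cis(u8 n)
{
	struct hci_cp_le_create_cis *cp;
	u8 i;

	cp = kzalloc(struct_size(cp, cis, n), GFP_KERNEL);
	if (!cp)
		return NULL;

	cp->num_cis = n;	/* set the counter first... */
	for (i = 0; i < n; i++)	/* ...then cis[i] is provably in bounds */
		cp->cis[i].cis_handle = cpu_to_le16(i);

	return cp;
}
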
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e8f581f3f3ce..2b261e74e2c4 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -31,6 +31,7 @@
#include <linux/rculist.h>
#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_drv.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/coredump.h>
@@ -91,8 +92,6 @@ struct discovery_state {
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
- unsigned long scan_start;
- unsigned long scan_duration;
unsigned long name_resolve_timeout;
};
@@ -126,7 +125,6 @@ enum suspended_state {
struct hci_conn_hash {
struct list_head list;
unsigned int acl_num;
- unsigned int amp_num;
unsigned int sco_num;
unsigned int iso_num;
unsigned int le_num;
@@ -160,8 +158,9 @@ struct bdaddr_list_with_irk {
/* Bitmask of connection flags */
enum hci_conn_flags {
- HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
- HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
+ HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0),
+ HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1),
+ HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2),
};
typedef u8 hci_conn_flags_t;
@@ -189,7 +188,6 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 link_type;
u8 type;
u8 val[16];
};
@@ -199,7 +197,6 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -214,7 +211,6 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
- u8 link_type;
u8 val[16];
};
@@ -222,8 +218,6 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
- u8 bdaddr_type;
- u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -247,6 +241,7 @@ struct adv_info {
bool periodic;
__u8 mesh;
__u8 instance;
+ __u8 handle;
__u32 flags;
__u16 timeout;
__u16 remaining_time;
@@ -267,6 +262,12 @@ struct adv_info {
struct delayed_work rpa_expired_cb;
};
+struct tx_queue {
+ struct sk_buff_head queue;
+ unsigned int extra;
+ unsigned int tracked;
+};
+
#define HCI_MAX_ADV_INSTANCES 5
#define HCI_DEFAULT_ADV_DURATION 2
@@ -341,14 +342,6 @@ struct adv_monitor {
/* Default authenticated payload timeout 30s */
#define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8
-struct amp_assoc {
- __u16 len;
- __u16 offset;
- __u16 rem_len;
- __u16 len_so_far;
- __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
-};
-
#define HCI_MAX_PAGES 3
struct hci_dev {
@@ -361,7 +354,6 @@ struct hci_dev {
unsigned long flags;
__u16 id;
__u8 bus;
- __u8 dev_type;
bdaddr_t bdaddr;
bdaddr_t setup_addr;
bdaddr_t public_addr;
@@ -467,21 +459,6 @@ struct hci_dev {
__u16 sniff_min_interval;
__u16 sniff_max_interval;
- __u8 amp_status;
- __u32 amp_total_bw;
- __u32 amp_max_bw;
- __u32 amp_min_latency;
- __u32 amp_max_pdu;
- __u8 amp_type;
- __u16 amp_pal_cap;
- __u16 amp_assoc_size;
- __u32 amp_max_flush_to;
- __u32 amp_be_flush_to;
-
- struct amp_assoc loc_assoc;
-
- __u8 flow_ctl_mode;
-
unsigned int auto_accept_delay;
unsigned long quirks;
@@ -501,13 +478,7 @@ struct hci_dev {
unsigned int le_pkts;
unsigned int iso_pkts;
- __u16 block_len;
- __u16 block_mtu;
- __u16 num_blocks;
- __u16 block_cnt;
-
unsigned long acl_last_tx;
- unsigned long sco_last_tx;
unsigned long le_last_tx;
__u8 le_tx_def_phys;
@@ -559,7 +530,6 @@ struct hci_dev {
struct discovery_state discovery;
- int discovery_old_state;
bool discovery_paused;
int advertising_old_state;
bool advertising_paused;
@@ -644,6 +614,8 @@ struct hci_dev {
struct list_head monitored_devices;
bool advmon_pend_notify;
+ struct hci_drv *hci_drv;
+
#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
#endif
@@ -670,7 +642,6 @@ struct hci_dev {
int (*post_init)(struct hci_dev *hdev);
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
- void (*cmd_timeout)(struct hci_dev *hdev);
void (*reset)(struct hci_dev *hdev);
bool (*wakeup)(struct hci_dev *hdev);
int (*set_quality_report)(struct hci_dev *hdev, bool enable);
@@ -678,6 +649,7 @@ struct hci_dev {
int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
struct bt_codec *codec, __u8 *vnd_len,
__u8 **vnd_data);
+ u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -705,7 +677,9 @@ struct hci_conn {
__u8 adv_instance;
__u16 handle;
__u16 sync_handle;
+ __u8 sid;
__u16 state;
+ __u16 mtu;
__u8 mode;
__u8 type;
__u8 role;
@@ -746,6 +720,9 @@ struct hci_conn {
__s8 tx_power;
__s8 max_tx_power;
struct bt_iso_qos iso_qos;
+ __u8 num_bis;
+ __u8 bis[HCI_MAX_ISO_BIS];
+
unsigned long flags;
enum conn_reasons conn_reason;
@@ -765,6 +742,8 @@ struct hci_conn {
struct sk_buff_head data_q;
struct list_head chan_list;
+ struct tx_queue tx_q;
+
struct delayed_work disc_work;
struct delayed_work auto_accept_work;
struct delayed_work idle_work;
@@ -777,7 +756,6 @@ struct hci_conn {
void *l2cap_data;
void *sco_data;
void *iso_data;
- struct amp_mgr *amp_mgr;
struct list_head link_list;
struct hci_conn *parent;
@@ -804,7 +782,6 @@ struct hci_chan {
struct sk_buff_head data_q;
unsigned int sent;
__u8 state;
- bool amp;
};
struct hci_conn_params {
@@ -858,7 +835,7 @@ extern struct mutex hci_cb_list_lock;
} while (0)
#define hci_dev_le_state_simultaneous(hdev) \
- (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
+ (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \
(hdev->le_states[4] & 0x08) && /* Central */ \
(hdev->le_states[4] & 0x40) && /* Peripheral */ \
(hdev->le_states[3] & 0x10)) /* Simultaneous */
@@ -920,8 +897,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.uuid_count = 0;
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
- hdev->discovery.scan_start = 0;
- hdev->discovery.scan_duration = 0;
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -985,8 +960,10 @@ enum {
HCI_CONN_PER_ADV,
HCI_CONN_BIG_CREATED,
HCI_CONN_CREATE_CIS,
+ HCI_CONN_CREATE_BIG_SYNC,
HCI_CONN_BIG_SYNC,
HCI_CONN_BIG_SYNC_FAILED,
+ HCI_CONN_CREATE_PA_SYNC,
HCI_CONN_PA_SYNC,
HCI_CONN_PA_SYNC_FAILED,
};
@@ -1013,9 +990,6 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case ACL_LINK:
h->acl_num++;
break;
- case AMP_LINK:
- h->amp_num++;
- break;
case LE_LINK:
h->le_num++;
if (c->role == HCI_ROLE_SLAVE)
@@ -1025,7 +999,8 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case ESCO_LINK:
h->sco_num++;
break;
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
h->iso_num++;
break;
}
@@ -1042,9 +1017,6 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case ACL_LINK:
h->acl_num--;
break;
- case AMP_LINK:
- h->amp_num--;
- break;
case LE_LINK:
h->le_num--;
if (c->role == HCI_ROLE_SLAVE)
@@ -1054,7 +1026,8 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case ESCO_LINK:
h->sco_num--;
break;
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
h->iso_num--;
break;
}
@@ -1066,14 +1039,13 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
switch (type) {
case ACL_LINK:
return h->acl_num;
- case AMP_LINK:
- return h->amp_num;
case LE_LINK:
return h->le_num;
case SCO_LINK:
case ESCO_LINK:
return h->sco_num;
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
return h->iso_num;
default:
return 0;
@@ -1084,7 +1056,7 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
struct hci_conn_hash *c = &hdev->conn_hash;
- return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
+ return c->acl_num + c->sco_num + c->le_num + c->iso_num;
}
static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
@@ -1134,7 +1106,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
+ if (bacmp(&c->dst, ba) || c->type != BIS_LINK)
continue;
if (c->iso_qos.bcast.bis == bis) {
@@ -1148,6 +1120,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
}
static inline struct hci_conn *
+hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != BIS_LINK)
+ continue;
+
+ if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
+ continue;
+
+ rcu_read_unlock();
+ return c;
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
bdaddr_t *ba,
__u8 big, __u8 bis)
@@ -1158,8 +1154,8 @@ hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
- !test_bit(HCI_CONN_PER_ADV, &c->flags))
+ if (bacmp(&c->dst, ba) || c->type != BIS_LINK ||
+ !test_bit(HCI_CONN_PER_ADV, &c->flags))
continue;
if (c->iso_qos.bcast.big == big &&
@@ -1248,7 +1244,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
+ if (c->type != CIS_LINK)
continue;
/* Match CIG ID if set */
@@ -1280,7 +1276,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
+ if (c->type != CIS_LINK)
continue;
if (handle == c->iso_qos.ucast.cig) {
@@ -1303,7 +1299,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
+ if (c->type != BIS_LINK)
continue;
if (handle == c->iso_qos.bcast.big) {
@@ -1318,6 +1314,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
}
static inline struct hci_conn *
+hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
+ __u8 handle, __u8 num_bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != BIS_LINK)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1326,8 +1346,8 @@ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK ||
- c->state != state)
+ if (c->type != BIS_LINK || bacmp(&c->dst, BDADDR_ANY) ||
+ c->state != state)
continue;
if (handle == c->iso_qos.bcast.big) {
@@ -1350,8 +1370,8 @@ hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK ||
- !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ if (c->type != BIS_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
continue;
if (c->iso_qos.bcast.big == big) {
@@ -1373,8 +1393,14 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK ||
- !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ if (c->type != BIS_LINK)
+ continue;
+
+ /* Ignore the listen hcon, we are looking
+ * for the child hcon that was created as
+ * a result of the PA sync established event.
+ */
+ if (c->state == BT_LISTEN)
continue;
if (c->sync_handle == sync_handle) {
@@ -1534,9 +1560,9 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 data_len, __u8 *data);
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[]);
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1548,6 +1574,18 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
void hci_conn_failed(struct hci_conn *conn, u8 status);
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
+void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb);
+void hci_conn_tx_dequeue(struct hci_conn *conn);
+void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
+ const struct sockcm_cookie *sockc);
+
+static inline void hci_sockcm_init(struct sockcm_cookie *sockc, struct sock *sk)
+{
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags),
+ };
+}
+
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
* "hci_conn" object. They do not guarantee that the hci_conn object is running,
@@ -1610,10 +1648,6 @@ static inline void hci_conn_drop(struct hci_conn *conn)
}
break;
- case AMP_LINK:
- timeo = conn->disc_timeout;
- break;
-
default:
timeo = 0;
break;
@@ -1740,8 +1774,6 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
-int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
- u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
@@ -1762,6 +1794,7 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
void hci_uuids_clear(struct hci_dev *hdev);
void hci_link_keys_clear(struct hci_dev *hdev);
+u8 *hci_conn_key_enc_size(struct hci_conn *conn);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
bdaddr_t *bdaddr, u8 *val, u8 type,
@@ -1840,6 +1873,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO)
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
@@ -1900,17 +1934,17 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
-/* Use LL Privacy based address resolution if supported */
-#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
- hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
-
-#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
+#define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \
(hdev->commands[39] & 0x04))
#define read_key_size_capable(dev) \
((dev)->commands[20] & 0x10 && \
!test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+#define read_voice_setting_capable(dev) \
+ ((dev)->commands[9] & 0x04 && \
+ !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks))
+
/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/
@@ -1924,8 +1958,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
!test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
/* Use ext create connection if command is supported */
-#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
-
+#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \
+ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks))
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
@@ -1938,8 +1972,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
* C24: Mandatory if the LE Controller supports Connection State and either
* LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
*/
-#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
- ext_adv_capable(dev))
+#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
+ ext_adv_capable(dev)) && \
+ !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \
+ &(dev)->quirks))
/* Periodic advertising support */
#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
@@ -1972,7 +2008,8 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
case ESCO_LINK:
return sco_connect_ind(hdev, bdaddr, flags);
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
return iso_connect_ind(hdev, bdaddr, flags);
default:
@@ -2156,18 +2193,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
{
u16 max_latency;
- if (min > max || min < 6 || max > 3200)
+ if (min > max) {
+ BT_WARN("min %d > max %d", min, max);
+ return -EINVAL;
+ }
+
+ if (min < 6) {
+ BT_WARN("min %d < 6", min);
+ return -EINVAL;
+ }
+
+ if (max > 3200) {
+ BT_WARN("max %d > 3200", max);
return -EINVAL;
+ }
- if (to_multiplier < 10 || to_multiplier > 3200)
+ if (to_multiplier < 10) {
+ BT_WARN("to_multiplier %d < 10", to_multiplier);
return -EINVAL;
+ }
+
+ if (to_multiplier > 3200) {
+ BT_WARN("to_multiplier %d > 3200", to_multiplier);
+ return -EINVAL;
+ }
- if (max >= to_multiplier * 8)
+ if (max >= to_multiplier * 8) {
+ BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
return -EINVAL;
+ }
max_latency = (to_multiplier * 4 / max) - 1;
- if (latency > 499 || latency > max_latency)
+ if (latency > 499) {
+ BT_WARN("latency %d > 499", latency);
return -EINVAL;
+ }
+
+ if (latency > max_latency) {
+ BT_WARN("latency %d > max_latency %d", latency, max_latency);
+ return -EINVAL;
+ }
return 0;
}
@@ -2235,8 +2300,22 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
/* These LE scan and inquiry parameters were chosen according to LE General
* Discovery Procedure specification.
*/
-#define DISCOV_LE_SCAN_WIN 0x12
-#define DISCOV_LE_SCAN_INT 0x12
+#define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */
+#define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */
+#define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */
+#define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */
+#define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */
+#define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */
+#define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */
+#define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */
+#define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */
+#define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */
+#define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */
+#define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */
+#define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */
+#define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */
+#define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */
+#define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */
#define DISCOV_LE_TIMEOUT 10240 /* msec */
#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
@@ -2268,8 +2347,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status);
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status);
@@ -2296,8 +2375,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
-void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
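
The rewritten hci_check_conn_params() above now emits a BT_WARN naming the exact constraint that failed instead of rejecting silently. The limits are unchanged: 6 <= min <= max <= 3200, 10 <= to_multiplier <= 3200, max < 8 * to_multiplier, and latency <= min(499, (to_multiplier * 4 / max) - 1). A worked example in LE units (interval in 1.25 ms steps, timeout in 10 ms steps):

#include <net/bluetooth/hci_core.h>

static void demo_conn_params(void)
{
	/* min = max = 24 (30 ms), timeout = 100 (1 s):
	 * max_latency = (100 * 4 / 24) - 1 = 15, so latency 15 passes.
	 */
	WARN_ON(hci_check_conn_params(24, 24, 15, 100) != 0);

	/* latency 16 exceeds max_latency; the new code logs
	 * "latency 16 > max_latency 15" and returns -EINVAL.
	 */
	WARN_ON(hci_check_conn_params(24, 24, 16, 100) != -EINVAL);
}
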
diff --git a/include/net/bluetooth/hci_drv.h b/include/net/bluetooth/hci_drv.h
new file mode 100644
index 000000000000..2f01c44f05ec
--- /dev/null
+++ b/include/net/bluetooth/hci_drv.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google Corporation
+ */
+
+#ifndef __HCI_DRV_H
+#define __HCI_DRV_H
+
+#include <linux/types.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+struct hci_drv_cmd_hdr {
+ __le16 opcode;
+ __le16 len;
+} __packed;
+
+struct hci_drv_ev_hdr {
+ __le16 opcode;
+ __le16 len;
+} __packed;
+
+#define HCI_DRV_EV_CMD_STATUS 0x0000
+struct hci_drv_ev_cmd_status {
+ __le16 opcode;
+ __u8 status;
+} __packed;
+
+#define HCI_DRV_EV_CMD_COMPLETE 0x0001
+struct hci_drv_ev_cmd_complete {
+ __le16 opcode;
+ __u8 status;
+ __u8 data[];
+} __packed;
+
+#define HCI_DRV_STATUS_SUCCESS 0x00
+#define HCI_DRV_STATUS_UNSPECIFIED_ERROR 0x01
+#define HCI_DRV_STATUS_UNKNOWN_COMMAND 0x02
+#define HCI_DRV_STATUS_INVALID_PARAMETERS 0x03
+
+#define HCI_DRV_MAX_DRIVER_NAME_LENGTH 32
+
+/* Common commands that make sense on all drivers start from 0x0000 */
+#define HCI_DRV_OP_READ_INFO 0x0000
+#define HCI_DRV_READ_INFO_SIZE 0
+struct hci_drv_rp_read_info {
+ __u8 driver_name[HCI_DRV_MAX_DRIVER_NAME_LENGTH];
+ __le16 num_supported_commands;
+ __le16 supported_commands[];
+} __packed;
+
+/* Driver specific OGF (Opcode Group Field)
+ * Commands in this group may have different meanings across different drivers.
+ */
+#define HCI_DRV_OGF_DRIVER_SPECIFIC 0x01
+
+int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status);
+int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp,
+ size_t rp_len);
+int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *cmd_skb);
+
+struct hci_drv_handler {
+ int (*func)(struct hci_dev *hdev, void *data, u16 data_len);
+ size_t data_len;
+};
+
+struct hci_drv {
+ size_t common_handler_count;
+ const struct hci_drv_handler *common_handlers;
+
+ size_t specific_handler_count;
+ const struct hci_drv_handler *specific_handlers;
+};
+
+#endif /* __HCI_DRV_H */
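
The header does not show how a driver plugs in; a hypothetical sketch of the handler tables a driver might declare (everything prefixed foo_ is invented, presumably dispatched via hci_drv_process_cmd(); only the hci_drv types and status codes above are real):

static int foo_handle_custom(struct hci_dev *hdev, void *data, u16 data_len)
{
	/* data_len has already been validated against .data_len */
	return hci_drv_cmd_status(hdev, 0x0000, HCI_DRV_STATUS_SUCCESS);
}

static const struct hci_drv_handler foo_specific_handlers[] = {
	{ foo_handle_custom, 0 },
};

static struct hci_drv foo_hci_drv = {
	.specific_handler_count	= ARRAY_SIZE(foo_specific_handlers),
	.specific_handlers	= foo_specific_handlers,
};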
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 082f89531b88..bbd752494ef9 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -51,6 +51,8 @@ struct hci_mon_hdr {
#define HCI_MON_CTRL_EVENT 17
#define HCI_MON_ISO_TX_PKT 18
#define HCI_MON_ISO_RX_PKT 19
+#define HCI_MON_DRV_TX_PKT 20
+#define HCI_MON_DRV_RX_PKT 21
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 9949870f7d78..13e8cd4414a1 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -144,7 +144,7 @@ struct hci_dev_req {
struct hci_dev_list_req {
__u16 dev_num;
- struct hci_dev_req dev_req[]; /* hci_dev_req structures */
+ struct hci_dev_req dev_req[] __counted_by(dev_num);
};
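
__counted_by() ties the flexible array's bounds to dev_num for FORTIFY_SOURCE and UBSAN bounds checking, so the counter must be assigned before the array is indexed. A sketch of a conforming allocation (dev_num is a caller-supplied count):

static struct hci_dev_list_req *alloc_dev_list(u16 dev_num)
{
	struct hci_dev_list_req *dl;

	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
	if (!dl)
		return NULL;

	/* assign the counter first, so any dev_req[i] access is checkable */
	dl->dev_num = dev_num;

	return dl;
}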
struct hci_conn_list_req {
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 6a9d063e9f47..72558c826aa1 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -8,6 +8,23 @@
#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
+#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
+#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
+
+struct hci_request {
+ struct hci_dev *hdev;
+ struct sk_buff_head cmd_q;
+
+ /* If something goes wrong when building the HCI request, the error
+ * value is stored in this field.
+ */
+ int err;
+};
+
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
@@ -20,6 +37,10 @@ struct hci_cmd_sync_work_entry {
};
struct adv_info;
+
+struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, struct sock *sk);
+
/* Functions with the sync suffix shall not be called with hdev->lock held, as
* they wait for the command to complete, and in the meantime an event could be
* received which could attempt to acquire hdev->lock, causing a deadlock.
@@ -38,6 +59,8 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk);
+int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
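
The lock-ordering rule above implies a pattern like the following sketch (opcode chosen arbitrarily; HCI_OP_READ_LOCAL_NAME, HCI_CMD_TIMEOUT and hci_dev_lock()/hci_dev_unlock() come from hci.h/hci_core.h):

static int example_sync_op(struct hci_dev *hdev)
{
	int err;

	/* hdev->lock must NOT be held across the blocking call */
	err = hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
				  HCI_CMD_TIMEOUT);
	if (err)
		return err;

	hci_dev_lock(hdev);
	/* ... update hdev state with the result ... */
	hci_dev_unlock(hdev);

	return 0;
}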
@@ -50,6 +73,10 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
@@ -113,7 +140,6 @@ int hci_update_scan(struct hci_dev *hdev);
int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
struct sock *sk);
-int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance);
struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
struct sock *sk);
@@ -129,6 +155,8 @@ int hci_update_discoverable(struct hci_dev *hdev);
int hci_update_connectable_sync(struct hci_dev *hdev);
+int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp);
+
int hci_start_discovery_sync(struct hci_dev *hdev);
int hci_stop_discovery_sync(struct hci_dev *hdev);
@@ -136,6 +164,7 @@ int hci_suspend_sync(struct hci_dev *hdev);
int hci_resume_sync(struct hci_dev *hdev);
struct hci_conn;
+struct hci_conn_params;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
@@ -154,3 +183,8 @@ int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ struct hci_conn_params *params);
+
+int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index a4278aa618ab..4bb0eaedda18 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -27,7 +27,7 @@
#ifndef __L2CAP_H
#define __L2CAP_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
/* L2CAP defaults */
@@ -38,8 +38,8 @@
#define L2CAP_DEFAULT_TX_WINDOW 63
#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
#define L2CAP_DEFAULT_MAX_TX 3
-#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
-#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
+#define L2CAP_DEFAULT_RETRANS_TO 2 /* seconds */
+#define L2CAP_DEFAULT_MONITOR_TO 12 /* seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */
#define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
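
With the retransmission and monitor timeouts now expressed in seconds rather than milliseconds, call sites presumably convert with secs_to_jiffies() instead of msecs_to_jiffies(); a sketch only:

static void example_arm_retrans_timer(struct timer_list *timer)
{
	mod_timer(timer, jiffies + secs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
}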
@@ -463,18 +463,24 @@ struct l2cap_le_credits {
#define L2CAP_ECRED_MAX_CID 5
struct l2cap_ecred_conn_req {
- __le16 psm;
- __le16 mtu;
- __le16 mps;
- __le16 credits;
+ /* New members must be added within the struct_group() macro below. */
+ __struct_group(l2cap_ecred_conn_req_hdr, hdr, __packed,
+ __le16 psm;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ );
__le16 scid[];
} __packed;
struct l2cap_ecred_conn_rsp {
- __le16 mtu;
- __le16 mps;
- __le16 credits;
- __le16 result;
+ /* New members must be added within the struct_group() macro below. */
+ struct_group_tagged(l2cap_ecred_conn_rsp_hdr, hdr,
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+ );
__le16 dcid[];
};
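
struct_group() carves a fixed-size header view out of a flex-array struct, so a header-only copy has matching source and destination sizes under FORTIFY_SOURCE. A sketch using the tagged type introduced above:

/* Copies only psm/mtu/mps/credits; scid[] is outside the group, so a
 * fortified memcpy() sees exactly sizeof(*out) bytes on both sides. */
static void copy_ecred_req_hdr(const struct l2cap_ecred_conn_req *req,
			       struct l2cap_ecred_conn_req_hdr *out)
{
	memcpy(out, &req->hdr, sizeof(*out));
}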
@@ -548,6 +554,9 @@ struct l2cap_chan {
__u16 tx_credits;
__u16 rx_credits;
+ /* estimated available receive buffer space or -1 if unknown */
+ ssize_t rx_avail;
+
__u8 tx_state;
__u8 rx_state;
@@ -659,7 +668,7 @@ struct l2cap_conn {
struct l2cap_chan *smp;
struct list_head chan_l;
- struct mutex chan_lock;
+ struct mutex lock;
struct kref ref;
struct list_head users;
};
@@ -682,10 +691,15 @@ struct l2cap_user {
/* ----- L2CAP socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+struct l2cap_rx_busy {
+ struct list_head list;
+ struct sk_buff *skb;
+};
+
struct l2cap_pinfo {
struct bt_sock bt;
struct l2cap_chan *chan;
- struct sk_buff *rx_busy_skb;
+ struct list_head rx_busy;
};
enum {
@@ -941,8 +955,10 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type, u16 timeout);
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ const struct sockcm_cookie *sockc);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail);
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
void l2cap_chan_set_defaults(struct l2cap_chan *chan);
int l2cap_ertm_init(struct l2cap_chan *chan);
@@ -953,12 +969,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
-void l2cap_move_start(struct l2cap_chan *chan);
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status);
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
+struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d382679efd2b..3575cd16049a 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -113,6 +113,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_CIS_PERIPHERAL BIT(19)
#define MGMT_SETTING_ISO_BROADCASTER BIT(20)
#define MGMT_SETTING_ISO_SYNC_RECEIVER BIT(21)
+#define MGMT_SETTING_LL_PRIVACY BIT(22)
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -878,6 +879,16 @@ struct mgmt_cp_mesh_send_cancel {
} __packed;
#define MGMT_MESH_SEND_CANCEL_SIZE 1
+#define MGMT_OP_HCI_CMD_SYNC 0x005B
+struct mgmt_cp_hci_cmd_sync {
+ __le16 opcode;
+ __u8 event;
+ __u8 timeout;
+ __le16 params_len;
+ __u8 params[];
+} __packed;
+#define MGMT_HCI_CMD_SYNC_SIZE 6
+
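The size constant covers the fixed fields only (opcode 2 + event 1 + timeout 1 + params_len 2 = 6 bytes); since the struct is __packed with a trailing flexible array, this can be checked at compile time (a sketch, not part of the patch):

static_assert(sizeof(struct mgmt_cp_hci_cmd_sync) == MGMT_HCI_CMD_SYNC_SIZE);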
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 99d26879b02a..c05882476900 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -355,7 +355,7 @@ struct rfcomm_dev_info {
struct rfcomm_dev_list_req {
u16 dev_num;
- struct rfcomm_dev_info dev_info[];
+ struct rfcomm_dev_info dev_info[] __counted_by(dev_num);
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 9ce5ac2bfbad..2053cd8e788a 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -231,7 +231,10 @@ typedef struct port {
mux_states_t sm_mux_state; /* state machine mux state */
u16 sm_mux_timer_counter; /* state machine mux timer counter */
tx_states_t sm_tx_state; /* state machine tx state */
- u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter
+ * (always on - enters the transmit
+ * state 3 times per second)
+ */
u16 sm_churn_actor_timer_counter;
u16 sm_churn_partner_timer_counter;
u32 churn_actor_count;
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index 9dc082b2d543..e5945427f38d 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -53,7 +53,7 @@ struct slave;
struct tlb_client_info {
- struct slave *tx_slave; /* A pointer to slave used for transmiting
+ struct slave *tx_slave; /* A pointer to slave used for transmitting
* packets to a Client that the Hash function
* gave this entry index.
*/
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 473a0147769e..18687ccf0638 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -161,5 +161,7 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond);
#if IS_ENABLED(CONFIG_IPV6)
void bond_option_ns_ip6_targets_clear(struct bonding *bond);
#endif
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave);
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave);
#endif /* _NET_BOND_OPTIONS_H */
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b61fb1aa3a56..95f67b308c19 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -260,7 +260,7 @@ struct bonding {
#ifdef CONFIG_XFRM_OFFLOAD
struct list_head ipsec_list;
/* protecting ipsec_list */
- spinlock_t ipsec_lock;
+ struct mutex ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
};
@@ -695,6 +695,7 @@ void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
+bool bond_xdp_check(struct bonding *bond, int mode);
void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 9b09acac538e..6e172d0f6ef5 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -24,6 +24,11 @@
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+static inline bool napi_id_valid(unsigned int napi_id)
+{
+ return napi_id >= MIN_NAPI_ID;
+}
+
#define BUSY_POLL_BUDGET 8
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -52,6 +57,9 @@ void napi_busy_loop_rcu(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+void napi_suspend_irqs(unsigned int napi_id);
+void napi_resume_irqs(unsigned int napi_id);
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
@@ -68,7 +76,7 @@ static inline bool sk_can_busy_loop(struct sock *sk)
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- return (unsigned long)(local_clock() >> 10);
+ return (unsigned long)(ktime_get_ns() >> 10);
#else
return 0;
#endif
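
The switch from local_clock() to ktime_get_ns() trades the per-CPU sched clock for one comparable across CPUs; the >> 10 keeps a cheap ~1 us (1024 ns) resolution, so deadline checks can compare deltas of this value directly against a microsecond budget. A sketch mirroring how busy-poll timeout checks use this clock:

static inline bool example_busy_loop_timeout(unsigned long start_time,
					     unsigned long budget_usecs)
{
	unsigned long now = busy_loop_current_time();

	return time_after(now, start_time + budget_usecs);
}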
@@ -111,7 +119,7 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
- if (napi_id >= MIN_NAPI_ID)
+ if (napi_id_valid(napi_id))
napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
READ_ONCE(sk->sk_prefer_busy_poll),
READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
@@ -119,19 +127,25 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
}
/* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_napi_id(struct sk_buff *skb,
- struct napi_struct *napi)
+static inline void __skb_mark_napi_id(struct sk_buff *skb,
+ const struct gro_node *gro)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
/* If the skb was already marked with a valid NAPI ID, avoid overwriting
* it.
*/
- if (skb->napi_id < MIN_NAPI_ID)
- skb->napi_id = napi->napi_id;
+ if (!napi_id_valid(skb->napi_id))
+ skb->napi_id = gro->cached_napi_id;
#endif
}
-/* used in the protocol hanlder to propagate the napi_id to the socket */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ const struct napi_struct *napi)
+{
+ __skb_mark_napi_id(skb, &napi->gro);
+}
+
+/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -171,12 +185,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
#endif
}
-static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
- const struct xdp_buff *xdp)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
-#endif
-}
-
#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 51f7bb42a936..053e7c6a6a66 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -11,9 +11,7 @@
struct cflayer;
struct cfpkt;
-struct cfpktq;
struct caif_payload_info;
-struct caif_packet_funcs;
#define CAIF_LAYER_NAME_SZ 16
@@ -22,7 +20,7 @@ struct caif_packet_funcs;
* @assert: expression to evaluate.
*
* This function will print an error message and do a WARN_ON if the
- * assertion failes. Normally this will do a stack up at the current location.
+ * assertion fails. Normally this will produce a stack dump at the current location.
*/
#define caif_assert(assert) \
do { \
@@ -118,7 +116,7 @@ enum caif_direction {
* @dn: Pointer down to the layer below.
* @node: List node used when layer participate in a list.
* @receive: Packet receive function.
- * @transmit: Packet transmit funciton.
+ * @transmit: Packet transmit function.
* @ctrlcmd: Used for control signalling upwards in the stack.
* @modemcmd: Used for control signaling downwards in the stack.
* @id: The identity of this layer
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 44d914a50369..acf664227d96 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -18,7 +18,7 @@ struct cfpkt *cfpkt_create(u16 len);
/*
* Destroy a CAIF Packet.
- * pkt Packet to be destoyed.
+ * pkt Packet to be destroyed.
*/
void cfpkt_destroy(struct cfpkt *pkt);
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 5ee7b322e18b..a000dc45f966 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -40,7 +40,6 @@ void cfsrvl_init(struct cfsrvl *service,
struct dev_info *dev_info,
bool supports_flowctrl);
bool cfsrvl_ready(struct cfsrvl *service, int *err);
-u8 cfsrvl_getphyid(struct cflayer *layer);
static inline void cfsrvl_get(struct cflayer *layr)
{
diff --git a/include/net/calipso.h b/include/net/calipso.h
index f8667a3fda9e..76b9e08c10c2 100644
--- a/include/net/calipso.h
+++ b/include/net/calipso.h
@@ -25,7 +25,7 @@
#include <net/netlabel.h>
#include <net/request_sock.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* known doi values */
#define CALIPSO_DOI_UNKNOWN 0x00000000
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1e09329acc42..d1848dc8ec99 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -125,33 +125,39 @@ struct wiphy;
* @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
* mode even in the presence of other (regulatory) restrictions,
* even if it is otherwise disabled.
+ * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
+ * with very low power (VLP), even if otherwise set to NO_IR.
+ * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
+ * even if otherwise set to NO_IR.
*/
enum ieee80211_channel_flags {
- IEEE80211_CHAN_DISABLED = 1<<0,
- IEEE80211_CHAN_NO_IR = 1<<1,
- IEEE80211_CHAN_PSD = 1<<2,
- IEEE80211_CHAN_RADAR = 1<<3,
- IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
- IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
- IEEE80211_CHAN_NO_OFDM = 1<<6,
- IEEE80211_CHAN_NO_80MHZ = 1<<7,
- IEEE80211_CHAN_NO_160MHZ = 1<<8,
- IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
- IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
- IEEE80211_CHAN_NO_20MHZ = 1<<11,
- IEEE80211_CHAN_NO_10MHZ = 1<<12,
- IEEE80211_CHAN_NO_HE = 1<<13,
- IEEE80211_CHAN_1MHZ = 1<<14,
- IEEE80211_CHAN_2MHZ = 1<<15,
- IEEE80211_CHAN_4MHZ = 1<<16,
- IEEE80211_CHAN_8MHZ = 1<<17,
- IEEE80211_CHAN_16MHZ = 1<<18,
- IEEE80211_CHAN_NO_320MHZ = 1<<19,
- IEEE80211_CHAN_NO_EHT = 1<<20,
- IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
- IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
- IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
- IEEE80211_CHAN_CAN_MONITOR = 1<<24,
+ IEEE80211_CHAN_DISABLED = BIT(0),
+ IEEE80211_CHAN_NO_IR = BIT(1),
+ IEEE80211_CHAN_PSD = BIT(2),
+ IEEE80211_CHAN_RADAR = BIT(3),
+ IEEE80211_CHAN_NO_HT40PLUS = BIT(4),
+ IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
+ IEEE80211_CHAN_NO_OFDM = BIT(6),
+ IEEE80211_CHAN_NO_80MHZ = BIT(7),
+ IEEE80211_CHAN_NO_160MHZ = BIT(8),
+ IEEE80211_CHAN_INDOOR_ONLY = BIT(9),
+ IEEE80211_CHAN_IR_CONCURRENT = BIT(10),
+ IEEE80211_CHAN_NO_20MHZ = BIT(11),
+ IEEE80211_CHAN_NO_10MHZ = BIT(12),
+ IEEE80211_CHAN_NO_HE = BIT(13),
+ IEEE80211_CHAN_1MHZ = BIT(14),
+ IEEE80211_CHAN_2MHZ = BIT(15),
+ IEEE80211_CHAN_4MHZ = BIT(16),
+ IEEE80211_CHAN_8MHZ = BIT(17),
+ IEEE80211_CHAN_16MHZ = BIT(18),
+ IEEE80211_CHAN_NO_320MHZ = BIT(19),
+ IEEE80211_CHAN_NO_EHT = BIT(20),
+ IEEE80211_CHAN_DFS_CONCURRENT = BIT(21),
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(22),
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23),
+ IEEE80211_CHAN_CAN_MONITOR = BIT(24),
+ IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
+ IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26),
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -229,13 +235,13 @@ struct ieee80211_channel {
* @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
*/
enum ieee80211_rate_flags {
- IEEE80211_RATE_SHORT_PREAMBLE = 1<<0,
- IEEE80211_RATE_MANDATORY_A = 1<<1,
- IEEE80211_RATE_MANDATORY_B = 1<<2,
- IEEE80211_RATE_MANDATORY_G = 1<<3,
- IEEE80211_RATE_ERP_G = 1<<4,
- IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5,
- IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6,
+ IEEE80211_RATE_SHORT_PREAMBLE = BIT(0),
+ IEEE80211_RATE_MANDATORY_A = BIT(1),
+ IEEE80211_RATE_MANDATORY_B = BIT(2),
+ IEEE80211_RATE_MANDATORY_G = BIT(3),
+ IEEE80211_RATE_ERP_G = BIT(4),
+ IEEE80211_RATE_SUPPORTS_5MHZ = BIT(5),
+ IEEE80211_RATE_SUPPORTS_10MHZ = BIT(6),
};
/**
@@ -1003,6 +1009,17 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width);
/**
+ * cfg80211_chandef_get_width - return chandef width in MHz
+ * @c: chandef to return bandwidth for
+ * Return: channel width in MHz for the given chandef; note that it returns
+ * 80 for 80+80 configurations
+ */
+static inline int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+ return nl80211_chan_width_to_mhz(c->width);
+}
+
+/**
* cfg80211_chandef_valid - check if a channel definition is valid
* @chandef: the channel definition to check
* Return: %true if the channel definition is valid. %false otherwise.
@@ -1080,43 +1097,6 @@ int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef);
/**
- * ieee80211_chanwidth_rate_flags - return rate flags for channel width
- * @width: the channel width of the channel
- *
- * In some channel types, not all rates may be used - for example CCK
- * rates may not be used in 5/10 MHz channels.
- *
- * Returns: rate flags which apply for this channel width
- */
-static inline enum ieee80211_rate_flags
-ieee80211_chanwidth_rate_flags(enum nl80211_chan_width width)
-{
- switch (width) {
- case NL80211_CHAN_WIDTH_5:
- return IEEE80211_RATE_SUPPORTS_5MHZ;
- case NL80211_CHAN_WIDTH_10:
- return IEEE80211_RATE_SUPPORTS_10MHZ;
- default:
- break;
- }
- return 0;
-}
-
-/**
- * ieee80211_chandef_rate_flags - returns rate flags for a channel
- * @chandef: channel definition for the channel
- *
- * See ieee80211_chanwidth_rate_flags().
- *
- * Returns: rate flags which apply for this channel
- */
-static inline enum ieee80211_rate_flags
-ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
-{
- return ieee80211_chanwidth_rate_flags(chandef->width);
-}
-
-/**
* ieee80211_chandef_max_power - maximum transmission power for the chandef
*
* In some regulations, the transmit power may depend on the configured channel
@@ -1149,6 +1129,8 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
* @band_mask: which bands to check on
* @prohibited_flags: which channels to not consider usable,
* %IEEE80211_CHAN_DISABLED is always taken into account
+ *
+ * Return: %true if usable channels found, %false otherwise
*/
bool cfg80211_any_usable_channels(struct wiphy *wiphy,
unsigned long band_mask,
@@ -1281,11 +1263,13 @@ struct cfg80211_crypto_settings {
* struct cfg80211_mbssid_config - AP settings for multi bssid
*
* @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @tx_link_id: link ID of the transmitted profile in an MLD.
* @index: index of this AP in the multi bssid group.
* @ema: set to true if the beacons should be sent out in EMA mode.
*/
struct cfg80211_mbssid_config {
struct wireless_dev *tx_wdev;
+ u8 tx_link_id;
u8 index;
bool ema;
};
@@ -1455,7 +1439,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
- * @smps_mode: SMPS mode
* @inactivity_timeout: time in seconds to determine station's inactivity.
* @p2p_ctwindow: P2P CT Window
* @p2p_opp_ps: P2P opportunistic PS
@@ -1493,7 +1476,6 @@ struct cfg80211_ap_settings {
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
- enum nl80211_smps_mode smps_mode;
int inactivity_timeout;
u8 p2p_ctwindow;
bool p2p_opp_ps;
@@ -1575,6 +1557,8 @@ struct cfg80211_csa_settings {
* @beacon_next: beacon data to be used after the color change
* @count: number of beacons until the color change
* @color: the color used after the change
+ * @link_id: defines the link on which color change is expected during MLO.
+ * 0 in case of non-MLO.
*/
struct cfg80211_color_change_settings {
struct cfg80211_beacon_data beacon_color_change;
@@ -1583,6 +1567,7 @@ struct cfg80211_color_change_settings {
struct cfg80211_beacon_data beacon_next;
u8 count;
u8 color;
+ u8 link_id;
};
/**
@@ -1590,6 +1575,7 @@ struct cfg80211_color_change_settings {
*
* Used to pass interface combination parameters
*
+ * @radio_idx: wiphy radio index or -1 for global
* @num_different_channels: the number of different channels we want
* to use for verification
* @radar_detect: a bitmap where each bit corresponds to a channel
@@ -1603,6 +1589,7 @@ struct cfg80211_color_change_settings {
* the verification
*/
struct iface_combination_params {
+ int radio_idx;
int num_different_channels;
u8 radar_detect;
int iftype_num[NUM_NL80211_IFTYPES];
@@ -1748,6 +1735,9 @@ struct cfg80211_ttlm_params {
* @supported_oper_classes_len: number of supported operating classes
* @support_p2p_ps: information if station supports P2P PS mechanism
* @airtime_weight: airtime scheduler weight for this station
+ * @eml_cap_present: Specifies if EML capabilities field (@eml_cap) is
+ * present/updated
+ * @eml_cap: EML capabilities of this station
* @link_sta_params: link related params.
*/
struct station_parameters {
@@ -1772,6 +1762,8 @@ struct station_parameters {
u8 supported_oper_classes_len;
int support_p2p_ps;
u16 airtime_weight;
+ bool eml_cap_present;
+ u16 eml_cap;
struct link_station_parameters link_sta_params;
};
@@ -1833,9 +1825,11 @@ enum cfg80211_station_type {
*
* Utility function for the @change_station driver method. Call this function
* with the appropriate station type looking up the station (and checking that
- * it exists). It will verify whether the station change is acceptable, and if
- * not will return an error code. Note that it may modify the parameters for
- * backward compatibility reasons, so don't use them before calling this.
+ * it exists). It will verify whether the station change is acceptable.
+ *
+ * Return: 0 if the change is acceptable, otherwise an error code. Note that
+ * it may modify the parameters for backward compatibility reasons, so don't
+ * use them before calling this.
*/
int cfg80211_check_station_change(struct wiphy *wiphy,
struct station_parameters *params,
@@ -1950,9 +1944,9 @@ struct rate_info {
* @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled
*/
enum bss_param_flags {
- BSS_PARAM_FLAGS_CTS_PROT = 1<<0,
- BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1,
- BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2,
+ BSS_PARAM_FLAGS_CTS_PROT = BIT(0),
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE = BIT(1),
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME = BIT(2),
};
/**
@@ -2035,9 +2029,6 @@ struct cfg80211_tid_stats {
* @assoc_at: bootime (ns) of the last association
* @rx_bytes: bytes (size of MPDUs) received from this station
* @tx_bytes: bytes (size of MPDUs) transmitted to this station
- * @llid: mesh local link id
- * @plid: mesh peer link id
- * @plink_state: mesh peer link state
* @signal: The signal strength, type depends on the wiphy's signal_type.
* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
@@ -2057,14 +2048,20 @@ struct cfg80211_tid_stats {
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
* @assoc_req_ies: IEs from (Re)Association Request.
* This is used only when in AP mode with drivers that do not use
* user space MLME/SME implementation. The information is provided for
* the cfg80211_new_sta() calls to notify user space of the IEs.
* @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
* @sta_flags: station flags mask & values
- * @beacon_loss_count: Number of times beacon loss event has triggered.
* @t_offset: Time offset of the station relative to this host.
+ * @llid: mesh local link id
+ * @plid: mesh peer link id
+ * @plink_state: mesh peer link state
+ * @connected_to_gate: true if mesh STA has a path to mesh gate
+ * @connected_to_as: true if mesh STA has a path to authentication server
+ * @airtime_link_metric: mesh airtime link metric.
* @local_pm: local mesh STA power save mode
* @peer_pm: peer mesh STA power save mode
* @nonpeer_pm: non-peer mesh STA power save mode
@@ -2073,7 +2070,6 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
- * @connected_to_gate: true if mesh STA has a path to mesh gate
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
* @airtime_weight: current airtime scheduling weight
@@ -2087,8 +2083,6 @@ struct cfg80211_tid_stats {
* @fcs_err_count: number of packets (MPDUs) received from this station with
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
- * @airtime_link_metric: mesh airtime link metric.
- * @connected_to_as: true if mesh STA has a path to authentication server
* @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled
* by driver. Drivers use this only in cfg80211_new_sta() calls when AP
* MLD's MLME/SME is offload to driver. Drivers won't fill this
@@ -2115,9 +2109,6 @@ struct station_info {
u64 assoc_at;
u64 rx_bytes;
u64 tx_bytes;
- u16 llid;
- u16 plid;
- u8 plink_state;
s8 signal;
s8 signal_avg;
@@ -2137,36 +2128,38 @@ struct station_info {
int generation;
+ u32 beacon_loss_count;
+
const u8 *assoc_req_ies;
size_t assoc_req_ies_len;
- u32 beacon_loss_count;
s64 t_offset;
+ u16 llid;
+ u16 plid;
+ u8 plink_state;
+ u8 connected_to_gate;
+ u8 connected_to_as;
+ u32 airtime_link_metric;
enum nl80211_mesh_power_mode local_pm;
enum nl80211_mesh_power_mode peer_pm;
enum nl80211_mesh_power_mode nonpeer_pm;
u32 expected_throughput;
- u64 tx_duration;
- u64 rx_duration;
- u64 rx_beacon;
- u8 rx_beacon_signal_avg;
- u8 connected_to_gate;
+ u16 airtime_weight;
- struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
+ struct cfg80211_tid_stats *pertid;
- u16 airtime_weight;
+ u64 tx_duration;
+ u64 rx_duration;
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
u32 rx_mpdu_count;
u32 fcs_err_count;
- u32 airtime_link_metric;
-
- u8 connected_to_as;
-
bool mlo_params_valid;
u8 assoc_link_id;
u8 mld_addr[ETH_ALEN] __aligned(2);
@@ -2193,7 +2186,7 @@ struct cfg80211_sar_sub_specs {
struct cfg80211_sar_specs {
enum nl80211_sar_type type;
u32 num_sub_specs;
- struct cfg80211_sar_sub_specs sub_specs[];
+ struct cfg80211_sar_sub_specs sub_specs[] __counted_by(num_sub_specs);
};
@@ -2229,7 +2222,7 @@ struct cfg80211_sar_capa {
* @mac_addr: the mac address of the station of interest
* @sinfo: pointer to the structure to fill with the information
*
- * Returns 0 on success and sinfo is filled with the available information
+ * Return: 0 on success and sinfo is filled with the available information
* otherwise returns a negative error code and the content of sinfo has to be
* considered undefined.
*/
@@ -2255,17 +2248,19 @@ static inline int cfg80211_get_station(struct net_device *dev,
* @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP
* @MONITOR_FLAG_CONTROL: pass control frames
* @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
- * @MONITOR_FLAG_COOK_FRAMES: report frames after processing
+ * @MONITOR_FLAG_COOK_FRAMES: deprecated, will unconditionally be refused
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
+ * @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames
*/
enum monitor_flags {
- MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID,
- MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
- MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL,
- MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
- MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS,
- MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES,
- MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE,
+ MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID),
+ MONITOR_FLAG_FCSFAIL = BIT(NL80211_MNTR_FLAG_FCSFAIL),
+ MONITOR_FLAG_PLCPFAIL = BIT(NL80211_MNTR_FLAG_PLCPFAIL),
+ MONITOR_FLAG_CONTROL = BIT(NL80211_MNTR_FLAG_CONTROL),
+ MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS),
+ MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES),
+ MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE),
+ MONITOR_FLAG_SKIP_TX = BIT(NL80211_MNTR_FLAG_SKIP_TX),
};
/**
@@ -2831,7 +2826,7 @@ struct cfg80211_sched_scan_request {
struct list_head list;
/* keep last */
- struct ieee80211_channel *channels[];
+ struct ieee80211_channel *channels[] __counted_by(n_channels);
};
/**
@@ -2935,6 +2930,7 @@ struct cfg80211_bss_ies {
* @nontrans_list: list of non-transmitted BSS, if this is a transmitted one
* (multi-BSSID support)
* @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @ts_boottime: timestamp of the last BSS update in nanoseconds since boot
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @bssid_index: index in the multiple BSS set
@@ -2959,6 +2955,8 @@ struct cfg80211_bss {
s32 signal;
+ u64 ts_boottime;
+
u16 beacon_interval;
u16 capability;
@@ -3011,6 +3009,10 @@ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
*
* @bss: The BSS to authenticate with, the callee must obtain a reference
* to it if it needs to keep it.
+ * @supported_selectors: List of selectors that should be assumed to be
+ * supported by the station.
+ * SAE_H2E must be assumed supported if set to %NULL.
+ * @supported_selectors_len: Length of supported_selectors in octets.
* @auth_type: Authentication type (algorithm)
* @ie: Extra IEs to add to Authentication frame or %NULL
* @ie_len: Length of ie buffer in octets
@@ -3033,6 +3035,8 @@ struct cfg80211_auth_request {
struct cfg80211_bss *bss;
const u8 *ie;
size_t ie_len;
+ const u8 *supported_selectors;
+ u8 supported_selectors_len;
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len;
@@ -3063,6 +3067,19 @@ struct cfg80211_assoc_link {
};
/**
+ * struct cfg80211_ml_reconf_req - MLO link reconfiguration request
+ * @add_links: data for links to add, see &struct cfg80211_assoc_link
+ * @rem_links: bitmap of links to remove
+ * @ext_mld_capa_ops: extended MLD capabilities and operations set by
+ * userspace for the ML reconfiguration action frame
+ */
+struct cfg80211_ml_reconf_req {
+ struct cfg80211_assoc_link add_links[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 rem_links;
+ u16 ext_mld_capa_ops;
+};
+
+/**
* enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
*
* @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
@@ -3112,6 +3129,10 @@ enum cfg80211_assoc_req_flags {
* included in the Current AP address field of the Reassociation Request
* frame.
* @flags: See &enum cfg80211_assoc_req_flags
+ * @supported_selectors: supported BSS selectors in IEEE 802.11 format
+ * (or %NULL for no change).
+ * If %NULL, then support for SAE_H2E should be assumed.
+ * @supported_selectors_len: number of supported BSS selectors
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
@@ -3130,6 +3151,8 @@ enum cfg80211_assoc_req_flags {
* the link on which the association request should be sent
* @ap_mld_addr: AP MLD address in case of MLO association request,
* valid iff @link_id >= 0
+ * @ext_mld_capa_ops: extended MLD capabilities and operations set by
+ * userspace for the association
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -3138,6 +3161,8 @@ struct cfg80211_assoc_request {
struct cfg80211_crypto_settings crypto;
bool use_mfp;
u32 flags;
+ const u8 *supported_selectors;
+ u8 supported_selectors_len;
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa, vht_capa_mask;
@@ -3148,6 +3173,7 @@ struct cfg80211_assoc_request {
struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
const u8 *ap_mld_addr;
s8 link_id;
+ u16 ext_mld_capa_ops;
};
/**
@@ -3392,15 +3418,15 @@ enum cfg80211_connect_params_changed {
* @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum
*/
enum wiphy_params_flags {
- WIPHY_PARAM_RETRY_SHORT = 1 << 0,
- WIPHY_PARAM_RETRY_LONG = 1 << 1,
- WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
- WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
- WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
- WIPHY_PARAM_DYN_ACK = 1 << 5,
- WIPHY_PARAM_TXQ_LIMIT = 1 << 6,
- WIPHY_PARAM_TXQ_MEMORY_LIMIT = 1 << 7,
- WIPHY_PARAM_TXQ_QUANTUM = 1 << 8,
+ WIPHY_PARAM_RETRY_SHORT = BIT(0),
+ WIPHY_PARAM_RETRY_LONG = BIT(1),
+ WIPHY_PARAM_FRAG_THRESHOLD = BIT(2),
+ WIPHY_PARAM_RTS_THRESHOLD = BIT(3),
+ WIPHY_PARAM_COVERAGE_CLASS = BIT(4),
+ WIPHY_PARAM_DYN_ACK = BIT(5),
+ WIPHY_PARAM_TXQ_LIMIT = BIT(6),
+ WIPHY_PARAM_TXQ_MEMORY_LIMIT = BIT(7),
+ WIPHY_PARAM_TXQ_QUANTUM = BIT(8),
};
#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
@@ -3559,8 +3585,8 @@ struct cfg80211_coalesce_rules {
* @n_rules: number of rules
*/
struct cfg80211_coalesce {
- struct cfg80211_coalesce_rules *rules;
int n_rules;
+ struct cfg80211_coalesce_rules rules[] __counted_by(n_rules);
};
/**
@@ -3575,7 +3601,7 @@ struct cfg80211_coalesce {
struct cfg80211_wowlan_nd_match {
struct cfg80211_ssid ssid;
int n_channels;
- u32 channels[];
+ u32 channels[] __counted_by(n_channels);
};
/**
@@ -3589,7 +3615,7 @@ struct cfg80211_wowlan_nd_match {
*/
struct cfg80211_wowlan_nd_info {
int n_matches;
- struct cfg80211_wowlan_nd_match *matches[];
+ struct cfg80211_wowlan_nd_match *matches[] __counted_by(n_matches);
};
/**
@@ -4570,6 +4596,18 @@ struct mgmt_frame_regs {
*
* @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
* @set_ttlm: set the TID to link mapping.
+ * @set_epcs: Enable/Disable EPCS for station mode.
+ * @get_radio_mask: get bitmask of radios in use.
+ * (invoked with the wiphy mutex held)
+ * @assoc_ml_reconf: Request a non-AP MLO connection to perform ML
+ * reconfiguration, i.e., add and/or remove links to/from the
+ * association using ML reconfiguration action frames. Successfully added
+ * links will be added to the set of valid links. Successfully removed
+ * links will be removed from the set of valid links. The driver must
+ * indicate removed links by calling cfg80211_links_removed() and added
+ * links by calling cfg80211_mlo_reconf_add_done(). When calling
+ * cfg80211_mlo_reconf_add_done() the bss pointer must be given for each
+ * link for which MLO reconfiguration 'add' operation was requested.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4682,6 +4720,7 @@ struct cfg80211_ops {
struct ieee80211_channel *chan);
int (*set_monitor_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef);
int (*scan)(struct wiphy *wiphy,
@@ -4718,7 +4757,7 @@ struct cfg80211_ops {
int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm);
+ unsigned int link_id, int *dbm);
void (*rfkill_poll)(struct wiphy *wiphy);
@@ -4823,9 +4862,9 @@ struct cfg80211_ops {
int (*start_radar_detection)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms);
+ u32 cac_time_ms, int link_id);
void (*end_cac)(struct wiphy *wiphy,
- struct net_device *dev);
+ struct net_device *dev, unsigned int link_id);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
@@ -4931,6 +4970,11 @@ struct cfg80211_ops {
struct cfg80211_set_hw_timestamp *hwts);
int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ttlm_params *params);
+ u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev);
+ int (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ml_reconf_req *req);
+ int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev,
+ bool val);
};
/*
@@ -5036,7 +5080,9 @@ struct ieee80211_iface_limit {
* struct ieee80211_iface_combination - possible interface combination
*
* With this structure the driver can describe which interface
- * combinations it supports concurrently.
+ * combinations it supports concurrently. When set in a struct wiphy_radio,
+ * the combinations refer to combinations of interfaces currently active on
+ * that radio.
*
* Examples:
*
@@ -5334,6 +5380,8 @@ struct wiphy_iftype_ext_capab {
* cfg80211_get_iftype_ext_capa - lookup interface type extended capability
* @wiphy: the wiphy to look up from
* @type: the interface type to look up
+ *
+ * Return: The extended capability for the given interface @type, may be %NULL
*/
const struct wiphy_iftype_ext_capab *
cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type);
@@ -5394,6 +5442,42 @@ struct wiphy_iftype_akm_suites {
int n_akm_suites;
};
+/**
+ * struct wiphy_radio_freq_range - wiphy frequency range
+ * @start_freq: start range edge frequency (kHz)
+ * @end_freq: end range edge frequency (kHz)
+ */
+struct wiphy_radio_freq_range {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+/**
+ * struct wiphy_radio - physical radio of a wiphy
+ * This structure describes a physical radio belonging to a wiphy.
+ * It is used to describe concurrent-channel capabilities. Only one channel
+ * can be active on the radio described by struct wiphy_radio.
+ *
+ * @freq_range: frequency range that the radio can operate on.
+ * @n_freq_range: number of elements in @freq_range
+ *
+ * @iface_combinations: Valid interface combinations array, should not
+ * list single interface types.
+ * @n_iface_combinations: number of entries in @iface_combinations array.
+ *
+ * @antenna_mask: bitmask of antennas connected to this radio.
+ */
+struct wiphy_radio {
+ const struct wiphy_radio_freq_range *freq_range;
+ int n_freq_range;
+
+ const struct ieee80211_iface_combination *iface_combinations;
+ int n_iface_combinations;
+
+ u32 antenna_mask;
+};
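
A hypothetical driver-side declaration (foo_* names invented; frequencies in kHz as the field docs state) might look like:

static const struct wiphy_radio_freq_range foo_2ghz = {
	.start_freq = 2400000,	/* 2.4 GHz band, kHz */
	.end_freq = 2500000,
};

static const struct wiphy_radio_freq_range foo_5ghz = {
	.start_freq = 5170000,
	.end_freq = 5835000,
};

static const struct wiphy_radio foo_radios[] = {
	{ .freq_range = &foo_2ghz, .n_freq_range = 1, .antenna_mask = BIT(0) },
	{ .freq_range = &foo_5ghz, .n_freq_range = 1, .antenna_mask = BIT(1) },
};

/* then: wiphy->radio = foo_radios; wiphy->n_radio = ARRAY_SIZE(foo_radios); */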
+
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
/**
@@ -5612,6 +5696,9 @@ struct wiphy_iftype_akm_suites {
* A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver
* supports enabling HW timestamping for all peers (i.e. no need to
* specify a mac address).
+ *
+ * @radio: radios belonging to this wiphy
+ * @n_radio: number of radios
*/
struct wiphy {
struct mutex mtx;
@@ -5762,6 +5849,9 @@ struct wiphy {
u16 hw_timestamp_max_peers;
+ int n_radio;
+ const struct wiphy_radio *radio;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -5895,7 +5985,7 @@ int wiphy_register(struct wiphy *wiphy);
* @wiphy: the wiphy to check the locking on
* @p: The pointer to read, prior to dereferencing
*
- * Return the value of the specified RCU-protected pointer, but omit the
+ * Return: the value of the specified RCU-protected pointer, but omit the
* READ_ONCE(), because caller holds the wiphy mutex used for updates.
*/
#define wiphy_dereference(wiphy, p) \
@@ -5904,6 +5994,10 @@ int wiphy_register(struct wiphy *wiphy);
/**
* get_wiphy_regdom - get custom regdomain for the given wiphy
* @wiphy: the wiphy to get the regdomain from
+ *
+ * Context: Requires any of RTNL, wiphy mutex or RCU protection.
+ *
+ * Return: pointer to the regulatory domain associated with the wiphy
*/
const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy);
@@ -5965,6 +6059,10 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
mutex_unlock(&wiphy->mtx);
}
+DEFINE_GUARD(wiphy, struct wiphy *,
+ mutex_lock(&_T->mtx),
+ mutex_unlock(&_T->mtx))
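
DEFINE_GUARD() hooks the wiphy mutex into the cleanup.h scope-based locking helpers, so a function can hold the lock for its full scope without an explicit unlock on every return path; a usage sketch:

/* guard(wiphy)(...) locks wiphy->mtx and unlocks it automatically
 * when the guard variable goes out of scope. */
static int example_with_wiphy_locked(struct wiphy *wiphy)
{
	guard(wiphy)(wiphy);

	/* ... work that requires the wiphy mutex ... */

	return 0;	/* mutex released here on all return paths */
}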
+
struct wiphy_work;
typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
@@ -6069,6 +6167,65 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
/**
+ * wiphy_delayed_work_pending - Find out whether a wiphy delayable
+ * work item is currently pending.
+ *
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work in question
+ *
+ * Return: true if timer is pending, false otherwise
+ *
+ * wiphy_delayed_work_queue() works by setting a timer which, when it
+ * expires, calls wiphy_work_queue() to queue the wiphy work.
+ * Because wiphy_delayed_work_queue() uses mod_timer(), if it is
+ * called twice and the second call happens before the first call's
+ * deadline, the work will be rescheduled for the second deadline and
+ * won't run before that.
+ *
+ * wiphy_delayed_work_pending() can be used to detect whether calling
+ * wiphy_delayed_work_queue() would start a new work schedule or delay
+ * a previous one. As seen below, it cannot be used to detect precisely
+ * whether the work has finished executing, nor whether it is currently
+ * executing.
+ *
+ * CPU0 CPU1
+ * wiphy_delayed_work_queue(wk)
+ * mod_timer(wk->timer)
+ * wiphy_delayed_work_pending(wk) -> true
+ *
+ * [...]
+ * expire_timers(wk->timer)
+ * detach_timer(wk->timer)
+ * wiphy_delayed_work_pending(wk) -> false
+ * wk->timer->function() |
+ * wiphy_work_queue(wk) | delayed work pending
+ * list_add_tail() | returns false but
+ * queue_work(cfg80211_wiphy_work) | wk->func() has not
+ * | been run yet
+ * [...] |
+ * cfg80211_wiphy_work() |
+ * wk->func() V
+ *
+ */
+bool wiphy_delayed_work_pending(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
+/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+};
+
+/**
* struct wireless_dev - wireless device state
*
* For netdevs, this structure must be allocated by the driver
@@ -6118,9 +6275,6 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
- * @cac_started: true if DFS channel availability check has been started
- * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
- * @cac_time_ms: CAC time in ms
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
* @ap_unexpected_nlportid: (private) netlink port ID of application
@@ -6144,7 +6298,13 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* unprotected beacon report
* @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr
* @ap and @client for each link
+ * @links.cac_started: true if DFS channel availability check has been
+ * started
+ * @links.cac_start_time: timestamp (jiffies) when the dfs state was
+ * entered.
+ * @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
+ * @radio_mask: Bitmask of radios that this interface is allowed to operate on.
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -6185,11 +6345,6 @@ struct wireless_dev {
u32 owner_nlportid;
bool nl_owner_dead;
- /* FIXME: need to rework radar detection for MLO */
- bool cac_started;
- unsigned long cac_start_time;
- unsigned int cac_time_ms;
-
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -6256,8 +6411,14 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss;
} client;
};
+
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
+
+ u32 radio_mask;
};
static inline const u8 *wdev_address(struct wireless_dev *wdev)
@@ -6421,6 +6582,8 @@ ieee80211_get_channel(struct wiphy *wiphy, int freq)
*
* The Preferred Scanning Channels (PSC) are defined in
* Draft IEEE P802.11ax/D5.0, 26.17.2.3.3
+ *
+ * Return: %true if channel is a PSC, %false otherwise
*/
static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
{
@@ -6431,6 +6594,28 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
}
/**
+ * cfg80211_radio_chandef_valid - Check if the radio supports the chandef
+ *
+ * @radio: wiphy radio
+ * @chandef: chandef for current channel
+ *
+ * Return: whether or not the given chandef is valid for the given radio
+ */
+bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
+ const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_wdev_channel_allowed - Check if the wdev may use the channel
+ *
+ * @wdev: the wireless device
+ * @chan: channel to check
+ *
+ * Return: whether or not the wdev may use the channel
+ */
+bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan);
+
+/**
* ieee80211_get_response_rate - get basic rate for a given rate
*
* @sband: the band to look for rates in
@@ -6450,8 +6635,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
* ieee80211_mandatory_rates - get mandatory rates for a given band
* @sband: the band to look for rates in
*
- * This function returns a bitmap of the mandatory rates for the given
- * band, bits are set according to the rate position in the bitrates array.
+ * Return: a bitmap of the mandatory rates for the given band, bits
+ * are set according to the rate position in the bitrates array.
*/
u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
@@ -6665,6 +6850,8 @@ bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto);
* header to the ethernet header (if present).
*
* @skb: The 802.3 frame with embedded mesh header
+ *
+ * Return: 0 on success. Non-zero on error.
*/
int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb);
@@ -7043,6 +7230,8 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
*
* You can use this to map the regulatory request initiator enum to a
* proper string representation.
+ *
+ * Return: pointer to string representation of the initiator
*/
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
@@ -7051,6 +7240,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
* @wiphy: wiphy for which pre-CAC capability is checked.
*
* Pre-CAC is allowed only in some regdomains (notable ETSI).
+ *
+ * Return: %true if allowed, %false otherwise
*/
bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
@@ -7185,6 +7376,8 @@ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
* @non_inherit_element: non inheritance element
+ *
+ * Return: %true if should be inherited, %false otherwise
*/
bool cfg80211_is_element_inherited(const struct element *element,
const struct element *non_inherit_element);
@@ -7197,6 +7390,8 @@ bool cfg80211_is_element_inherited(const struct element *element,
* @sub_elem: current MBSSID subelement (profile)
* @merged_ie: location of the merged profile
* @max_copy_len: max merged profile length
+ *
+ * Return: the number of bytes merged
*/
size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
const struct element *mbssid_elem,
@@ -7224,7 +7419,7 @@ enum cfg80211_bss_frame_type {
* @ielen: length of IEs
* @band: enum nl80211_band of the channel
*
- * Returns the channel number, or -1 if none could be determined.
+ * Return: the channel number, or -1 if none could be determined.
*/
int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
enum nl80211_band band);
@@ -7302,6 +7497,8 @@ cfg80211_inform_bss(struct wiphy *wiphy,
* @bss_type: type of BSS, see &enum ieee80211_bss_type
* @privacy: privacy filter, see &enum ieee80211_privacy
* @use_for: indicates which use is intended
+ *
+ * Return: Reference-counted BSS on success. %NULL on error.
*/
struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -7322,6 +7519,8 @@ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
* @privacy: privacy filter, see &enum ieee80211_privacy
*
* This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL.
+ *
+ * Return: Reference-counted BSS on success. %NULL on error.
*/
static inline struct cfg80211_bss *
cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel,
@@ -7706,8 +7905,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
* cfg80211_vendor_cmd_get_sender - get the current sender netlink ID
* @wiphy: the wiphy
*
- * Return the current netlink port ID in a vendor command handler.
- * Valid to call only there.
+ * Return: the current netlink port ID in a vendor command handler.
+ *
+ * Context: May only be called from a vendor command handler
*/
unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy);
@@ -8260,6 +8460,8 @@ void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie,
*
* @sinfo: the station information
* @gfp: allocation flags
+ *
+ * Return: 0 on success. Non-zero on error.
*/
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
@@ -8634,6 +8836,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
* @chandef: chandef for the current channel
* @event: type of event
* @gfp: context flags
+ * @link_id: valid link_id for MLO operation or 0 otherwise.
*
* This function is called when a Channel availability check (CAC) is finished
* or aborted. This must be called to notify the completion of a CAC process,
@@ -8641,7 +8844,8 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
*/
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id);
/**
* cfg80211_background_cac_abort - Channel Availability Check offchan abort event
@@ -8754,6 +8958,34 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
}
/**
+ * struct cfg80211_beaconing_check_config - beacon check configuration
+ * @iftype: the interface type to check for
+ * @relax: allow IR-relaxation conditions to apply (e.g. another
+ * interface connected already on the same channel)
+ * NOTE: If this is set, wiphy mutex must be held.
+ * @reg_power: &enum ieee80211_ap_reg_power value indicating the
+ * advertised/used 6 GHz regulatory power setting
+ */
+struct cfg80211_beaconing_check_config {
+ enum nl80211_iftype iftype;
+ enum ieee80211_ap_reg_power reg_power;
+ bool relax;
+};
+
+/**
+ * cfg80211_reg_check_beaconing - check if beaconing is allowed
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @cfg: additional parameters for the checking
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.)
+ */
+bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ struct cfg80211_beaconing_check_config *cfg);
+
+/**
* cfg80211_reg_can_beacon - check if beaconing is allowed
* @wiphy: the wiphy
* @chandef: the channel definition
@@ -8762,9 +8994,17 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
* Return: %true if there is no secondary channel or the secondary channel(s)
* can be used for beaconing (i.e. they are not radar channels, etc.)
*/
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype);
+static inline bool
+cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ struct cfg80211_beaconing_check_config config = {
+ .iftype = iftype,
+ };
+
+ return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
+}
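A usage sketch (illustrative, not part of the header): a driver that needs the 6 GHz power-mode-aware variant can fill the config explicitly instead of using the wrappers:

	struct cfg80211_beaconing_check_config config = {
		.iftype = NL80211_IFTYPE_AP,
		.reg_power = IEEE80211_REG_LPI_AP,	/* assumed power mode */
	};

	if (!cfg80211_reg_check_beaconing(wiphy, chandef, &config))
		return -EINVAL;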
/**
* cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
@@ -8777,13 +9017,22 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
* also checks if IR-relaxation conditions apply, to allow beaconing under
* more permissive conditions.
*
- * Requires the wiphy mutex to be held.
+ * Context: Requires the wiphy mutex to be held.
*/
-bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype);
+static inline bool
+cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ struct cfg80211_beaconing_check_config config = {
+ .iftype = iftype,
+ .relax = true,
+ };
-/*
+ return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
+}
+
+/**
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
* @dev: the device which switched channels
* @chandef: the new channel definition
@@ -8796,7 +9045,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id);
-/*
+/**
* cfg80211_ch_switch_started_notify - notify channel switch start
* @dev: the device on which the channel switch started
* @chandef: the future channel definition
@@ -8819,7 +9068,7 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
* @operating_class: the operating class to convert
* @band: band pointer to fill
*
- * Returns %true if the conversion was successful, %false otherwise.
+ * Return: %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_operating_class_to_band(u8 operating_class,
enum nl80211_band *band);
@@ -8831,7 +9080,7 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
* @chan: the ieee80211_channel to convert
* @chandef: a pointer to the resulting chandef
*
- * Returns %true if the conversion was successful, %false otherwise.
+ * Return: %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_operating_class_to_chandef(u8 operating_class,
struct ieee80211_channel *chan,
@@ -8843,7 +9092,7 @@ bool ieee80211_operating_class_to_chandef(u8 operating_class,
* @chandef: the chandef to convert
* @op_class: a pointer to the resulting operating class
*
- * Returns %true if the conversion was successful, %false otherwise.
+ * Return: %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class);
@@ -8853,7 +9102,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
*
* @chandef: the chandef to convert
*
- * Returns the center frequency of chandef (1st segment) in KHz.
+ * Return: the center frequency of chandef (1st segment) in KHz.
*/
static inline u32
ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef)
@@ -8861,7 +9110,7 @@ ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef)
return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
}
-/*
+/**
* cfg80211_tdls_oper_request - request userspace to perform TDLS operation
* @dev: the device on which the operation is requested
* @peer: the MAC address of the peer device
@@ -8880,11 +9129,11 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
enum nl80211_tdls_operation oper,
u16 reason_code, gfp_t gfp);
-/*
+/**
* cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
* @rate: given rate_info to calculate bitrate from
*
- * return 0 if MCS index >= 32
+ * Return: calculated bitrate
*/
u32 cfg80211_calculate_bitrate(struct rate_info *rate);
@@ -8898,7 +9147,7 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate);
* when the driver wishes to unregister the wdev, e.g. when the hardware device
* is unbound from the driver.
*
- * Requires the RTNL and wiphy mutex to be held.
+ * Context: Requires the RTNL and wiphy mutex to be held.
*/
void cfg80211_unregister_wdev(struct wireless_dev *wdev);
@@ -8911,7 +9160,9 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev);
* held. Otherwise, both register_netdevice() and register_netdev() are usable
* instead as well.
*
- * Requires the RTNL and wiphy mutex to be held.
+ * Context: Requires the RTNL and wiphy mutex to be held.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
int cfg80211_register_netdevice(struct net_device *dev);
@@ -8924,7 +9175,7 @@ int cfg80211_register_netdevice(struct net_device *dev);
* is held. Otherwise, both unregister_netdevice() and unregister_netdev() are
* usable instead as well.
*
- * Requires the RTNL and wiphy mutex to be held.
+ * Context: Requires the RTNL and wiphy mutex to be held.
*/
static inline void cfg80211_unregister_netdevice(struct net_device *dev)
{
@@ -9000,9 +9251,9 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
* correctly, if not the result of using this function will not
* be ordered correctly either, i.e. it does no reordering.
*
- * The function returns the offset where the next part of the
- * buffer starts, which may be @ielen if the entire (remainder)
- * of the buffer should be used.
+ * Return: The offset where the next part of the buffer starts, which
+ * may be @ielen if the entire (remainder) of the buffer should be
+ * used.
*/
size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
const u8 *ids, int n_ids,
@@ -9030,9 +9281,9 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
* correctly, if not the result of using this function will not
* be ordered correctly either, i.e. it does no reordering.
*
- * The function returns the offset where the next part of the
- * buffer starts, which may be @ielen if the entire (remainder)
- * of the buffer should be used.
+ * Return: The offset where the next part of the buffer starts, which
+ * may be @ielen if the entire (remainder) of the buffer should be
+ * used.
*/
static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
const u8 *ids, int n_ids, size_t offset)
@@ -9096,6 +9347,8 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
* This function can be called by the driver to check whether a
* combination of interfaces and their types are allowed according to
* the interface combinations.
+ *
+ * Return: 0 if combinations are allowed. Non-zero on error.
*/
int cfg80211_check_combinations(struct wiphy *wiphy,
struct iface_combination_params *params);
@@ -9111,6 +9364,8 @@ int cfg80211_check_combinations(struct wiphy *wiphy,
* This function can be called by the driver to check what possible
* combinations it fits in at a given moment, e.g. for channel switching
* purposes.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
int cfg80211_iter_combinations(struct wiphy *wiphy,
struct iface_combination_params *params,
@@ -9118,7 +9373,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
void *data),
void *data);
-/*
+/**
* cfg80211_stop_iface - trigger interface disconnection
*
* @wiphy: the wiphy
@@ -9173,6 +9428,8 @@ static inline void wiphy_ext_feature_set(struct wiphy *wiphy,
*
* The extended features are flagged in multiple bytes (see
* &struct wiphy.@ext_features)
+ *
+ * Return: %true if extended feature flag is set, %false otherwise
*/
static inline bool
wiphy_ext_feature_isset(struct wiphy *wiphy,
@@ -9294,6 +9551,8 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
* Check whether the interface is allowed to operate; additionally, this API
* can be used to check iftype against the software interfaces when
* check_swif is '1'.
+ *
+ * Return: %true if allowed, %false otherwise
*/
bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
bool is_4addr, u8 check_swif);
@@ -9386,60 +9645,78 @@ void cfg80211_bss_flush(struct wiphy *wiphy);
* @cmd: the actual event we want to notify
* @count: the number of TBTTs until the color change happens
* @color_bitmap: representations of the colors that the local BSS is aware of
+ * @link_id: valid link_id in case of MLO or 0 for non-MLO.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
int cfg80211_bss_color_notify(struct net_device *dev,
enum nl80211_commands cmd, u8 count,
- u64 color_bitmap);
+ u64 color_bitmap, u8 link_id);
/**
* cfg80211_obss_color_collision_notify - notify about bss color collision
* @dev: network device
* @color_bitmap: representations of the colors that the local BSS is aware of
+ * @link_id: valid link_id in case of MLO or 0 for non-MLO.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
static inline int cfg80211_obss_color_collision_notify(struct net_device *dev,
- u64 color_bitmap)
+ u64 color_bitmap,
+ u8 link_id)
{
return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION,
- 0, color_bitmap);
+ 0, color_bitmap, link_id);
}
/**
* cfg80211_color_change_started_notify - notify color change start
* @dev: the device on which the color is switched
* @count: the number of TBTTs until the color change happens
+ * @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Inform the userspace about the color change that has started.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
static inline int cfg80211_color_change_started_notify(struct net_device *dev,
- u8 count)
+ u8 count, u8 link_id)
{
return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED,
- count, 0);
+ count, 0, link_id);
}
/**
* cfg80211_color_change_aborted_notify - notify color change abort
* @dev: the device on which the color is switched
+ * @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Inform the userspace about the color change that has aborted.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
-static inline int cfg80211_color_change_aborted_notify(struct net_device *dev)
+static inline int cfg80211_color_change_aborted_notify(struct net_device *dev,
+ u8 link_id)
{
return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED,
- 0, 0);
+ 0, 0, link_id);
}
/**
* cfg80211_color_change_notify - notify color change completion
* @dev: the device on which the color was switched
+ * @link_id: valid link_id in case of MLO or 0 for non-MLO.
*
* Inform the userspace about the color change that has completed.
+ *
+ * Return: 0 on success. Non-zero on error.
*/
-static inline int cfg80211_color_change_notify(struct net_device *dev)
+static inline int cfg80211_color_change_notify(struct net_device *dev,
+ u8 link_id)
{
return cfg80211_bss_color_notify(dev,
NL80211_CMD_COLOR_CHANGE_COMPLETED,
- 0, 0);
+ 0, 0, link_id);
}
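An illustrative call sequence under the new signatures, with the link ID assumed to come from the driver's per-link state:

	cfg80211_color_change_started_notify(netdev, count, link_id);
	/* ... once the new BSS color is in use ... */
	cfg80211_color_change_notify(netdev, link_id);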
/**
@@ -9456,6 +9733,40 @@ static inline int cfg80211_color_change_notify(struct net_device *dev)
void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
/**
+ * struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data
+ * @buf: MLO Reconfiguration Response frame (header + body)
+ * @len: length of the frame data
+ * @added_links: bitmask of links successfully added to the association
+ * @links: per-link information indexed by link ID
+ * @links.bss: the BSS that MLO reconfiguration was requested for, ownership of
+ * the pointer moves to cfg80211 in the call to
+ * cfg80211_mlo_reconf_add_done().
+ *
+ * The BSS pointer must be set for each link for which an 'add' operation was
+ * requested in the assoc_ml_reconf callback.
+ */
+struct cfg80211_mlo_reconf_done_data {
+ const u8 *buf;
+ size_t len;
+ u16 added_links;
+ struct {
+ struct cfg80211_bss *bss;
+ u8 *addr;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+/**
+ * cfg80211_mlo_reconf_add_done - Notify about MLO reconfiguration result
+ * @dev: network device.
+ * @data: MLO reconfiguration done data, &struct cfg80211_mlo_reconf_done_data
+ *
+ * Inform cfg80211 and the userspace that processing of ML reconfiguration
+ * request to add links to the association is done.
+ */
+void cfg80211_mlo_reconf_add_done(struct net_device *dev,
+ struct cfg80211_mlo_reconf_done_data *data);
+
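A driver-side sketch (variable names are illustrative) reporting one successfully added link:

	struct cfg80211_mlo_reconf_done_data data = {
		.buf = resp_frame,
		.len = resp_len,
		.added_links = BIT(link_id),
	};

	data.links[link_id].bss = link_bss;	/* ownership moves to cfg80211 */
	cfg80211_mlo_reconf_add_done(netdev, &data);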
+/**
* cfg80211_schedule_channels_check - schedule regulatory check if needed
* @wdev: the wireless device to check
*
@@ -9465,6 +9776,13 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
*/
void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
+/**
+ * cfg80211_epcs_changed - Notify about a change in EPCS state
+ * @netdev: the wireless device whose EPCS state changed
+ * @enabled: set to true if EPCS was enabled, otherwise set to false.
+ */
+void cfg80211_epcs_changed(struct net_device *netdev, bool enabled);
+
#ifdef CONFIG_CFG80211_DEBUGFS
/**
* wiphy_locked_debugfs_read - do a locked read in debugfs
@@ -9477,6 +9795,8 @@ void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
* @ppos: read position
* @handler: the read handler to call (under wiphy lock)
* @data: additional data to pass to the read handler
+ *
+ * Return: the number of characters read, or a negative errno
*/
ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
char *buf, size_t bufsize,
@@ -9499,6 +9819,8 @@ ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
* @count: read count
* @handler: the write handler to call (under wiphy lock)
* @data: additional data to pass to the write handler
+ *
+ * Return: the number of characters written, or a negative errno
*/
ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
char *buf, size_t bufsize,
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 1338cb92c8e7..e57986b173f8 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -99,12 +99,6 @@ csum_block_add(__wsum csum, __wsum csum2, int offset)
}
static __always_inline __wsum
-csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
-{
- return csum_block_add(csum, csum2, offset);
-}
-
-static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
@@ -115,12 +109,6 @@ static __always_inline __wsum csum_unfold(__sum16 n)
return (__force __wsum)n;
}
-static __always_inline
-__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
-{
- return csum_partial(buff, len, sum);
-}
-
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
@@ -151,6 +139,12 @@ static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
*csum = csum_add(csum_sub(*csum, old), new);
}
+static inline unsigned short csum_from32to16(unsigned int sum)
+{
+ sum += (sum >> 16) | (sum << 16);
+ return (unsigned short)(sum >> 16);
+}
+
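A worked example of the end-around-carry fold the new helper performs:

	/*
	 * sum           = 0x0001ffff
	 * rotated       = 0xffff0001	((sum >> 16) | (sum << 16))
	 * sum + rotated = 0x00010000	(mod 2^32)
	 * result        = 0x0001	(high half), i.e. 0x0001 + 0xffff
	 *				with the carry wrapped back in
	 */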
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 53dd7d988a2d..d6780d7903f4 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -28,7 +28,7 @@
#include <net/request_sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* known doi values */
#define CIPSO_V4_DOI_UNKNOWN 0x00000000
@@ -183,7 +183,8 @@ int cipso_v4_getattr(const unsigned char *cipso,
struct netlbl_lsm_secattr *secattr);
int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
- const struct netlbl_lsm_secattr *secattr);
+ const struct netlbl_lsm_secattr *secattr,
+ bool sk_locked);
void cipso_v4_sock_delattr(struct sock *sk);
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr);
int cipso_v4_req_setattr(struct request_sock *req,
@@ -214,7 +215,8 @@ static inline int cipso_v4_getattr(const unsigned char *cipso,
static inline int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
- const struct netlbl_lsm_secattr *secattr)
+ const struct netlbl_lsm_secattr *secattr,
+ bool sk_locked)
{
return -ENOSYS;
}
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 9ac394bdfbe4..0091f23a40f7 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -35,7 +35,7 @@ struct devlink_port_phys_attrs {
/**
* struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
* @controller: Associated controller number
- * @pf: Associated PCI PF number for this port.
+ * @pf: associated PCI function number for the devlink port instance
* @external: when set, indicates if a port is for an external controller
*/
struct devlink_port_pci_pf_attrs {
@@ -47,8 +47,9 @@ struct devlink_port_pci_pf_attrs {
/**
* struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
* @controller: Associated controller number
- * @pf: Associated PCI PF number for this port.
- * @vf: Associated PCI VF for of the PCI PF for this port.
+ * @pf: associated PCI function number for the devlink port instance
+ * @vf: associated PCI VF number of a PF for the devlink port instance;
+ * VF number starts from 0 for the first PCI virtual function
* @external: when set, indicates if a port is for an external controller
*/
struct devlink_port_pci_vf_attrs {
@@ -61,8 +62,8 @@ struct devlink_port_pci_vf_attrs {
/**
* struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes
* @controller: Associated controller number
- * @sf: Associated PCI SF for of the PCI PF for this port.
- * @pf: Associated PCI PF number for this port.
+ * @sf: associated SF number of a PF for the devlink port instance
+ * @pf: associated PCI function number for the devlink port instance
* @external: when set, indicates if a port is for an external controller
*/
struct devlink_port_pci_sf_attrs {
@@ -352,7 +353,7 @@ struct devlink_dpipe_table {
bool resource_valid;
u64 resource_id;
u64 resource_units;
- struct devlink_dpipe_table_ops *table_ops;
+ const struct devlink_dpipe_table_ops *table_ops;
struct rcu_head rcu;
};
@@ -419,11 +420,11 @@ typedef u64 devlink_resource_occ_get_t(void *priv);
#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
- DEVLINK_PARAM_TYPE_U8,
- DEVLINK_PARAM_TYPE_U16,
- DEVLINK_PARAM_TYPE_U32,
- DEVLINK_PARAM_TYPE_STRING,
- DEVLINK_PARAM_TYPE_BOOL,
+ DEVLINK_PARAM_TYPE_U8 = DEVLINK_VAR_ATTR_TYPE_U8,
+ DEVLINK_PARAM_TYPE_U16 = DEVLINK_VAR_ATTR_TYPE_U16,
+ DEVLINK_PARAM_TYPE_U32 = DEVLINK_VAR_ATTR_TYPE_U32,
+ DEVLINK_PARAM_TYPE_STRING = DEVLINK_VAR_ATTR_TYPE_STRING,
+ DEVLINK_PARAM_TYPE_BOOL = DEVLINK_VAR_ATTR_TYPE_FLAG,
};
union devlink_param_value {
@@ -483,7 +484,8 @@ struct devlink_param {
int (*get)(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*set)(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int (*validate)(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
@@ -599,12 +601,14 @@ enum devlink_param_generic_id {
.validate = _validate, \
}
-/* Part number, identifier of board design */
+/* Identifier of board design */
#define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id"
/* Revision of board design */
#define DEVLINK_INFO_VERSION_GENERIC_BOARD_REV "board.rev"
/* Maker of the board */
#define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture"
+/* Part number of the board and its components */
+#define DEVLINK_INFO_VERSION_GENERIC_BOARD_PART_NUMBER "board.part_number"
/* Part number, identifier of asic design */
#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id"
@@ -1258,6 +1262,18 @@ enum devlink_trap_group_generic_id {
.min_burst = _min_burst, \
}
+#define devlink_fmsg_put(fmsg, name, value) ( \
+ _Generic((value), \
+ bool : devlink_fmsg_bool_pair_put, \
+ u8 : devlink_fmsg_u8_pair_put, \
+ u16 : devlink_fmsg_u32_pair_put, \
+ u32 : devlink_fmsg_u32_pair_put, \
+ u64 : devlink_fmsg_u64_pair_put, \
+ int : devlink_fmsg_u32_pair_put, \
+ char * : devlink_fmsg_string_pair_put, \
+ const char * : devlink_fmsg_string_pair_put) \
+ (fmsg, name, (value)))
+
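A usage sketch for the type-generic helper (field names are illustrative); _Generic dispatches on the value's type, so each call resolves to the matching devlink_fmsg_*_pair_put():

	devlink_fmsg_put(fmsg, "available", true);	/* bool pair */
	devlink_fmsg_put(fmsg, "queue_id", (u32)7);	/* u32 pair */
	devlink_fmsg_put(fmsg, "reason", "overflow");	/* string pair */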
enum {
/* device supports reload operations */
DEVLINK_F_RELOAD = 1UL << 0,
@@ -1519,6 +1535,7 @@ int devl_trylock(struct devlink *devlink);
void devl_unlock(struct devlink *devlink);
void devl_assert_locked(struct devlink *devlink);
bool devl_lock_is_held(struct devlink *devlink);
+DEFINE_GUARD(devl, struct devlink *, devl_lock(_T), devl_unlock(_T));
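The DEFINE_GUARD() above enables scope-based locking via <linux/cleanup.h>; a minimal sketch (the function is hypothetical):

	static int example_op(struct devlink *devlink)
	{
		guard(devl)(devlink);	/* devl_lock() here, devl_unlock()
					 * on every return path */
		devl_assert_locked(devlink);
		/* ... modify instance state ... */
		return 0;
	}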
struct ib_device;
@@ -1602,6 +1619,14 @@ void devlink_free(struct devlink *devlink);
* capability. Should be used by device drivers to
* enable/disable ipsec_packet capability of a
* function managed by the devlink port.
+ * @port_fn_max_io_eqs_get: Callback used to get port function's maximum number
+ * of event queues. Should be used by device drivers to
+ * report the maximum event queues of a function
+ * managed by the devlink port.
+ * @port_fn_max_io_eqs_set: Callback used to set port function's maximum number
+ * of event queues. Should be used by device drivers to
+ * configure maximum number of event queues
+ * of a function managed by the devlink port.
*
* Note: Driver should return -EOPNOTSUPP if it doesn't support
* port function (@port_fn_*) handling for a particular port.
@@ -1651,6 +1676,12 @@ struct devlink_port_ops {
int (*port_fn_ipsec_packet_set)(struct devlink_port *devlink_port,
bool enable,
struct netlink_ext_ack *extack);
+ int (*port_fn_max_io_eqs_get)(struct devlink_port *devlink_port,
+ u32 *max_eqs,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_max_io_eqs_set)(struct devlink_port *devlink_port,
+ u32 max_eqs,
+ struct netlink_ext_ack *extack);
};
void devlink_port_init(struct devlink *devlink,
@@ -1734,7 +1765,7 @@ void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
int devl_dpipe_table_register(struct devlink *devlink,
const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
+ const struct devlink_dpipe_table_ops *table_ops,
void *priv, bool counter_control_extern);
void devl_dpipe_table_unregister(struct devlink *devlink,
const char *table_name);
@@ -1762,12 +1793,6 @@ int devl_resource_register(struct devlink *devlink,
u64 resource_id,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params);
-int devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params);
void devl_resources_unregister(struct devlink *devlink);
void devlink_resources_unregister(struct devlink *devlink);
int devl_resource_size_get(struct devlink *devlink,
@@ -1780,15 +1805,8 @@ void devl_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv);
-void devlink_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv);
void devl_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
-
-void devlink_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id);
int devl_params_register(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
@@ -1990,6 +2008,7 @@ int devlink_compat_switch_id_get(struct net_device *dev,
int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port);
size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port);
+void devlink_fmsg_dump_skb(struct devlink_fmsg *fmsg, const struct sk_buff *skb);
#else
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 9707ab54fdd5..bcf9d7467e1a 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -6,9 +6,13 @@
#define DEFINE_DROP_REASON(FN, FNe) \
FN(NOT_SPECIFIED) \
FN(NO_SOCKET) \
+ FN(SOCKET_CLOSE) \
+ FN(SOCKET_FILTER) \
+ FN(SOCKET_RCVBUFF) \
+ FN(UNIX_DISCONNECT) \
+ FN(UNIX_SKIP_OOB) \
FN(PKT_TOO_SMALL) \
FN(TCP_CSUM) \
- FN(SOCKET_FILTER) \
FN(UDP_CSUM) \
FN(NETFILTER_DROP) \
FN(OTHERHOST) \
@@ -18,7 +22,6 @@
FN(UNICAST_IN_L2_MULTICAST) \
FN(XFRM_POLICY) \
FN(IP_NOPROTO) \
- FN(SOCKET_RCVBUFF) \
FN(PROTO_MEM) \
FN(TCP_AUTH_HDR) \
FN(TCP_MD5NOTFOUND) \
@@ -36,6 +39,10 @@
FN(TCP_OVERWINDOW) \
FN(TCP_OFOMERGE) \
FN(TCP_RFC7323_PAWS) \
+ FN(TCP_RFC7323_PAWS_ACK) \
+ FN(TCP_RFC7323_TW_PAWS) \
+ FN(TCP_RFC7323_TSECR) \
+ FN(TCP_LISTEN_OVERFLOW) \
FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
FN(TCP_INVALID_ACK_SEQUENCE) \
@@ -55,9 +62,16 @@
FN(NEIGH_FAILED) \
FN(NEIGH_QUEUEFULL) \
FN(NEIGH_DEAD) \
+ FN(NEIGH_HH_FILLFAIL) \
FN(TC_EGRESS) \
FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
+ FN(QDISC_OVERLIMIT) \
+ FN(QDISC_CONGESTED) \
+ FN(CAKE_FLOOD) \
+ FN(FQ_BAND_LIMIT) \
+ FN(FQ_HORIZON_LIMIT) \
+ FN(FQ_FLOW_LIMIT) \
FN(CPU_BACKLOG) \
FN(XDP) \
FN(TC_INGRESS) \
@@ -76,6 +90,10 @@
FN(INVALID_PROTO) \
FN(IP_INADDRERRORS) \
FN(IP_INNOROUTES) \
+ FN(IP_LOCAL_SOURCE) \
+ FN(IP_INVALID_SOURCE) \
+ FN(IP_LOCALNET) \
+ FN(IP_INVALID_DEST) \
FN(PKT_TOO_BIG) \
FN(DUP_FRAG) \
FN(FRAG_REASM_TIMEOUT) \
@@ -92,6 +110,17 @@
FN(PACKET_SOCK_ERROR) \
FN(TC_CHAIN_NOTFOUND) \
FN(TC_RECLASSIFY_LOOP) \
+ FN(VXLAN_INVALID_HDR) \
+ FN(VXLAN_VNI_NOT_FOUND) \
+ FN(MAC_INVALID_SOURCE) \
+ FN(VXLAN_ENTRY_EXISTS) \
+ FN(NO_TX_TARGET) \
+ FN(IP_TUNNEL_ECN) \
+ FN(TUNNEL_TXINFO) \
+ FN(LOCAL_MAC) \
+ FN(ARP_PVLAN_DISABLE) \
+ FN(MAC_IEEE_MAC_CONTROL) \
+ FN(BRIDGE_INGRESS_STP_STATE) \
FNe(MAX)
/**
@@ -116,12 +145,27 @@ enum skb_drop_reason {
* 3) no valid child socket during 3WHS process
*/
SKB_DROP_REASON_NO_SOCKET,
+ /** @SKB_DROP_REASON_SOCKET_CLOSE: socket is close()d */
+ SKB_DROP_REASON_SOCKET_CLOSE,
+ /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
+ SKB_DROP_REASON_SOCKET_FILTER,
+ /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buffer is full */
+ SKB_DROP_REASON_SOCKET_RCVBUFF,
+ /**
+ * @SKB_DROP_REASON_UNIX_DISCONNECT: recv queue is purged when SOCK_DGRAM
+ * or SOCK_SEQPACKET socket re-connect()s to another socket or notices
+ * during send() that the peer has been close()d.
+ */
+ SKB_DROP_REASON_UNIX_DISCONNECT,
+ /**
+ * @SKB_DROP_REASON_UNIX_SKIP_OOB: Out-Of-Band data is skipped by
+ * recv() without MSG_OOB, so it is dropped.
+ */
+ SKB_DROP_REASON_UNIX_SKIP_OOB,
/** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
SKB_DROP_REASON_PKT_TOO_SMALL,
/** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */
SKB_DROP_REASON_TCP_CSUM,
- /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
- SKB_DROP_REASON_SOCKET_FILTER,
/** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */
SKB_DROP_REASON_UDP_CSUM,
/** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */
@@ -152,11 +196,9 @@ enum skb_drop_reason {
SKB_DROP_REASON_XFRM_POLICY,
/** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */
SKB_DROP_REASON_IP_NOPROTO,
- /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */
- SKB_DROP_REASON_SOCKET_RCVBUFF,
/**
- * @SKB_DROP_REASON_PROTO_MEM: proto memory limition, such as udp packet
- * drop out of udp_memory_allocated.
+ * @SKB_DROP_REASON_PROTO_MEM: proto memory limitation, such as a
+ * UDP packet dropped because udp_memory_allocated was exceeded.
*/
SKB_DROP_REASON_PROTO_MEM,
/**
@@ -217,7 +259,7 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TCP_ZEROWINDOW,
/**
- * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data reveived is already
+ * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received is already
* received before (a spurious retransmission may have happened), see
* LINUX_MIB_DELAYEDACKLOST
*/
@@ -238,6 +280,24 @@ enum skb_drop_reason {
* LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
*/
SKB_DROP_REASON_TCP_RFC7323_PAWS,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK: PAWS check, old ACK packet.
+ * Corresponds to LINUX_MIB_PAWS_OLD_ACK.
+ */
+ SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_TW_PAWS: PAWS check, socket is in
+ * TIME_WAIT state.
+ * Corresponds to LINUX_MIB_PAWS_TW_REJECTED.
+ */
+ SKB_DROP_REASON_TCP_RFC7323_TW_PAWS,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_TSECR: PAWS check, invalid TSEcr.
+ * Corresponds to LINUX_MIB_TSECRREJECTED.
+ */
+ SKB_DROP_REASON_TCP_RFC7323_TSECR,
+ /** @SKB_DROP_REASON_TCP_LISTEN_OVERFLOW: listener queue full. */
+ SKB_DROP_REASON_TCP_LISTEN_OVERFLOW,
/** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
SKB_DROP_REASON_TCP_OLD_SEQUENCE,
/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
@@ -289,6 +349,8 @@ enum skb_drop_reason {
SKB_DROP_REASON_NEIGH_QUEUEFULL,
/** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
SKB_DROP_REASON_NEIGH_DEAD,
+ /** @SKB_DROP_REASON_NEIGH_HH_FILLFAIL: failed to fill the device hard header */
+ SKB_DROP_REASON_NEIGH_HH_FILLFAIL,
/** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
SKB_DROP_REASON_TC_EGRESS,
/** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
@@ -299,6 +361,36 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_QDISC_DROP,
/**
+ * @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc
+ * instance exceeds its total buffer size limit.
+ */
+ SKB_DROP_REASON_QDISC_OVERLIMIT,
+ /**
+ * @SKB_DROP_REASON_QDISC_CONGESTED: dropped by a qdisc AQM algorithm
+ * due to congestion.
+ */
+ SKB_DROP_REASON_QDISC_CONGESTED,
+ /**
+ * @SKB_DROP_REASON_CAKE_FLOOD: dropped by the flood protection part of
+ * CAKE qdisc AQM algorithm (BLUE).
+ */
+ SKB_DROP_REASON_CAKE_FLOOD,
+ /**
+ * @SKB_DROP_REASON_FQ_BAND_LIMIT: dropped by fq qdisc when per band
+ * limit is reached.
+ */
+ SKB_DROP_REASON_FQ_BAND_LIMIT,
+ /**
+ * @SKB_DROP_REASON_FQ_HORIZON_LIMIT: dropped by fq qdisc when packet
+ * timestamp is too far in the future.
+ */
+ SKB_DROP_REASON_FQ_HORIZON_LIMIT,
+ /**
+ * @SKB_DROP_REASON_FQ_FLOW_LIMIT: dropped by fq qdisc when a flow
+ * exceeds its limits.
+ */
+ SKB_DROP_REASON_FQ_FLOW_LIMIT,
+ /**
* @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU
* backlog queue. This can be caused by the backlog queue being full (see
* netdev_max_backlog in net.rst) or RPS flow limit
@@ -365,6 +457,21 @@ enum skb_drop_reason {
* IPSTATS_MIB_INADDRERRORS
*/
SKB_DROP_REASON_IP_INNOROUTES,
+ /** @SKB_DROP_REASON_IP_LOCAL_SOURCE: the source ip is local */
+ SKB_DROP_REASON_IP_LOCAL_SOURCE,
+ /**
+ * @SKB_DROP_REASON_IP_INVALID_SOURCE: the source ip is invalid:
+ * 1) source ip is multicast or limited broadcast
+ * 2) source ip is zero and not IGMP
+ */
+ SKB_DROP_REASON_IP_INVALID_SOURCE,
+ /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */
+ SKB_DROP_REASON_IP_LOCALNET,
+ /**
+ * @SKB_DROP_REASON_IP_INVALID_DEST: the dest ip is invalid:
+ * 1) dest ip is 0
+ */
+ SKB_DROP_REASON_IP_INVALID_DEST,
/**
* @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the
* MTU)
@@ -419,6 +526,54 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
/**
+ * @SKB_DROP_REASON_VXLAN_INVALID_HDR: VXLAN header is invalid. E.g.:
+ * 1) reserved fields are not zero
+ * 2) "I" flag is not set
+ */
+ SKB_DROP_REASON_VXLAN_INVALID_HDR,
+ /** @SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND: no VXLAN device found for VNI */
+ SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND,
+ /** @SKB_DROP_REASON_MAC_INVALID_SOURCE: source mac is invalid */
+ SKB_DROP_REASON_MAC_INVALID_SOURCE,
+ /**
+ * @SKB_DROP_REASON_VXLAN_ENTRY_EXISTS: trying to migrate a static
+ * entry or an entry pointing to a nexthop.
+ */
+ SKB_DROP_REASON_VXLAN_ENTRY_EXISTS,
+ /** @SKB_DROP_REASON_NO_TX_TARGET: no target found for xmit */
+ SKB_DROP_REASON_NO_TX_TARGET,
+ /**
+ * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to
+ * RFC 6040 4.2, see __INET_ECN_decapsulate() for details.
+ */
+ SKB_DROP_REASON_IP_TUNNEL_ECN,
+ /**
+ * @SKB_DROP_REASON_TUNNEL_TXINFO: packet without necessary metadata
+ * reached a device which is in "external" mode.
+ */
+ SKB_DROP_REASON_TUNNEL_TXINFO,
+ /**
+ * @SKB_DROP_REASON_LOCAL_MAC: the source MAC address is equal to
+ * the MAC address of the local netdev.
+ */
+ SKB_DROP_REASON_LOCAL_MAC,
+ /**
+ * @SKB_DROP_REASON_ARP_PVLAN_DISABLE: a non-IP packet is forwarded
+ * to the in_dev while proxy_arp_pvlan is not enabled.
+ */
+ SKB_DROP_REASON_ARP_PVLAN_DISABLE,
+ /**
+ * @SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL: the destination MAC address
+ * is an IEEE MAC Control address.
+ */
+ SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL,
+ /**
+ * @SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE: the STP state of the
+ * ingress bridge port does not allow frames to be forwarded.
+ */
+ SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
*/
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 56cb7be92244..7d3b1a2a6fec 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -18,12 +18,6 @@ enum skb_drop_reason_subsys {
SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
/**
- * @SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR: mac80211 drop reasons
- * for frames still going to monitor, see net/mac80211/drop.h
- */
- SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
-
- /**
* @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons,
* see net/openvswitch/drop.h
*/
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 7c0da9effe4e..55e2d97f247e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -24,9 +24,6 @@
struct dsa_8021q_context;
struct tc_action;
-struct phy_device;
-struct fixed_phy_status;
-struct phylink_link_state;
#define DSA_TAG_PROTO_NONE_VALUE 0
#define DSA_TAG_PROTO_BRCM_VALUE 1
@@ -56,6 +53,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
#define DSA_TAG_PROTO_LAN937X_VALUE 27
+#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -86,6 +84,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
+ DSA_TAG_PROTO_VSC73XX_8021Q = DSA_TAG_PROTO_VSC73XX_8021Q_VALUE,
};
struct dsa_switch;
@@ -327,6 +326,12 @@ struct dsa_port {
};
};
+static inline struct dsa_port *
+dsa_phylink_to_port(struct phylink_config *config)
+{
+ return container_of(config, struct dsa_port, pl_config);
+}
+
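A sketch of a driver phylink_mac_ops callback (see the new @phylink_mac_ops member below) recovering its port via the helper; the callback name is illustrative:

	static void example_mac_config(struct phylink_config *config,
				       unsigned int mode,
				       const struct phylink_link_state *state)
	{
		struct dsa_port *dp = dsa_phylink_to_port(config);
		struct dsa_switch *ds = dp->ds;

		/* program the MAC of port dp->index on switch ds ... */
	}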
/* TODO: ideally DSA ports would have a single dp->link_dp member,
* and no dst->rtable nor this struct dsa_link would be needed,
* but this would require some more complex tree walking,
@@ -398,14 +403,18 @@ struct dsa_switch {
*/
u32 configure_vlan_while_not_filtering:1;
- /* If the switch driver always programs the CPU port as egress tagged
- * despite the VLAN configuration indicating otherwise, then setting
- * @untag_bridge_pvid will force the DSA receive path to pop the
- * bridge's default_pvid VLAN tagged frames to offer a consistent
- * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
- * device.
+ /* Pop the default_pvid of VLAN-unaware bridge ports from tagged frames.
+ * DEPRECATED: Do NOT set this field in new drivers. Instead look at
+ * the dsa_software_vlan_untag() comments.
*/
u32 untag_bridge_pvid:1;
+ /* Pop the default_pvid of VLAN-aware bridge ports from tagged frames.
+ * Useful if the switch cannot preserve the VLAN tag as seen on the
+ * wire for user port ingress, and chooses to send all frames as
+ * VLAN-tagged to the CPU, including those which were originally
+ * untagged.
+ */
+ u32 untag_vlan_aware_bridge_pvid:1;
/* Let DSA manage the FDB entries towards the
* CPU, based on the software bridge database.
@@ -430,6 +439,11 @@ struct dsa_switch {
*/
u32 fdb_isolation:1;
+ /* Drivers that have global DSCP mapping settings must set this to
+ * true to automatically apply the settings to all ports.
+ */
+ u32 dscp_prio_mapping_is_global:1;
+
/* Listener for switch fabric events */
struct notifier_block nb;
@@ -452,6 +466,11 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
+ * Allow a DSA switch driver to override the phylink MAC ops
+ */
+ const struct phylink_mac_ops *phylink_mac_ops;
+
+ /*
* User mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
@@ -578,6 +597,10 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
dsa_switch_for_each_port((_dp), (_ds)) \
if (dsa_port_is_user((_dp)))
+#define dsa_switch_for_each_user_port_continue_reverse(_dp, _ds) \
+ dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
+ if (dsa_port_is_user((_dp)))
+
#define dsa_switch_for_each_cpu_port(_dp, _ds) \
dsa_switch_for_each_port((_dp), (_ds)) \
if (dsa_port_is_cpu((_dp)))
@@ -858,39 +881,10 @@ struct dsa_switch_ops {
int regnum, u16 val);
/*
- * Link state adjustment (called from libphy)
- */
- void (*adjust_link)(struct dsa_switch *ds, int port,
- struct phy_device *phydev);
- void (*fixed_link_update)(struct dsa_switch *ds, int port,
- struct fixed_phy_status *st);
-
- /*
* PHYLINK integration
*/
void (*phylink_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
- struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
- int port,
- phy_interface_t iface);
- int (*phylink_mac_prepare)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- int (*phylink_mac_finish)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
/*
@@ -912,6 +906,8 @@ struct dsa_switch_ops {
void (*get_rmon_stats)(struct dsa_switch *ds, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
+ void (*get_ts_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats);
void (*get_stats64)(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s);
void (*get_pause_stats)(struct dsa_switch *ds, int port,
@@ -931,7 +927,7 @@ struct dsa_switch_ops {
* ethtool timestamp info
*/
int (*get_ts_info)(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
/*
* ethtool MAC merge layer
@@ -955,6 +951,10 @@ struct dsa_switch_ops {
u8 prio);
int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
u8 prio);
+ int (*port_set_apptrust)(struct dsa_switch *ds, int port,
+ const u8 *sel, int nsel);
+ int (*port_get_apptrust)(struct dsa_switch *ds, int port, u8 *sel,
+ int *nsel);
/*
* Suspend and resume
@@ -990,10 +990,9 @@ struct dsa_switch_ops {
/*
* Port's MAC EEE settings
*/
+ bool (*support_eee)(struct dsa_switch *ds, int port);
int (*set_mac_eee)(struct dsa_switch *ds, int port,
struct ethtool_keee *e);
- int (*get_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_keee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
@@ -1132,9 +1131,10 @@ struct dsa_switch_ops {
* PTP functionality
*/
int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*port_txtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb);
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
@@ -1247,7 +1247,8 @@ struct dsa_switch_ops {
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int dsa_devlink_params_register(struct dsa_switch *ds,
const struct devlink_param *params,
size_t params_count);
@@ -1384,5 +1385,6 @@ static inline bool dsa_user_dev_check(const struct net_device *dev)
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
+bool dsa_supports_eee(struct dsa_switch *ds, int port);
#endif
diff --git a/include/net/dscp.h b/include/net/dscp.h
new file mode 100644
index 000000000000..ba40540868c9
--- /dev/null
+++ b/include/net/dscp.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */
+
+#ifndef __DSCP_H__
+#define __DSCP_H__
+
+/*
+ * DSCP Pools and Codepoint Space Division:
+ *
+ * The Differentiated Services (Diffserv) architecture defines a method for
+ * classifying and managing network traffic using the DS field in IPv4 and IPv6
+ * packet headers. This field can carry one of 64 distinct DSCP (Differentiated
+ * Services Code Point) values, which are divided into three pools based on
+ * their Least Significant Bits (LSB) patterns and intended usage. Each pool has
+ * a specific registration procedure for assigning DSCP values:
+ *
+ * Pool 1 (Standards Action Pool):
+ * - Codepoint Space: xxxxx0
+ * This pool includes DSCP values ending in '0' (binary), allocated via
+ * Standards Action. It is intended for globally recognized traffic classes,
+ * ensuring interoperability across the internet. This pool encompasses
+ * well-known DSCP values such as CS0-CS7, AFxx, EF, and VOICE-ADMIT.
+ *
+ * Pool 2 (Experimental/Local Use Pool):
+ * - Codepoint Space: xxxx11
+ * Reserved for DSCP values ending in '11' (binary), this pool is designated
+ * for Experimental or Local Use. It allows for private or temporary traffic
+ * marking schemes not intended for standardized global use, facilitating
+ * testing and network-specific configurations without impacting
+ * interoperability.
+ *
+ * Pool 3 (Preferential Standardization Pool):
+ * - Codepoint Space: xxxx01
+ * Initially reserved for experimental or local use, this pool now serves as
+ * a secondary standardization resource should Pool 1 become exhausted. DSCP
+ * values ending in '01' (binary) are assigned via Standards Action, with a
+ * focus on adopting new, standardized traffic classes as the need arises.
+ *
+ * For pool updates see:
+ * https://www.iana.org/assignments/dscp-registry/dscp-registry.xhtml
+ */
+
+/* Pool 1: Standardized DSCP values as per [RFC8126] */
+#define DSCP_CS0 0 /* 000000, [RFC2474] */
+/* CS0 is some times called default (DF) */
+#define DSCP_DF 0 /* 000000, [RFC2474] */
+#define DSCP_CS1 8 /* 001000, [RFC2474] */
+#define DSCP_CS2 16 /* 010000, [RFC2474] */
+#define DSCP_CS3 24 /* 011000, [RFC2474] */
+#define DSCP_CS4 32 /* 100000, [RFC2474] */
+#define DSCP_CS5 40 /* 101000, [RFC2474] */
+#define DSCP_CS6 48 /* 110000, [RFC2474] */
+#define DSCP_CS7 56 /* 111000, [RFC2474] */
+#define DSCP_AF11 10 /* 001010, [RFC2597] */
+#define DSCP_AF12 12 /* 001100, [RFC2597] */
+#define DSCP_AF13 14 /* 001110, [RFC2597] */
+#define DSCP_AF21 18 /* 010010, [RFC2597] */
+#define DSCP_AF22 20 /* 010100, [RFC2597] */
+#define DSCP_AF23 22 /* 010110, [RFC2597] */
+#define DSCP_AF31 26 /* 011010, [RFC2597] */
+#define DSCP_AF32 28 /* 011100, [RFC2597] */
+#define DSCP_AF33 30 /* 011110, [RFC2597] */
+#define DSCP_AF41 34 /* 100010, [RFC2597] */
+#define DSCP_AF42 36 /* 100100, [RFC2597] */
+#define DSCP_AF43 38 /* 100110, [RFC2597] */
+#define DSCP_EF 46 /* 101110, [RFC3246] */
+#define DSCP_VOICE_ADMIT 44 /* 101100, [RFC5865] */
+
+/* Pool 3: Standardized assignments, previously available for experimental/local
+ * use
+ */
+#define DSCP_LE 1 /* 000001, [RFC8622] */
+
+#define DSCP_MAX 64
+
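Illustrative use of the constants (assuming ipv4_get_dsfield() from <net/dsfield.h>; the DSCP is the upper six bits of the DS field):

	u8 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;

	if (dscp == DSCP_EF)				/* 46: expedited forwarding */
		prio = 7;
	else if (dscp == DSCP_CS6 || dscp == DSCP_CS7)	/* network control */
		prio = 6;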
+#endif /* __DSCP_H__ */
diff --git a/include/net/dst.h b/include/net/dst.h
index 0aa331bd2fdb..78c78cdce0e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -307,7 +307,7 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
* @skb: buffer
*
* If dst is not yet refcounted and not destroyed, grab a ref on it.
- * Returns true if dst is refcounted.
+ * Returns: true if dst is refcounted.
*/
static inline bool skb_dst_force(struct sk_buff *skb)
{
@@ -341,7 +341,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
skb->dev = dev;
/*
- * Clear hash so that we can recalulate the hash for the
+ * Clear hash so that we can recalculate the hash for the
* encapsulated packet, unless we have already determine the hash
* over the L4 4-tuple.
*/
@@ -440,6 +440,15 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
dst->expires = expires;
}
+static inline unsigned int dst_dev_overhead(struct dst_entry *dst,
+ struct sk_buff *skb)
+{
+ if (likely(dst))
+ return LL_RESERVED_SPACE(dst->dev);
+
+ return skb->mac_len;
+}
+
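A sketch of the intended use in a tunnel output path (cached_dst is illustrative): reserve headroom for the new outer header plus the device's link-layer overhead, falling back to skb->mac_len when no dst is known:

	unsigned int hlen = sizeof(struct ipv6hdr) +
			    dst_dev_overhead(cached_dst, skb);

	err = skb_cow_head(skb, hlen);
	if (unlikely(err))
		return err;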
INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index df6622a5fe98..1961699598e2 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -76,7 +76,7 @@ struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
*/
static inline void dst_cache_reset(struct dst_cache *dst_cache)
{
- dst_cache->reset_ts = jiffies;
+ WRITE_ONCE(dst_cache->reset_ts, jiffies);
}
/**
@@ -102,7 +102,7 @@ int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
* @dst_cache: the cache
*
* No synchronization is enforced: it must be called only when the cache
- * is unsed.
+ * is unused.
*/
void dst_cache_destroy(struct dst_cache *dst_cache);
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 1b7fae4c6b24..4160731dcb6e 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -198,7 +198,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
__be32 daddr,
__u8 tos, __u8 ttl,
__be16 tp_dst,
- __be16 flags,
+ const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
@@ -215,7 +215,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
}
static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
+ const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
@@ -230,7 +230,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be32 label,
- __be16 flags,
+ const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
@@ -243,7 +243,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
info = &tun_dst->u.tun_info;
info->mode = IP_TUNNEL_INFO_IPV6;
- info->key.tun_flags = flags;
+ ip_tunnel_flags_copy(info->key.tun_flags, flags);
info->key.tun_id = tunnel_id;
info->key.tp_src = 0;
info->key.tp_dst = tp_dst;
@@ -259,7 +259,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
}
static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
+ const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 6d1c8541183d..3a9001a042a5 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -24,7 +24,7 @@ struct dst_ops {
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev);
- struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*negative_advice)(struct sock *sk, struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
diff --git a/include/net/eee.h b/include/net/eee.h
index 84837aba3cd9..cfab1b8bc46a 100644
--- a/include/net/eee.h
+++ b/include/net/eee.h
@@ -13,10 +13,7 @@ struct eee_config {
static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
{
/* eee_enabled is the master on/off */
- if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
- return false;
-
- return true;
+ return eeecfg->eee_enabled && eeecfg->tx_lpi_enabled;
}
static inline void eeecfg_to_eee(struct ethtool_keee *eee,
diff --git a/include/net/erspan.h b/include/net/erspan.h
index 6cb4cbd6a48f..c6209e7b6c96 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -89,7 +89,7 @@ enum erspan_encap_type {
ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
ERSPAN_ENCAP_8021Q = 0x2, /* originally 802.1Q encapsulated */
- ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag perserved in frame */
+ ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag preserved in frame */
};
#define ERSPAN_V1_MDSIZE 4
@@ -192,7 +192,7 @@ static inline void erspan_build_header(struct sk_buff *skb,
enc_type = ERSPAN_ENCAP_NOVLAN;
/* If mirrored packet has vlan tag, extract tci and
- * perserve vlan header in the mirrored frame.
+ * preserve vlan header in the mirrored frame.
*/
if (eth->h_proto == htons(ETH_P_8021Q)) {
qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
diff --git a/include/net/espintcp.h b/include/net/espintcp.h
index 0335bbd76552..c70efd704b6d 100644
--- a/include/net/espintcp.h
+++ b/include/net/espintcp.h
@@ -32,7 +32,7 @@ struct espintcp_ctx {
static inline struct espintcp_ctx *espintcp_getctx(const struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
/* RCU is only needed for diag */
return (__force void *)icsk->icsk_ulp_data;
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index 6d59221ff05a..48aad6128fea 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -28,7 +28,7 @@ enum fib_event_type {
struct fib_notifier_ops {
int family;
struct list_head list;
- unsigned int (*fib_seq_read)(struct net *net);
+ unsigned int (*fib_seq_read)(const struct net *net);
int (*fib_dump)(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
struct module *owner;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index d17855c52ef9..6e68e359ad18 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -43,6 +43,10 @@ struct fib_rule {
struct fib_kuid_range uid_range;
struct fib_rule_port_range sport_range;
struct fib_rule_port_range dport_range;
+ u16 sport_mask;
+ u16 dport_mask;
+ u8 iif_is_l3_master;
+ u8 oif_is_l3_master;
struct rcu_head rcu;
};
@@ -146,6 +150,17 @@ static inline bool fib_rule_port_inrange(const struct fib_rule_port_range *a,
ntohs(port) <= a->end;
}
+static inline bool fib_rule_port_match(const struct fib_rule_port_range *range,
+ u16 port_mask, __be16 port)
+{
+ if ((range->start ^ ntohs(port)) & port_mask)
+ return false;
+ if (!port_mask && fib_rule_port_range_set(range) &&
+ !fib_rule_port_inrange(range, port))
+ return false;
+ return true;
+}
+
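A worked example of the mask semantics (illustrative numbers): with range->start == 8080 (0x1f90) and port_mask == 0xfff0, ports 8080..8095 match, since only the bits set in the mask are compared; with port_mask == 0 and a range configured, the older start..end check applies unchanged.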
static inline bool fib_rule_port_range_valid(const struct fib_rule_port_range *a)
{
return a->start != 0 && a->end != 0 && a->end < 0xffff &&
@@ -159,6 +174,12 @@ static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a,
a->end == b->end;
}
+static inline bool
+fib_rule_port_is_range(const struct fib_rule_port_range *range)
+{
+ return range->start != range->end;
+}
+
static inline bool fib_rule_requires_fldissect(struct fib_rule *rule)
{
return rule->iifindex != LOOPBACK_IFINDEX && (rule->ip_proto ||
@@ -176,12 +197,12 @@ int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
-unsigned int fib_rules_seq_read(struct net *net, int family);
+unsigned int fib_rules_seq_read(const struct net *net, int family);
-int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack);
-int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack);
+int fib_newrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack, bool rtnl_held);
+int fib_delrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack, bool rtnl_held);
INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule,
struct flowi *fl, int flags));
diff --git a/include/net/flow.h b/include/net/flow.h
index 335bbc52171c..a1839c278d87 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -38,6 +38,8 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
+#define FLOWI_FLAG_L3MDEV_OIF 0x04
+#define FLOWI_FLAG_ANY_SPORT 0x08
__u32 flowic_secid;
kuid_t flowic_uid;
__u32 flowic_multipath_hash;
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 1a7131d6cb0e..ced79dc8e856 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -7,6 +7,7 @@
#include <linux/siphash.h>
#include <linux/string.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/pkt_cls.h>
struct bpf_prog;
struct net;
@@ -16,7 +17,8 @@ struct sk_buff;
* struct flow_dissector_key_control:
* @thoff: Transport header offset
* @addr_type: Type of key. One of FLOW_DISSECTOR_KEY_*
- * @flags: Key flags. Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAGENCAPSULATION)
+ * @flags: Key flags.
+ * Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAG|ENCAPSULATION|F_*)
*/
struct flow_dissector_key_control {
u16 thoff;
@@ -24,9 +26,20 @@ struct flow_dissector_key_control {
u32 flags;
};
-#define FLOW_DIS_IS_FRAGMENT BIT(0)
-#define FLOW_DIS_FIRST_FRAG BIT(1)
-#define FLOW_DIS_ENCAPSULATION BIT(2)
+/* The control flags are kept in sync with TCA_FLOWER_KEY_FLAGS_*, as those
+ * flags are exposed to userspace in some error paths, ie. unsupported flags.
+ */
+enum flow_dissector_ctrl_flags {
+ FLOW_DIS_IS_FRAGMENT = TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT,
+ FLOW_DIS_FIRST_FRAG = TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
+ FLOW_DIS_F_TUNNEL_CSUM = TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_OAM = TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT = TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+
+ /* These flags are internal to the kernel */
+ FLOW_DIS_ENCAPSULATION = (TCA_FLOWER_KEY_FLAGS_MAX << 1),
+};
enum flow_dissect_ret {
FLOW_DISSECT_RET_OUT_GOOD,
@@ -97,7 +110,7 @@ struct flow_dissector_key_enc_opts {
* here but seems difficult to #include
*/
u8 len;
- __be16 dst_opt_type;
+ u32 dst_opt_type;
};
struct flow_dissector_key_keyid {
@@ -433,6 +446,8 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
}
u32 flow_hash_from_keys(struct flow_keys *keys);
+u32 flow_hash_from_keys_seed(struct flow_keys *keys,
+ const siphash_key_t *keyval);
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp,
const void *data, int thoff, int hlen);
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 314087a5e181..596ab9791e4d 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -345,7 +345,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
* flow_offload_has_one_action() - check if exactly one action is present
* @action: tc filter flow offload action
*
- * Returns true if exactly one action is present.
+ * Return: true if exactly one action is present.
*/
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
@@ -449,6 +449,96 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule,
return dissector_uses_key(rule->match.dissector, key);
}
+/**
+ * flow_rule_is_supp_control_flags() - check for supported control flags
+ * @supp_flags: control flags supported by driver
+ * @ctrl_flags: control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if only supported control flags are set, false otherwise.
+ */
+static inline bool flow_rule_is_supp_control_flags(const u32 supp_flags,
+ const u32 ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ if (likely((ctrl_flags & ~supp_flags) == 0))
+ return true;
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported match on control.flags %#x",
+ ctrl_flags);
+
+ return false;
+}
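A driver-side sketch for a driver that can offload matching only on the fragment flags:

	struct flow_match_control match;

	flow_rule_match_control(rule, &match);
	if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
					     FLOW_DIS_FIRST_FRAG,
					     match.mask->flags, extack))
		return -EOPNOTSUPP;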
+
+/**
+ * flow_rule_is_supp_enc_control_flags() - check for supported control flags
+ * @supp_enc_flags: encapsulation control flags supported by driver
+ * @enc_ctrl_flags: encapsulation control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if only supported control flags are set, false otherwise.
+ */
+static inline bool flow_rule_is_supp_enc_control_flags(const u32 supp_enc_flags,
+ const u32 enc_ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ if (likely((enc_ctrl_flags & ~supp_enc_flags) == 0))
+ return true;
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported match on enc_control.flags %#x",
+ enc_ctrl_flags);
+
+ return false;
+}
+
+/**
+ * flow_rule_has_control_flags() - check for presence of any control flags
+ * @ctrl_flags: control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if control flags are set, false otherwise.
+ */
+static inline bool flow_rule_has_control_flags(const u32 ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ return !flow_rule_is_supp_control_flags(0, ctrl_flags, extack);
+}
+
+/**
+ * flow_rule_has_enc_control_flags() - check for presence of any control flags
+ * @enc_ctrl_flags: encapsulation control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if control flags are set, false otherwise.
+ */
+static inline bool flow_rule_has_enc_control_flags(const u32 enc_ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ return !flow_rule_is_supp_enc_control_flags(0, enc_ctrl_flags, extack);
+}
+
+/**
+ * flow_rule_match_has_control_flags() - match and check for any control flags
+ * @rule: The flow_rule under evaluation.
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if control flags are set, false otherwise.
+ */
+static inline bool flow_rule_match_has_control_flags(struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_match_control match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL))
+ return false;
+
+ flow_rule_match_control(rule, &match);
+
+ return flow_rule_has_control_flags(match.mask->flags, extack);
+}
+
struct flow_stats {
u64 pkts;
u64 bytes;
@@ -595,6 +685,7 @@ struct flow_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
+ bool skip_sw;
struct netlink_ext_ack *extack;
};
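
How a driver's tc-offload parser might consume the new helpers (a minimal sketch; the function name and the supported-flag set are hypothetical):

static int example_parse_control(struct flow_rule *rule,
				 struct netlink_ext_ack *extack)
{
	struct flow_match_control match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL))
		return 0;

	flow_rule_match_control(rule, &match);

	/* Only fragment matching is offloadable here; on failure the
	 * helper reports the offending bits via extack.
	 */
	if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
					     FLOW_DIS_FIRST_FRAG,
					     match.mask->flags, extack))
		return -EOPNOTSUPP;

	return 0;
}

Drivers with no control-flag support at all can instead call flow_rule_match_has_control_flags(rule, extack) and bail out when it returns true.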
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 9ece6e5a3ea8..a03d56765832 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -2,12 +2,20 @@
#ifndef __NET_GENERIC_NETLINK_H
#define __NET_GENERIC_NETLINK_H
-#include <linux/genetlink.h>
+#include <linux/net.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
+#include <uapi/linux/genetlink.h>
#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+/* Non-parallel generic netlink requests are serialized by a global lock. */
+void genl_lock(void);
+void genl_unlock(void);
+
+#define MODULE_ALIAS_GENL_FAMILY(family) \
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
/* Binding to multicast group requires %CAP_NET_ADMIN */
#define GENL_MCAST_CAP_NET_ADMIN BIT(0)
/* Binding to multicast group requires %CAP_SYS_ADMIN */
@@ -116,7 +124,8 @@ struct genl_family {
* @genlhdr: generic netlink message header
* @attrs: netlink attributes
* @_net: network namespace
- * @user_ptr: user pointers
+ * @ctx: storage space for use by the family
+ * @user_ptr: user pointers (deprecated, use ctx instead)
* @extack: extended ACK report struct
*/
struct genl_info {
@@ -127,7 +136,10 @@ struct genl_info {
struct genlmsghdr * genlhdr;
struct nlattr ** attrs;
possible_net_t _net;
- void * user_ptr[2];
+ union {
+ u8 ctx[NETLINK_CTX_SIZE];
+ void * user_ptr[2];
+ };
struct netlink_ext_ack *extack;
};
@@ -342,7 +354,7 @@ __genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags)
* such requests) or a struct initialized by genl_info_init_ntf()
* when constructing notifications.
*
- * Returns pointer to new genetlink header.
+ * Returns: pointer to new genetlink header.
*/
static inline void *
genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
@@ -354,7 +366,7 @@ genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
*
- * Returns pointer to netlink header.
+ * Returns: pointer to netlink header.
*/
static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
{
@@ -423,7 +435,7 @@ static inline void genl_dump_check_consistent(struct netlink_callback *cb,
* @flags: netlink message flags
* @cmd: generic netlink command
*
- * Returns pointer to user specific header
+ * Returns: pointer to user specific header
*/
static inline void *genlmsg_put_reply(struct sk_buff *skb,
struct genl_info *info,
@@ -523,13 +535,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
- * @flags: allocation flags
*
* This function must hold the RTNL or rcu_read_lock().
*/
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
- unsigned int group, gfp_t flags);
+ unsigned int group);
/**
* genlmsg_unicast - unicast a netlink message
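
The ctx area gives a family scratch space scoped to one request; a minimal sketch of a pre_doit hook using it (the struct and stored values are hypothetical):

struct example_req_ctx {	/* hypothetical per-request state */
	u32 portid;
};

static int example_pre_doit(const struct genl_split_ops *ops,
			    struct sk_buff *skb, struct genl_info *info)
{
	struct example_req_ctx *ctx = (void *)info->ctx;

	/* ctx shares a union with the deprecated user_ptr[]; the state
	 * must fit within NETLINK_CTX_SIZE bytes.
	 */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	ctx->portid = info->snd_portid;
	return 0;
}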
diff --git a/include/net/gre.h b/include/net/gre.h
index 4e209708b754..ccd293203284 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -49,67 +49,61 @@ static inline bool netif_is_ip6gretap(const struct net_device *dev)
!strcmp(dev->rtnl_link_ops->kind, "ip6gretap");
}
-static inline int gre_calc_hlen(__be16 o_flags)
+static inline int gre_calc_hlen(const unsigned long *o_flags)
{
int addend = 4;
- if (o_flags & TUNNEL_CSUM)
+ if (test_bit(IP_TUNNEL_CSUM_BIT, o_flags))
addend += 4;
- if (o_flags & TUNNEL_KEY)
+ if (test_bit(IP_TUNNEL_KEY_BIT, o_flags))
addend += 4;
- if (o_flags & TUNNEL_SEQ)
+ if (test_bit(IP_TUNNEL_SEQ_BIT, o_flags))
addend += 4;
return addend;
}
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+static inline void gre_flags_to_tnl_flags(unsigned long *dst, __be16 flags)
{
- __be16 tflags = 0;
-
- if (flags & GRE_CSUM)
- tflags |= TUNNEL_CSUM;
- if (flags & GRE_ROUTING)
- tflags |= TUNNEL_ROUTING;
- if (flags & GRE_KEY)
- tflags |= TUNNEL_KEY;
- if (flags & GRE_SEQ)
- tflags |= TUNNEL_SEQ;
- if (flags & GRE_STRICT)
- tflags |= TUNNEL_STRICT;
- if (flags & GRE_REC)
- tflags |= TUNNEL_REC;
- if (flags & GRE_VERSION)
- tflags |= TUNNEL_VERSION;
-
- return tflags;
+ IP_TUNNEL_DECLARE_FLAGS(res) = { };
+
+ __assign_bit(IP_TUNNEL_CSUM_BIT, res, flags & GRE_CSUM);
+ __assign_bit(IP_TUNNEL_ROUTING_BIT, res, flags & GRE_ROUTING);
+ __assign_bit(IP_TUNNEL_KEY_BIT, res, flags & GRE_KEY);
+ __assign_bit(IP_TUNNEL_SEQ_BIT, res, flags & GRE_SEQ);
+ __assign_bit(IP_TUNNEL_STRICT_BIT, res, flags & GRE_STRICT);
+ __assign_bit(IP_TUNNEL_REC_BIT, res, flags & GRE_REC);
+ __assign_bit(IP_TUNNEL_VERSION_BIT, res, flags & GRE_VERSION);
+
+ ip_tunnel_flags_copy(dst, res);
}
-static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+static inline __be16 gre_tnl_flags_to_gre_flags(const unsigned long *tflags)
{
__be16 flags = 0;
- if (tflags & TUNNEL_CSUM)
+ if (test_bit(IP_TUNNEL_CSUM_BIT, tflags))
flags |= GRE_CSUM;
- if (tflags & TUNNEL_ROUTING)
+ if (test_bit(IP_TUNNEL_ROUTING_BIT, tflags))
flags |= GRE_ROUTING;
- if (tflags & TUNNEL_KEY)
+ if (test_bit(IP_TUNNEL_KEY_BIT, tflags))
flags |= GRE_KEY;
- if (tflags & TUNNEL_SEQ)
+ if (test_bit(IP_TUNNEL_SEQ_BIT, tflags))
flags |= GRE_SEQ;
- if (tflags & TUNNEL_STRICT)
+ if (test_bit(IP_TUNNEL_STRICT_BIT, tflags))
flags |= GRE_STRICT;
- if (tflags & TUNNEL_REC)
+ if (test_bit(IP_TUNNEL_REC_BIT, tflags))
flags |= GRE_REC;
- if (tflags & TUNNEL_VERSION)
+ if (test_bit(IP_TUNNEL_VERSION_BIT, tflags))
flags |= GRE_VERSION;
return flags;
}
static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
- __be16 flags, __be16 proto,
+ const unsigned long *flags, __be16 proto,
__be32 key, __be32 seq)
{
+ IP_TUNNEL_DECLARE_FLAGS(cond) = { };
struct gre_base_hdr *greh;
skb_push(skb, hdr_len);
@@ -120,18 +114,22 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
greh->flags = gre_tnl_flags_to_gre_flags(flags);
greh->protocol = proto;
- if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+ __set_bit(IP_TUNNEL_KEY_BIT, cond);
+ __set_bit(IP_TUNNEL_CSUM_BIT, cond);
+ __set_bit(IP_TUNNEL_SEQ_BIT, cond);
+
+ if (ip_tunnel_flags_intersect(flags, cond)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
- if (flags & TUNNEL_SEQ) {
+ if (test_bit(IP_TUNNEL_SEQ_BIT, flags)) {
*ptr = seq;
ptr--;
}
- if (flags & TUNNEL_KEY) {
+ if (test_bit(IP_TUNNEL_KEY_BIT, flags)) {
*ptr = key;
ptr--;
}
- if (flags & TUNNEL_CSUM &&
+ if (test_bit(IP_TUNNEL_CSUM_BIT, flags) &&
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
*ptr = 0;
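
Callers now pass a flag bitmap instead of a __be16; sizing a header from parsed wire flags looks like this (a sketch; greh is an already-validated struct gre_base_hdr pointer):

IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
int hlen;

gre_flags_to_tnl_flags(tflags, greh->flags);	/* wire -> bitmap */
hlen = gre_calc_hlen(tflags);			/* 4..16 bytes */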
diff --git a/include/net/gro.h b/include/net/gro.h
index 50f1e403dbbb..22d3a69e4404 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -11,6 +11,9 @@
#include <net/udp.h>
#include <net/hotdata.h>
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
struct napi_gro_cb {
union {
struct {
@@ -36,15 +39,14 @@ struct napi_gro_cb {
/* This is non-zero if the packet cannot be merged with the new skb. */
u16 flush;
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
/* Number of segments aggregated. */
u16 count;
/* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
u16 proto;
+ u16 pad;
+
/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE 1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
@@ -75,8 +77,8 @@ struct napi_gro_cb {
/* Used in GRE, set in fou/gue_gro_receive */
u8 is_fou:1;
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
+ /* Used to determine if ipid_offset can be ignored */
+ u8 ip_fixedid:1;
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
@@ -87,6 +89,15 @@ struct napi_gro_cb {
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
+
+ /* L3 offsets */
+ union {
+ struct {
+ u16 network_offset;
+ u16 inner_network_offset;
+ };
+ u16 network_offsets[2];
+ };
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
@@ -172,12 +183,17 @@ static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
return ptr;
}
+static inline int skb_gro_receive_network_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark];
+}
+
static inline void *skb_gro_network_header(const struct sk_buff *skb)
{
if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
- return skb_gro_header_fast(skb, skb_network_offset(skb));
+ return skb_gro_header_fast(skb, skb_gro_receive_network_offset(skb));
- return skb_network_header(skb);
+ return skb->data + skb_gro_receive_network_offset(skb);
}
static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
@@ -428,29 +444,111 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
skb_gro_len(skb), proto, 0));
}
+static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2,
+ struct sk_buff *p, bool outer)
+{
+ const u32 id = ntohl(*(__be32 *)&iph->id);
+ const u32 id2 = ntohl(*(__be32 *)&iph2->id);
+ const u16 ipid_offset = (id >> 16) - (id2 >> 16);
+ const u16 count = NAPI_GRO_CB(p)->count;
+ const u32 df = id & IP_DF;
+ int flush;
+
+ /* All fields must match except length and checksum. */
+ flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF));
+
+ if (flush | (outer && df))
+ return flush;
+
+ /* When we receive our second frame we can decide whether to continue
+ * this flow as an atomic flow with a fixed ID or to use an
+ * incrementing ID.
+ */
+ if (count == 1 && df && !ipid_offset)
+ NAPI_GRO_CB(p)->ip_fixedid = true;
+
+ return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid);
+}
+
+static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr *iph2)
+{
+ /* <Version:4><Traffic_Class:8><Flow_Label:20> */
+ __be32 first_word = *(__be32 *)iph ^ *(__be32 *)iph2;
+
+ /* Flush if Traffic Class fields are different. */
+ return !!((first_word & htonl(0x0FF00000)) |
+ (__force __be32)(iph->hop_limit ^ iph2->hop_limit));
+}
+
+static inline int __gro_receive_network_flush(const void *th, const void *th2,
+ struct sk_buff *p, const u16 diff,
+ bool outer)
+{
+ const void *nh = th - diff;
+ const void *nh2 = th2 - diff;
+
+ if (((struct iphdr *)nh)->version == 6)
+ return ipv6_gro_flush(nh, nh2);
+ else
+ return inet_gro_flush(nh, nh2, p, outer);
+}
+
+static inline int gro_receive_network_flush(const void *th, const void *th2,
+ struct sk_buff *p)
+{
+ const bool encap_mark = NAPI_GRO_CB(p)->encap_mark;
+ int off = skb_transport_offset(p);
+ int flush;
+
+ flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, encap_mark);
+ if (encap_mark)
+ flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, false);
+
+ return flush;
+}
+
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
+void __gro_flush(struct gro_node *gro, bool flush_old);
+
+static inline void gro_flush(struct gro_node *gro, bool flush_old)
+{
+ if (!gro->bitmask)
+ return;
+
+ __gro_flush(gro, flush_old);
+}
+
+static inline void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+ gro_flush(&napi->gro, flush_old);
+}
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static inline void gro_normal_list(struct napi_struct *napi)
+static inline void gro_normal_list(struct gro_node *gro)
{
- if (!napi->rx_count)
+ if (!gro->rx_count)
return;
- netif_receive_skb_list_internal(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
+ netif_receive_skb_list_internal(&gro->rx_list);
+ INIT_LIST_HEAD(&gro->rx_list);
+ gro->rx_count = 0;
}
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
-static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb,
+ int segs)
{
- list_add_tail(&skb->list, &napi->rx_list);
- napi->rx_count += segs;
- if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
- gro_normal_list(napi);
+ list_add_tail(&skb->list, &gro->rx_list);
+ gro->rx_count += segs;
+ if (gro->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
+ gro_normal_list(gro);
}
+void gro_init(struct gro_node *gro);
+void gro_cleanup(struct gro_node *gro);
+
/* This function is the alternative of 'inet_iif' and 'inet_sdif'
* functions in case we can not rely on fields of IPCB.
*
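
For drivers the batching pattern is unchanged; only the state holder moves from napi_struct into the embedded gro_node. A sketch of a poll routine's tail (the receive loop is elided):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... receive up to 'budget' packets via napi_gro_receive() ... */

	/* Push batched GRO_NORMAL skbs up the stack; the helper now
	 * takes the embedded gro_node rather than the napi_struct.
	 */
	gro_normal_list(&napi->gro);

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}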
diff --git a/include/net/gtp.h b/include/net/gtp.h
index 2a503f035d18..c0253c8702d0 100644
--- a/include/net/gtp.h
+++ b/include/net/gtp.h
@@ -78,4 +78,9 @@ static inline bool netif_is_gtp(const struct net_device *dev)
#define GTP1_F_EXTHDR 0x04
#define GTP1_F_MASK 0x07
+struct gtp_ext_hdr {
+ __u8 len;
+ __u8 data[];
+};
+
#endif
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index 003667a1efd6..fda94b2647ff 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -23,7 +23,6 @@ struct net_hotdata {
struct net_offload udpv6_offload;
#endif
struct list_head offload_base;
- struct list_head ptype_all;
struct kmem_cache *skbuff_cache;
struct kmem_cache *skbuff_fclone_cache;
struct kmem_cache *skb_small_head_cache;
@@ -38,6 +37,9 @@ struct net_hotdata {
int max_backlog;
int dev_tx_weight;
int dev_rx_weight;
+ int sysctl_max_skb_frags;
+ int sysctl_skb_defer_max;
+ int sysctl_mem_pcpu_rsv;
};
#define inet_ehash_secret net_hotdata.tcp_protocol.secret
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index aa495decec35..bdbe91c609ff 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -11,9 +11,9 @@ struct hwbm_pool {
int frag_size;
/* Number of buffers currently used by this pool */
int buf_num;
- /* constructor called during alocation */
+ /* constructor called during allocation */
int (*construct)(struct hwbm_pool *bm_pool, void *buf);
- /* protect acces to the buffer counter*/
+ /* protect access to the buffer counter */
struct mutex buf_lock;
/* private data */
void *priv;
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 925bac726a92..813e163ce27c 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -18,31 +18,33 @@
#define __RADIOTAP_H
#include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* struct ieee80211_radiotap_header - base radiotap header
*/
struct ieee80211_radiotap_header {
- /**
- * @it_version: radiotap version, always 0
- */
- uint8_t it_version;
-
- /**
- * @it_pad: padding (or alignment)
- */
- uint8_t it_pad;
-
- /**
- * @it_len: overall radiotap header length
- */
- __le16 it_len;
-
- /**
- * @it_present: (first) present word
- */
- __le32 it_present;
+ __struct_group(ieee80211_radiotap_header_fixed, hdr, __packed,
+ /**
+ * @it_version: radiotap version, always 0
+ */
+ uint8_t it_version;
+
+ /**
+ * @it_pad: padding (or alignment)
+ */
+ uint8_t it_pad;
+
+ /**
+ * @it_len: overall radiotap header length
+ */
+ __le16 it_len;
+
+ /**
+ * @it_present: (first) present word
+ */
+ __le32 it_present;
+ );
/**
* @it_optional: all remaining presence bitmaps
@@ -50,6 +52,9 @@ struct ieee80211_radiotap_header {
__le32 it_optional[];
} __packed;
+static_assert(offsetof(struct ieee80211_radiotap_header, it_optional) == sizeof(struct ieee80211_radiotap_header_fixed),
+ "struct member likely outside of __struct_group()");
+
/* version is always 0 */
#define PKTHDR_RADIOTAP_VERSION 0
@@ -582,6 +587,7 @@ enum ieee80211_radiotap_eht_usig_tb {
/**
* ieee80211_get_radiotap_len - get radiotap header length
* @data: pointer to the header
+ * Return: the radiotap header length
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
diff --git a/include/net/ieee8021q.h b/include/net/ieee8021q.h
new file mode 100644
index 000000000000..8bfe903dd3d0
--- /dev/null
+++ b/include/net/ieee8021q.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */
+
+#ifndef _NET_IEEE8021Q_H
+#define _NET_IEEE8021Q_H
+
+#include <linux/errno.h>
+
+/**
+ * enum ieee8021q_traffic_type - 802.1Q traffic type priority values (802.1Q-2022)
+ *
+ * @IEEE8021Q_TT_BK: Background
+ * @IEEE8021Q_TT_BE: Best Effort (default). According to 802.1Q-2022, BE uses
+ *                  PCP 0 yet ranks above BK, which uses PCP 1; hence BK
+ *                  precedes BE in this enum.
+ * @IEEE8021Q_TT_EE: Excellent Effort
+ * @IEEE8021Q_TT_CA: Critical Applications
+ * @IEEE8021Q_TT_VI: Video, < 100 ms latency and jitter
+ * @IEEE8021Q_TT_VO: Voice, < 10 ms latency and jitter
+ * @IEEE8021Q_TT_IC: Internetwork Control
+ * @IEEE8021Q_TT_NC: Network Control
+ */
+enum ieee8021q_traffic_type {
+ IEEE8021Q_TT_BK = 0,
+ IEEE8021Q_TT_BE = 1,
+ IEEE8021Q_TT_EE = 2,
+ IEEE8021Q_TT_CA = 3,
+ IEEE8021Q_TT_VI = 4,
+ IEEE8021Q_TT_VO = 5,
+ IEEE8021Q_TT_IC = 6,
+ IEEE8021Q_TT_NC = 7,
+
+ /* private: */
+ IEEE8021Q_TT_MAX,
+};
+
+#define SIMPLE_IETF_DSCP_TO_IEEE8021Q_TT(dscp) (((dscp) >> 3) & 0x7)
+
+#if IS_ENABLED(CONFIG_NET_IEEE8021Q_HELPERS)
+
+int ietf_dscp_to_ieee8021q_tt(u8 dscp);
+int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt, unsigned int num_queues);
+
+#else
+
+static inline int ietf_dscp_to_ieee8021q_tt(u8 dscp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt,
+ unsigned int num_queues)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+#endif /* _NET_IEEE8021Q_H */
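
A queueing or switch driver might chain the two helpers to map a DSCP value to an egress traffic class (a sketch; the Best Effort fallback on error is an assumption, not mandated by the header):

static int example_dscp_to_tc(u8 dscp, unsigned int num_tx_queues)
{
	int tt = ietf_dscp_to_ieee8021q_tt(dscp);

	if (tt < 0)
		tt = IEEE8021Q_TT_BE;	/* assumed fallback */

	return ieee8021q_tt_to_tc(tt, num_tx_queues);
}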
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 025bd8d3c769..745891d2e113 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -21,8 +21,6 @@ struct sockaddr;
struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req, u8 proto);
-void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 533a7337865a..c32878c69179 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -40,7 +40,7 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
*
* The sockhash lock must be held as a reader here.
*/
-struct sock *__inet6_lookup_established(struct net *net,
+struct sock *__inet6_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const struct in6_addr *saddr,
const __be16 sport,
@@ -56,7 +56,7 @@ inet6_ehashfn_t inet6_ehashfn;
INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
-struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
__be16 sport,
@@ -64,7 +64,7 @@ struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
unsigned short hnum,
inet6_ehashfn_t *ehashfn);
-struct sock *inet6_lookup_listener(struct net *net,
+struct sock *inet6_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -73,7 +73,7 @@ struct sock *inet6_lookup_listener(struct net *net,
const unsigned short hnum,
const int dif, const int sdif);
-struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -82,7 +82,7 @@ struct sock *inet6_lookup_run_sk_lookup(struct net *net,
const u16 hnum, const int dif,
inet6_ehashfn_t *ehashfn);
-static inline struct sock *__inet6_lookup(struct net *net,
+static inline struct sock *__inet6_lookup(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -150,7 +150,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
int iif, int sdif,
bool *refcounted)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = dev_net_rcu(skb_dst(skb)->dev);
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct sock *sk;
@@ -167,7 +167,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
iif, sdif, refcounted);
}
-struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+struct sock *inet6_lookup(const struct net *net, struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
@@ -175,7 +175,7 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
int inet6_hash(struct sock *sk);
-static inline bool inet6_match(struct net *net, const struct sock *sk,
+static inline bool inet6_match(const struct net *net, const struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
const __portpair ports,
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index f50a644d87a9..c17a6585d0b0 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -29,8 +29,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags, int is_sendmsg);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
-int inet_accept(struct socket *sock, struct socket *newsock, int flags,
- bool kern);
+int inet_accept(struct socket *sock, struct socket *newsock,
+ struct proto_accept_arg *arg);
void __inet_accept(struct socket *sock, struct socket *newsock,
struct sock *newsk);
int inet_send_prepare(struct sock *sk);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index ccf171f7eb60..1735db332aab 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -44,12 +44,10 @@ struct inet_connection_sock_af_ops {
struct request_sock *req_unhash,
bool *own_req);
u16 net_header_len;
- u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
- void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
void (*mtu_reduced)(struct sock *sk);
};
@@ -58,7 +56,6 @@ struct inet_connection_sock_af_ops {
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
* @icsk_bind2_hash: Bind node in the bhash2 table
- * @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
* @icsk_pmtu_cookie Last pmtu seen by socket
@@ -66,7 +63,6 @@ struct inet_connection_sock_af_ops {
* @icsk_af_ops Operations which are AF_INET{4,6} specific
* @icsk_ulp_ops Pluggable ULP control hook
* @icsk_ulp_data ULP private data
- * @icsk_clean_acked Clean acked data hook
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -85,18 +81,17 @@ struct inet_connection_sock {
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
struct inet_bind2_bucket *icsk_bind2_hash;
- unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
__u32 icsk_rto_min;
+ u32 icsk_rto_max;
__u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void __rcu *icsk_ulp_data;
- void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:5,
icsk_ca_initialized:1,
@@ -116,8 +111,8 @@ struct inet_connection_sock {
#define ATO_BITS 8
__u32 ato:ATO_BITS, /* Predicted tick of soft clock */
lrcv_flowlabel:20, /* last received ipv6 flowlabel */
- unused:4;
- unsigned long timeout; /* Currently scheduled timeout */
+ dst_quick_ack:1, /* cache dst RTAX_QUICKACK */
+ unused:3;
__u32 lrcvtime; /* timestamp of last received data packet */
__u16 last_seg_size; /* Size of last incoming segment */
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
@@ -147,10 +142,7 @@ struct inet_connection_sock {
#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */
-static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
-{
- return (struct inet_connection_sock *)sk;
-}
+#define inet_csk(ptr) container_of_const(ptr, struct inet_connection_sock, icsk_inet.sk)
static inline void *inet_csk_ca(const struct sock *sk)
{
@@ -192,20 +184,29 @@ static inline void inet_csk_delack_init(struct sock *sk)
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}
-void inet_csk_delete_keepalive_timer(struct sock *sk);
-void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+static inline unsigned long
+icsk_timeout(const struct inet_connection_sock *icsk)
+{
+ return READ_ONCE(icsk->icsk_retransmit_timer.expires);
+}
+
+static inline unsigned long
+icsk_delack_timeout(const struct inet_connection_sock *icsk)
+{
+ return READ_ONCE(icsk->icsk_delack_timer.expires);
+}
static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
- icsk->icsk_pending = 0;
+ smp_store_release(&icsk->icsk_pending, 0);
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.pending = 0;
+ smp_store_release(&icsk->icsk_ack.pending, 0);
icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
@@ -230,15 +231,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
when = max_when;
}
+ when += jiffies;
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
- icsk->icsk_pending = what;
- icsk->icsk_timeout = jiffies + when;
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+ smp_store_release(&icsk->icsk_pending, what);
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, when);
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
- icsk->icsk_ack.timeout = jiffies + when;
- sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+ smp_store_release(&icsk->icsk_ack.pending,
+ icsk->icsk_ack.pending | ICSK_ACK_TIMER);
+ sk_reset_timer(sk, &icsk->icsk_delack_timer, when);
} else {
pr_debug("inet_csk BUG: unknown timer value\n");
}
@@ -253,7 +254,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
return (unsigned long)min_t(u64, when, max_when);
}
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
+struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg);
int inet_csk_get_port(struct sock *sk, unsigned short snum);
@@ -266,7 +267,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child);
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req,
@@ -284,7 +285,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
- return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
+ return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
}
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
@@ -320,8 +321,6 @@ static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
-void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-
/* update the fast reuse flag when adding a socket */
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct sock *sk);
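
With the cached icsk_timeout field removed, readers derive the deadline from the timer itself; the acquire below pairs with the smp_store_release() writers above (a sketch modeled on diag/procfs-style readers):

const struct inet_connection_sock *icsk = inet_csk(sk);
unsigned long deadline = 0;
u8 pending = smp_load_acquire(&icsk->icsk_pending);

if (pending == ICSK_TIME_RETRANS || pending == ICSK_TIME_PROBE0)
	deadline = icsk_timeout(icsk);	/* timer expiry, in jiffies */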
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 153960663ce4..0eccd9c3a883 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -76,7 +76,7 @@ struct frag_v6_compare_key {
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
- * @mono_delivery_time: stamp has a mono delivery time (EDT)
+ * @tstamp_type: timestamp type of @stamp (e.g. mono delivery time, EDT)
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @fqdir: pointer to struct fqdir
@@ -97,7 +97,7 @@ struct inet_frag_queue {
ktime_t stamp;
int len;
int meat;
- u8 mono_delivery_time;
+ u8 tstamp_type;
__u8 flags;
u16 max_size;
struct fqdir *fqdir;
@@ -137,7 +137,7 @@ static inline void fqdir_pre_exit(struct fqdir *fqdir)
}
void fqdir_exit(struct fqdir *fqdir);
-void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_kill(struct inet_frag_queue *q, int *refs);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
@@ -145,9 +145,9 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
unsigned int inet_frag_rbtree_purge(struct rb_root *root,
enum skb_drop_reason reason);
-static inline void inet_frag_put(struct inet_frag_queue *q)
+static inline void inet_frag_putn(struct inet_frag_queue *q, int refs)
{
- if (refcount_dec_and_test(&q->refcnt))
+ if (refs && refcount_sub_and_test(refs, &q->refcnt))
inet_frag_destroy(q);
}
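
Callers accumulate reference drops and release them in one step outside the lock (a sketch of the intended pattern; the surrounding eviction logic is elided):

int refs = 0;

spin_lock_bh(&q->lock);
inet_frag_kill(q, &refs);	/* records how many refs to drop */
spin_unlock_bh(&q->lock);

inet_frag_putn(q, refs);	/* single refcount_sub_and_test() */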
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 7f1b38458743..4564b5d348b1 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -89,6 +89,7 @@ struct inet_bind_bucket {
bool fast_ipv6_only;
struct hlist_node node;
struct hlist_head bhash2;
+ struct rcu_head rcu;
};
struct inet_bind2_bucket {
@@ -174,14 +175,9 @@ struct inet_hashinfo {
bool pernet;
} ____cacheline_aligned_in_smp;
-static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
+static inline struct inet_hashinfo *tcp_get_hashinfo(const struct sock *sk)
{
-#if IS_ENABLED(CONFIG_IP_DCCP)
- return sk->sk_prot->h.hashinfo ? :
- sock_net(sk)->ipv4.tcp_death_row.hashinfo;
-#else
return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
-#endif
}
static inline struct inet_listen_hashbucket *
@@ -226,8 +222,7 @@ struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
const unsigned short snum, int l3mdev);
-void inet_bind_bucket_destroy(struct kmem_cache *cachep,
- struct inet_bind_bucket *tb);
+void inet_bind_bucket_destroy(struct inet_bind_bucket *tb);
bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
const struct net *net, unsigned short port,
@@ -304,7 +299,7 @@ int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
-struct sock *__inet_lookup_listener(struct net *net,
+struct sock *__inet_lookup_listener(const struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
@@ -351,7 +346,7 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-static inline bool inet_match(struct net *net, const struct sock *sk,
+static inline bool inet_match(const struct net *net, const struct sock *sk,
const __addrpair cookie, const __portpair ports,
int dif, int sdif)
{
@@ -368,7 +363,7 @@ static inline bool inet_match(struct net *net, const struct sock *sk,
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
*/
-struct sock *__inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(const struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
@@ -382,13 +377,13 @@ inet_ehashfn_t inet_ehashfn;
INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
-struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
inet_ehashfn_t *ehashfn);
-struct sock *inet_lookup_run_sk_lookup(struct net *net,
+struct sock *inet_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
@@ -492,7 +487,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const int sdif,
bool *refcounted)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = dev_net_rcu(skb_dst(skb)->dev);
const struct iphdr *iph = ip_hdr(skb);
struct sock *sk;
@@ -527,9 +522,12 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
+ u32 hash_port0,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
- struct inet_timewait_sock **));
+ struct inet_timewait_sock **,
+ bool rcu_lookup,
+ u32 hash));
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index f9ddd47dc4f8..1086256549fa 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
@@ -150,7 +151,8 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
return bound_dev_if == dif || bound_dev_if == sdif;
}
-static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+static inline bool inet_sk_bound_dev_eq(const struct net *net,
+ int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
@@ -171,8 +173,9 @@ struct inet_cork {
u8 tx_flags;
__u8 ttl;
__s16 tos;
- char priority;
+ u32 priority;
__u16 gso_size;
+ u32 ts_opt_id;
u64 transmit_time;
u32 mark;
};
@@ -240,7 +243,8 @@ struct inet_sock {
struct inet_cork_full cork;
};
-#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_TS_OPT_ID 2 /* ts_opt_id field is valid, overriding sk_tskey */
enum {
INET_FLAGS_PKTINFO = 0,
@@ -299,6 +303,11 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL;
}
+static inline dscp_t inet_sk_dscp(const struct inet_sock *inet)
+{
+ return inet_dsfield_to_dscp(READ_ONCE(inet->tos));
+}
+
#define inet_test_bit(nr, sk) \
test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
#define inet_set_bit(nr, sk) \
@@ -318,8 +327,10 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
static inline struct sock *sk_to_full_sk(struct sock *sk)
{
#ifdef CONFIG_INET
- if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)
sk = inet_reqsk(sk)->rsk_listener;
+ if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)
+ sk = NULL;
#endif
return sk;
}
@@ -328,8 +339,10 @@ static inline struct sock *sk_to_full_sk(struct sock *sk)
static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
{
#ifdef CONFIG_INET
- if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)
sk = ((const struct request_sock *)sk)->rsk_listener;
+ if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)
+ sk = NULL;
#endif
return sk;
}
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f28da08a37b4..67a313575780 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -45,6 +45,8 @@ struct inet_timewait_sock {
#define tw_node __tw_common.skc_nulls_node
#define tw_bind_node __tw_common.skc_bind_node
#define tw_refcnt __tw_common.skc_refcnt
+#define tw_tx_queue_mapping __tw_common.skc_tx_queue_mapping
+#define tw_rx_queue_mapping __tw_common.skc_rx_queue_mapping
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
@@ -58,7 +60,7 @@ struct inet_timewait_sock {
#define tw_dr __tw_common.skc_tw_dr
__u32 tw_mark;
- volatile unsigned char tw_substate;
+ unsigned char tw_substate;
unsigned char tw_rcv_wscale;
/* Socket demultiplex comparisons on incoming packets. */
@@ -72,6 +74,10 @@ struct inet_timewait_sock {
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
+ /**
+ * @tw_entry_stamp: Time of entry into %TCP_TIME_WAIT state in msec.
+ */
+ u32 tw_entry_stamp;
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
struct inet_bind2_bucket *tw_tb2;
@@ -93,17 +99,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo);
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);
-static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
-{
- __inet_twsk_schedule(tw, timeo, false);
-}
-
static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
{
__inet_twsk_schedule(tw, timeo, true);
@@ -111,7 +114,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo);
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 74ff688568a0..f475757daafb 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -96,30 +96,28 @@ static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
- const struct inetpeer_addr *daddr,
- int create);
+ const struct inetpeer_addr *daddr);
static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
__be32 v4daddr,
- int vif, int create)
+ int vif)
{
struct inetpeer_addr daddr;
daddr.a4.addr = v4daddr;
daddr.a4.vif = vif;
daddr.family = AF_INET;
- return inet_getpeer(base, &daddr, create);
+ return inet_getpeer(base, &daddr);
}
static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
- const struct in6_addr *v6daddr,
- int create)
+ const struct in6_addr *v6daddr)
{
struct inetpeer_addr daddr;
daddr.a6 = *v6daddr;
daddr.family = AF_INET6;
- return inet_getpeer(base, &daddr, create);
+ return inet_getpeer(base, &daddr);
}
static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
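
With the create argument gone, a lookup always installs an entry on a miss, so call sites shrink to (a sketch; vif comes from the caller's context):

struct inet_peer *peer;

peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif);
if (peer) {
	/* ... consult per-peer state, e.g. rate limiting ... */
	inet_putpeer(peer);
}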
diff --git a/include/net/ip.h b/include/net/ip.h
index 25cb688bdc62..47ed6d23853d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,6 +33,7 @@
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>
+#include <net/inet_dscp.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -80,7 +81,6 @@ struct ipcm_cookie {
__u8 protocol;
__u8 ttl;
__s16 tos;
- char priority;
__u16 gso_size;
};
@@ -92,10 +92,12 @@ static inline void ipcm_init(struct ipcm_cookie *ipcm)
static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
const struct inet_sock *inet)
{
- ipcm_init(ipcm);
+ *ipcm = (struct ipcm_cookie) {
+ .tos = READ_ONCE(inet->tos),
+ };
+
+ sockcm_init(&ipcm->sockc, &inet->sk);
- ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
- ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
@@ -256,11 +258,6 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
return RT_SCOPE_UNIVERSE;
}
-static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
-{
- return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
-}
-
/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
@@ -285,7 +282,8 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
+ struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -359,7 +357,7 @@ static inline void inet_get_local_port_range(const struct net *net, int *low, in
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
#ifdef CONFIG_SYSCTL
-static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
+static inline bool inet_is_local_reserved_port(const struct net *net, unsigned short port)
{
if (!net->ipv4.sysctl_local_reserved_ports)
return false;
@@ -421,9 +419,14 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}
+static inline dscp_t ip4h_dscp(const struct iphdr *ip4h)
+{
+ return inet_dsfield_to_dscp(ip4h->tos);
+}
+
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
- const struct rtable *rt = (const struct rtable *)dst;
+ const struct rtable *rt = dst_rtable(dst);
return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}
@@ -461,10 +464,13 @@ static inline bool ip_sk_ignore_df(const struct sock *sk)
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
- const struct rtable *rt = container_of(dst, struct rtable, dst);
- struct net *net = dev_net(dst->dev);
- unsigned int mtu;
+ const struct rtable *rt = dst_rtable(dst);
+ unsigned int mtu, res;
+ struct net *net;
+ rcu_read_lock();
+
+ net = dev_net_rcu(dst->dev);
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
!forwarding) {
@@ -488,7 +494,11 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
out:
mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
- return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ res = mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+
+ rcu_read_unlock();
+
+ return res;
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
@@ -506,8 +516,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}
-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len,
+struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
@@ -677,11 +686,24 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
+#if IS_MODULE(CONFIG_IPV6)
+#define EXPORT_IPV6_MOD(X) EXPORT_SYMBOL(X)
+#define EXPORT_IPV6_MOD_GPL(X) EXPORT_SYMBOL_GPL(X)
+#else
+#define EXPORT_IPV6_MOD(X)
+#define EXPORT_IPV6_MOD_GPL(X)
+#endif
+
static inline unsigned int ipv4_addr_hash(__be32 ip)
{
return (__force unsigned int) ip;
}
+static inline u32 __ipv4_addr_hash(const __be32 ip, const u32 initval)
+{
+ return jhash_1word((__force u32)ip, initval);
+}
+
static inline u32 ipv4_portaddr_hash(const struct net *net,
__be32 saddr,
unsigned int port)
@@ -795,9 +817,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}
-bool icmp_global_allow(void);
-extern int sysctl_icmp_msgs_per_sec;
-extern int sysctl_icmp_msgs_burst;
+bool icmp_global_allow(struct net *net);
+void icmp_global_consume(struct net *net);
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
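
The global ICMP limiter is now check-then-commit, so budget is consumed only when a message is actually sent (a sketch of the intended call pattern):

if (icmp_global_allow(net)) {
	/* ... build and transmit the ICMP message ... */
	icmp_global_consume(net);	/* charge only on success */
}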
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 323c94f1845b..88b0dd4d8e09 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -198,6 +198,7 @@ struct fib6_info {
fib6_destroying:1,
unused:4;
+ struct list_head purge_link;
struct rcu_head rcu;
struct nexthop *nh;
struct fib6_nh fib6_nh[];
@@ -234,9 +235,11 @@ struct fib6_result {
for (rt = (w)->leaf; rt; \
rt = rcu_dereference_protected(rt->fib6_next, 1))
-static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
+#define dst_rt6_info(_ptr) container_of_const(_ptr, struct rt6_info, dst)
+
+static inline struct inet6_dev *ip6_dst_idev(const struct dst_entry *dst)
{
- return ((struct rt6_info *)dst)->rt6i_idev;
+ return dst_rt6_info(dst)->rt6i_idev;
}
static inline bool fib6_requires_src(const struct fib6_info *rt)
@@ -338,7 +341,7 @@ static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) {
DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link));
- call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
+ call_rcu_hurry(&f6i->rcu, fib6_info_destroy_rcu);
}
}
@@ -392,7 +395,7 @@ struct fib6_table {
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
unsigned int flags;
- unsigned int fib_seq;
+ unsigned int fib_seq; /* writes protected by rtnl_mutex */
struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -561,7 +564,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
int __net_init fib6_notifier_init(struct net *net);
void __net_exit fib6_notifier_exit(struct net *net);
-unsigned int fib6_tables_seq_read(struct net *net);
+unsigned int fib6_tables_seq_read(const struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
@@ -630,7 +633,7 @@ void fib6_rules_cleanup(void);
bool fib6_rule_default(const struct fib_rule *rule);
int fib6_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
-unsigned int fib6_rules_seq_read(struct net *net);
+unsigned int fib6_rules_seq_read(const struct net *net);
static inline bool fib6_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
@@ -674,7 +677,7 @@ static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
{
return 0;
}
-static inline unsigned int fib6_rules_seq_read(struct net *net)
+static inline unsigned int fib6_rules_seq_read(const struct net *net)
{
return 0;
}
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index a30c6aa9e5cf..6dbdf60b342f 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -127,18 +127,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
const struct in6_addr *daddr,
- unsigned int prefs,
+ unsigned int prefs, int l3mdev_index,
struct in6_addr *saddr)
{
+ struct net_device *l3mdev;
+ struct net_device *dev;
+ bool same_vrf;
int err = 0;
- if (f6i && f6i->fib6_prefsrc.plen) {
+ rcu_read_lock();
+
+ l3mdev = dev_get_by_index_rcu(net, l3mdev_index);
+ if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev)
+ dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+ same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev;
+ if (f6i && f6i->fib6_prefsrc.plen && same_vrf)
*saddr = f6i->fib6_prefsrc.addr;
- } else {
- struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+ else
+ err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr);
- err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
- }
+ rcu_read_unlock();
return err;
}
@@ -210,12 +218,11 @@ void rt6_uncached_list_del(struct rt6_info *rt);
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
- const struct rt6_info *rt6 = NULL;
if (dst)
- rt6 = container_of(dst, struct rt6_info, dst);
+ return dst_rt6_info(dst);
- return rt6;
+ return NULL;
}
/*
@@ -227,7 +234,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
{
struct ipv6_pinfo *np = inet6_sk(sk);
- np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
+ np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
sk_setup_caps(sk, dst);
np->daddr_cache = daddr;
#ifdef CONFIG_IPV6_SUBTREES
@@ -240,7 +247,7 @@ void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
{
- struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+ const struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
return rt->rt6i_flags & RTF_LOCAL;
}
@@ -248,7 +255,7 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
const struct in6_addr *daddr)
{
- struct rt6_info *rt = (struct rt6_info *)dst;
+ const struct rt6_info *rt = dst_rt6_info(dst);
return rt->rt6i_flags & RTF_ANYCAST ||
(rt->rt6i_dst.plen < 127 &&
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 74b369bddf49..399592405c72 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -30,8 +30,8 @@ struct __ip6_tnl_parm {
struct in6_addr laddr; /* local tunnel end-point address */
struct in6_addr raddr; /* remote tunnel end-point address */
- __be16 i_flags;
- __be16 o_flags;
+ IP_TUNNEL_DECLARE_FLAGS(i_flags);
+ IP_TUNNEL_DECLARE_FLAGS(o_flags);
__be32 i_key;
__be32 o_key;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9b2f69ba5e49..48bb3cf41469 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,8 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
+#include <linux/ip.h>
+#include <linux/in_route.h>
struct fib_config {
u8 fc_dst_len;
@@ -160,6 +162,8 @@ struct fib_info {
struct fib_nh fib_nh[] __counted_by(fib_nhs);
};
+int __net_init fib4_semantics_init(struct net *net);
+void __net_exit fib4_semantics_exit(struct net *net);
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule;
@@ -173,6 +177,7 @@ struct fib_result {
unsigned char type;
unsigned char scope;
u32 tclassid;
+ dscp_t dscp;
struct fib_nh_common *nhc;
struct fib_info *fi;
struct fib_table *table;
@@ -344,7 +349,7 @@ static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
return 0;
}
-static inline unsigned int fib4_rules_seq_read(struct net *net)
+static inline unsigned int fib4_rules_seq_read(const struct net *net)
{
return 0;
}
@@ -408,7 +413,7 @@ static inline bool fib4_has_custom_rules(const struct net *net)
bool fib4_rule_default(const struct fib_rule *rule);
int fib4_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
-unsigned int fib4_rules_seq_read(struct net *net);
+unsigned int fib4_rules_seq_read(const struct net *net);
static inline bool fib4_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
@@ -433,6 +438,11 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
#endif /* CONFIG_IP_MULTIPLE_TABLES */
+static inline bool fib_dscp_masked_match(dscp_t dscp, const struct flowi4 *fl4)
+{
+ return dscp == inet_dsfield_to_dscp(RT_TOS(fl4->flowi4_tos));
+}
+
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
@@ -441,8 +451,21 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
- u8 tos, int oif, struct net_device *dev,
+ dscp_t dscp, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);
+
+static inline enum skb_drop_reason
+fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst,
+ dscp_t dscp, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag)
+{
+ int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev,
+ itag);
+ if (err < 0)
+ return -err;
+ return SKB_NOT_DROPPED_YET;
+}
+
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
@@ -520,10 +543,39 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys);
+
+static inline void
+fib_multipath_hash_construct_key(siphash_key_t *key, u32 mp_seed)
+{
+ u64 mp_seed_64 = mp_seed;
+
+ key->key[0] = (mp_seed_64 << 32) | mp_seed_64;
+ key->key[1] = key->key[0];
+}
+
+static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ struct flow_keys *keys)
+{
+ siphash_aligned_key_t hash_key;
+ u32 mp_seed;
+
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+ fib_multipath_hash_construct_key(&hash_key, mp_seed);
+
+ return flow_hash_from_keys_seed(keys, &hash_key);
+}
+#else
+static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ struct flow_keys *keys)
+{
+ return flow_hash_from_keys(keys);
+}
#endif
+
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
struct netlink_ext_ack *extack);
-void fib_select_multipath(struct fib_result *res, int hash);
+void fib_select_multipath(struct fib_result *res, int hash,
+ const struct flowi4 *fl4);
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, const struct sk_buff *skb);
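
Input-path callers can propagate a drop reason straight from source validation (a sketch; iph, dev and idev come from the usual ip_rcv-style context):

enum skb_drop_reason reason;
u32 itag = 0;

reason = fib_validate_source_reason(skb, iph->saddr, iph->daddr,
				    ip4h_dscp(iph), 0, dev, idev, &itag);
if (reason != SKB_NOT_DROPPED_YET)
	goto drop;	/* kfree_skb_reason(skb, reason) at the label */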
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index c286cc2e766e..0c3d571a04a1 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -36,6 +36,24 @@
(sizeof_field(struct ip_tunnel_key, u) - \
sizeof_field(struct ip_tunnel_key, u.ipv4))
+#define __ipt_flag_op(op, ...) \
+ op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)
+
+#define IP_TUNNEL_DECLARE_FLAGS(...) \
+ __ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)
+
+#define ip_tunnel_flags_zero(...) __ipt_flag_op(bitmap_zero, __VA_ARGS__)
+#define ip_tunnel_flags_copy(...) __ipt_flag_op(bitmap_copy, __VA_ARGS__)
+#define ip_tunnel_flags_and(...) __ipt_flag_op(bitmap_and, __VA_ARGS__)
+#define ip_tunnel_flags_or(...) __ipt_flag_op(bitmap_or, __VA_ARGS__)
+
+#define ip_tunnel_flags_empty(...) \
+ __ipt_flag_op(bitmap_empty, __VA_ARGS__)
+#define ip_tunnel_flags_intersect(...) \
+ __ipt_flag_op(bitmap_intersects, __VA_ARGS__)
+#define ip_tunnel_flags_subset(...) \
+ __ipt_flag_op(bitmap_subset, __VA_ARGS__)
+
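/* Illustrative sketch, not part of this change: with the flags now held in
 * a bitmap, open-coded "flags & TUNNEL_FOO" tests become bit operations on
 * the IP_TUNNEL_FOO_BIT values from the reworked UAPI enum.
 */
static inline bool example_wants_csum(void)
{
        IP_TUNNEL_DECLARE_FLAGS(flags) = { };

        __set_bit(IP_TUNNEL_KEY_BIT, flags);    /* was: flags |= TUNNEL_KEY */
        __set_bit(IP_TUNNEL_CSUM_BIT, flags);   /* was: flags |= TUNNEL_CSUM */

        return test_bit(IP_TUNNEL_CSUM_BIT, flags);
}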
struct ip_tunnel_key {
__be64 tun_id;
union {
@@ -48,11 +66,11 @@ struct ip_tunnel_key {
struct in6_addr dst;
} ipv6;
} u;
- __be16 tun_flags;
- u8 tos; /* TOS for IPv4, TC for IPv6 */
- u8 ttl; /* TTL for IPv4, HL for IPv6 */
+ IP_TUNNEL_DECLARE_FLAGS(tun_flags);
__be32 label; /* Flow Label for IPv6 */
u32 nhid;
+ u8 tos; /* TOS for IPv4, TC for IPv6 */
+ u8 ttl; /* TTL for IPv4, HL for IPv6 */
__be16 tp_src;
__be16 tp_dst;
__u8 flow_flags;
@@ -77,8 +95,8 @@ struct ip_tunnel_encap {
#define ip_tunnel_info_opts(info) \
_Generic(info, \
- const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
- struct ip_tunnel_info * : ((void *)((info) + 1))\
+ const struct ip_tunnel_info * : ((const void *)(info)->options),\
+ struct ip_tunnel_info * : ((void *)(info)->options)\
)
struct ip_tunnel_info {
@@ -89,6 +107,7 @@ struct ip_tunnel_info {
#endif
u8 options_len;
u8 mode;
+ u8 options[] __aligned_largest __counted_by(options_len);
};
/* 6rd prefix/relay information */
@@ -110,6 +129,17 @@ struct ip_tunnel_prl_entry {
struct metadata_dst;
+/* Kernel-side variant of ip_tunnel_parm */
+struct ip_tunnel_parm_kern {
+ char name[IFNAMSIZ];
+ IP_TUNNEL_DECLARE_FLAGS(i_flags);
+ IP_TUNNEL_DECLARE_FLAGS(o_flags);
+ __be32 i_key;
+ __be32 o_key;
+ int link;
+ struct iphdr iph;
+};
+
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
@@ -136,7 +166,7 @@ struct ip_tunnel {
struct dst_cache dst_cache;
- struct ip_tunnel_parm parms;
+ struct ip_tunnel_parm_kern parms;
int mlink;
int encap_hlen; /* Encap header length (FOU,GUE) */
@@ -157,7 +187,7 @@ struct ip_tunnel {
};
struct tnl_ptk_info {
- __be16 flags;
+ IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 proto;
__be32 key;
__be32 seq;
@@ -179,11 +209,80 @@ struct ip_tunnel_net {
int type;
};
+static inline void ip_tunnel_set_options_present(unsigned long *flags)
+{
+ IP_TUNNEL_DECLARE_FLAGS(present) = { };
+
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
+
+ ip_tunnel_flags_or(flags, flags, present);
+}
+
+static inline void ip_tunnel_clear_options_present(unsigned long *flags)
+{
+ IP_TUNNEL_DECLARE_FLAGS(present) = { };
+
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
+
+ __ipt_flag_op(bitmap_andnot, flags, flags, present);
+}
+
+static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
+{
+ IP_TUNNEL_DECLARE_FLAGS(present) = { };
+
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
+ __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
+
+ return ip_tunnel_flags_intersect(flags, present);
+}
+
+static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
+{
+ IP_TUNNEL_DECLARE_FLAGS(supp) = { };
+
+ bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
+ __set_bit(IP_TUNNEL_VTI_BIT, supp);
+
+ return ip_tunnel_flags_subset(flags, supp);
+}
+
+static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
+{
+ ip_tunnel_flags_zero(dst);
+
+ bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
+ __assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
+}
+
+static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
+{
+ __be16 ret;
+
+ ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
+ if (test_bit(IP_TUNNEL_VTI_BIT, flags))
+ ret |= VTI_ISVTI;
+
+ return ret;
+}
+
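/* Illustrative sketch, not part of this change: round-tripping a legacy
 * 16-bit flags word through the bitmap representation for old UAPI paths.
 * Converting back is only legal while nothing beyond the low 16 bits
 * (plus VTI) is set.
 */
static inline __be16 example_be16_roundtrip(__be16 old_flags)
{
        IP_TUNNEL_DECLARE_FLAGS(flags);

        ip_tunnel_flags_from_be16(flags, old_flags);
        /* ... operate on the bitmap here ... */
        if (!ip_tunnel_flags_is_be16_compat(flags))
                return 0;

        return ip_tunnel_flags_to_be16(flags);
}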
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
__be32 saddr, __be32 daddr,
u8 tos, u8 ttl, __be32 label,
__be16 tp_src, __be16 tp_dst,
- __be64 tun_id, __be16 tun_flags)
+ __be64 tun_id,
+ const unsigned long *tun_flags)
{
key->tun_id = tun_id;
key->u.ipv4.src = saddr;
@@ -193,7 +292,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
key->tos = tos;
key->ttl = ttl;
key->label = label;
- key->tun_flags = tun_flags;
+ ip_tunnel_flags_copy(key->tun_flags, tun_flags);
/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
* the upper tunnel are used.
@@ -214,12 +313,8 @@ ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
{
if (skb->mark)
return false;
- if (!info)
- return true;
- if (info->key.tun_flags & TUNNEL_NOCACHE)
- return false;
- return true;
+ return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
@@ -260,7 +355,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
if (oif) {
- fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+ fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index(net, oif);
/* Legacy VRF/l3mdev use case */
fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
}
@@ -282,23 +377,26 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
-
-void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
- struct rtnl_link_ops *ops,
- struct list_head *dev_to_kill);
+void ip_tunnel_delete_net(struct net *net, unsigned int id,
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const u8 proto, int tunnel_hlen);
-int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
+ int cmd);
+bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
+ const void __user *data);
+bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
- int link, __be16 flags,
+ int link, const unsigned long *flags,
__be32 remote, __be32 local,
__be32 key);
@@ -307,16 +405,17 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p, __u32 fwmark);
-int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p, __u32 fwmark);
+ struct ip_tunnel_parm_kern *p, __u32 fwmark);
+int ip_tunnel_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct ip_tunnel_parm_kern *p,
+ __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
struct ip_tunnel_encap *encap);
void ip_tunnel_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms);
+ struct ip_tunnel_parm_kern *parms);
extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
@@ -341,7 +440,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
-static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+static inline enum skb_drop_reason
+pskb_inet_may_pull_reason(struct sk_buff *skb)
{
int nhlen;
@@ -358,15 +458,22 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
nhlen = 0;
}
- return pskb_network_may_pull(skb, nhlen);
+ return pskb_network_may_pull_reason(skb, nhlen);
+}
+
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
}
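/* Illustrative sketch, not part of this change: tunnel Rx handlers can now
 * report why the pull failed instead of collapsing everything into a bool.
 */
static inline int example_tunnel_rcv(struct sk_buff *skb)
{
        enum skb_drop_reason reason = pskb_inet_may_pull_reason(skb);

        if (reason != SKB_NOT_DROPPED_YET) {
                kfree_skb_reason(skb, reason);
                return 0;
        }

        /* ... decapsulate ... */
        return 0;
}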
/* Variant of pskb_inet_may_pull().
*/
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+static inline enum skb_drop_reason
+skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit)
{
- int nhlen = 0, maclen = ETH_HLEN;
+ int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
+ enum skb_drop_reason reason;
/* Essentially this is skb_protocol(skb, true)
* And we get MAC len.
@@ -387,11 +494,13 @@ static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
* a base network header in skb->head.
*/
- if (!pskb_may_pull(skb, maclen + nhlen))
- return false;
+ reason = pskb_may_pull_reason(skb, maclen + nhlen);
+ if (reason)
+ return reason;
skb_set_network_header(skb, maclen);
- return true;
+
+ return SKB_NOT_DROPPED_YET;
}
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
@@ -474,7 +583,7 @@ static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
return 0;
}
-/* Propogate ECN bits out */
+/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
const struct sk_buff *skb)
{
@@ -542,17 +651,18 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
static inline void ip_tunnel_info_opts_get(void *to,
const struct ip_tunnel_info *info)
{
- memcpy(to, info + 1, info->options_len);
+ memcpy(to, ip_tunnel_info_opts(info), info->options_len);
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
- __be16 flags)
+ const unsigned long *flags)
{
info->options_len = len;
if (len > 0) {
memcpy(ip_tunnel_info_opts(info), from, len);
- info->key.tun_flags |= flags;
+ ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
+ flags);
}
}
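/* Illustrative sketch, not part of this change: callers that used to pass
 * TUNNEL_GENEVE_OPT and friends now build a bitmap first; opts and len are
 * assumed to come from the caller.
 */
static inline void example_set_geneve_opts(struct ip_tunnel_info *info,
                                           const void *opts, int len)
{
        IP_TUNNEL_DECLARE_FLAGS(flags) = { };

        __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
        ip_tunnel_info_opts_set(info, opts, len, flags);
}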
@@ -596,7 +706,7 @@ static inline void ip_tunnel_info_opts_get(void *to,
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
- __be16 flags)
+ const unsigned long *flags)
{
info->options_len = 0;
}
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index 8660a2a6d1fc..51401f01e2a5 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -3,20 +3,9 @@
#define _NET_IPCOMP_H
#include <linux/skbuff.h>
-#include <linux/types.h>
-
-#define IPCOMP_SCRATCH_SIZE 65400
-
-struct crypto_comp;
-struct ip_comp_hdr;
-
-struct ipcomp_data {
- u16 threshold;
- struct crypto_comp * __percpu *tfms;
-};
struct ip_comp_hdr;
-struct sk_buff;
+struct netlink_ext_ack;
struct xfrm_state;
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 88a8e554f7a1..2ccdf85f34f1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -246,17 +246,20 @@ extern int sysctl_mld_qrv;
#define _DEVADD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
+ unsigned long _field = (field); \
+ unsigned long _val = (val); \
if (likely(_idev != NULL)) \
- mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
- mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
+ mod##SNMP_ADD_STATS((_idev)->stats.statname, _field, _val); \
+ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, _field, _val);\
})
#define _DEVUPD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
+ unsigned long _val = (val); \
if (likely(_idev != NULL)) \
- mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
- mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
+ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, _val); \
+ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, _val);\
})
/* MIBs */
@@ -363,15 +366,6 @@ struct ipcm6_cookie {
struct ipv6_txoptions *opt;
};
-static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
-{
- *ipc6 = (struct ipcm6_cookie) {
- .hlimit = -1,
- .tclass = -1,
- .dontfrag = -1,
- };
-}
-
static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
const struct sock *sk)
{
@@ -380,6 +374,8 @@ static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
.tclass = inet6_sk(sk)->tclass,
.dontfrag = inet6_test_bit(DONTFRAG, sk),
};
+
+ sockcm_init(&ipc6->sockc, sk);
}
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
@@ -471,7 +467,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
/* This helper is specialized for BIG TCP needs.
* It assumes the hop_jumbo_hdr will immediately follow the IPV6 header.
* It assumes headers are already in skb->head.
- * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there.
+ * Returns: 0, or IPPROTO_TCP if a BIG TCP packet is there.
*/
static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
{
@@ -851,7 +847,7 @@ static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int
* we should *never* get to this point since that
* would mean the addrs are equal
*
- * However, we do get to it 8) And exacly, when
+ * However, we do get to it 8) And exactly when
* addresses are equal 8)
*
* ip route add 1111::/128 via ...
@@ -973,7 +969,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
hash = skb_get_hash_flowi6(skb, fl6);
/* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
+ * to minimize possibility that any useful information to an
* attacker is leaked. Only lower 20 bits are relevant.
*/
hash = rol32(hash, 16);
@@ -1365,4 +1361,16 @@ static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
release_sock(sk);
}
+#define IPV6_ADDR_WORDS 4
+
+static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
+{
+ cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
+}
+
+static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
+{
+ be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
+}
+
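/* Illustrative sketch, not part of this change: converting a whole IPv6
 * address at once, e.g. for host-order hashing or comparison (addr is
 * assumed non-NULL).
 */
static inline void example_addr_words(const struct in6_addr *addr)
{
        u32 words[IPV6_ADDR_WORDS];

        ipv6_addr_be32_to_cpu(words, addr->s6_addr32);
        /* ... hash or compare words[] in host order ... */
}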
#endif /* _NET_IPV6_H */
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 7321ffe3a108..38ef66826939 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -66,6 +66,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
struct net_device *dev = NULL;
struct sk_buff *head;
+ int refs = 1;
rcu_read_lock();
/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
@@ -77,7 +78,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
goto out;
fq->q.flags |= INET_FRAG_DROP;
- inet_frag_kill(&fq->q);
+ inet_frag_kill(&fq->q, &refs);
dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
@@ -109,7 +110,7 @@ out:
spin_unlock(&fq->q.lock);
out_rcu_unlock:
rcu_read_unlock();
- inet_frag_put(&fq->q);
+ inet_frag_putn(&fq->q, refs);
}
/* Check if the upper layer header is truncated in the first fragment. */
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 485c39a89866..8a3465c8c2c5 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -9,6 +9,7 @@
#include <net/flow.h>
#include <net/neighbour.h>
#include <net/sock.h>
+#include <net/ipv6.h>
/* structs from net/ip6_fib.h */
struct fib6_info;
@@ -72,6 +73,8 @@ struct ipv6_stub {
int (*output)(struct net *, struct sock *, struct sk_buff *));
struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
struct net_device *dev);
+ int (*ip6_xmit)(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
@@ -79,7 +82,7 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly;
struct ipv6_bpf_stub {
int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
u32 flags);
- struct sock *(*udp6_lib_lookup)(struct net *net,
+ struct sock *(*udp6_lib_lookup)(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 5cd7871127c9..9804fa5d9c67 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -15,7 +15,7 @@
* To explore any of the IUCV functions, one must first register their
* program using iucv_register(). Once your program has successfully
* completed a register, it can exploit the other functions.
- * For furthur reference on all IUCV functionality, refer to the
+ * For further reference on all IUCV functionality, refer to the
* CP Programming Services book, also available on the web thru
* www.vm.ibm.com/pubs, manual # SC24-6084
*
@@ -82,7 +82,12 @@ struct iucv_array {
} __attribute__ ((aligned (8)));
extern const struct bus_type iucv_bus;
-extern struct device *iucv_root;
+
+struct device_driver;
+
+struct device *iucv_alloc_device(const struct attribute_group **attrs,
+ struct device_driver *driver, void *priv,
+ const char *fmt, ...) __printf(4, 5);
/*
* struct iucv_path
@@ -197,7 +202,7 @@ struct iucv_handler {
*
* Registers a driver with IUCV.
*
- * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
+ * Returns: 0 on success, -ENOMEM if the memory allocation for the pathid
* table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
*/
int iucv_register(struct iucv_handler *handler, int smp);
@@ -219,7 +224,7 @@ void iucv_unregister(struct iucv_handler *handle, int smp);
*
* Allocate a new path structure for use with iucv_connect.
*
- * Returns NULL if the memory allocation failed or a pointer to the
+ * Returns: NULL if the memory allocation failed or a pointer to the
* path structure.
*/
static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
@@ -255,7 +260,7 @@ static inline void iucv_path_free(struct iucv_path *path)
* This function is issued after the user received a connection pending
* external interrupt and now wishes to complete the IUCV communication path.
*
- * Returns the result of the CP IUCV call.
+ * Returns: the result of the CP IUCV call.
*/
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
u8 *userdata, void *private);
@@ -273,7 +278,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
* successfully, you are not able to use the path until you receive an IUCV
* Connection Complete external interrupt.
*
- * Returns the result of the CP IUCV call.
+ * Returns: the result of the CP IUCV call.
*/
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
u8 *userid, u8 *system, u8 *userdata,
@@ -287,7 +292,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
* This function temporarily suspends incoming messages on an IUCV path.
* You can later reactivate the path by invoking the iucv_resume function.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
@@ -299,7 +304,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
* This function resumes incoming messages on an IUCV path that has
* been stopped with iucv_path_quiesce.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_path_resume(struct iucv_path *path, u8 *userdata);
@@ -310,7 +315,7 @@ int iucv_path_resume(struct iucv_path *path, u8 *userdata);
*
* This function terminates an IUCV path.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_path_sever(struct iucv_path *path, u8 *userdata);
@@ -322,7 +327,7 @@ int iucv_path_sever(struct iucv_path *path, u8 *userdata);
*
* Cancels a message you have sent.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
u32 srccls);
@@ -342,7 +347,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: local_bh_enable/local_bh_disable
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size, size_t *residual);
@@ -362,7 +367,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: no locking.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size,
@@ -377,7 +382,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
* are notified of a message and the time that you complete the message,
* the message may be rejected.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
@@ -394,7 +399,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
* pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
* the parameter list.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *reply, size_t size);
@@ -414,7 +419,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: local_bh_enable/local_bh_disable
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
@@ -434,7 +439,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: no locking.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
@@ -456,7 +461,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
* reply to the message and a buffer is provided into which IUCV moves
* the reply to this message.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size,
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index b2cf243ebe44..b80e474cb0aa 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -23,7 +23,7 @@
* to handle wireless statistics.
*
* The initial APIs served us well and has proven a reasonably good design.
- * However, there is a few shortcommings :
+ * However, there are a few shortcomings :
* o No events, everything is a request to the driver.
* o Large ioctl function in driver with gigantic switch statement
* (i.e. spaghetti code).
@@ -38,13 +38,13 @@
* -------------------------------
* The new driver API is just a bunch of standard functions (handlers),
* each handling a specific Wireless Extension. The driver just export
- * the list of handler it supports, and those will be called apropriately.
+ * the list of handlers it supports, and those will be called appropriately.
*
* I tried to keep the main advantage of the previous API (simplicity,
* efficiency and light weight), and also I provide a good dose of backward
* compatibility (most structures are the same, driver can use both API
* simultaneously, ...).
- * Hopefully, I've also addressed the shortcomming of the initial API.
+ * Hopefully, I've also addressed the shortcoming of the initial API.
*
* The advantage of the new API are :
* o Handling of Extensions in driver broken in small contained functions
@@ -84,7 +84,7 @@
/* ---------------------- THE IMPLEMENTATION ---------------------- */
/*
- * Some of the choice I've made are pretty controversials. Defining an
+ * Some of the choices I've made are pretty controversial. Defining an
* API is very much weighting compromises. This goes into some of the
* details and the thinking behind the implementation.
*
@@ -140,7 +140,7 @@
* example to distinguish setting max rate and basic rate), I would
* break the prototype. Using iwreq_data is more flexible.
* 3) Also, the above form is not generic (see above).
- * 4) I don't expect driver developper using the wrong field of the
+ * 4) I don't expect driver developers to use the wrong field of the
* union (Doh !), so static typechecking doesn't add much value.
* 5) Lastly, you can skip the union by doing :
* static int mydriver_ioctl_setrate(struct net_device *dev,
@@ -279,8 +279,6 @@
#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
/* SET : Omit payload from generated iwevent */
#define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */
-/* Driver level flags */
-#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
/****************************** TYPES ******************************/
@@ -373,11 +371,10 @@ struct iw_handler_def {
*/
struct iw_ioctl_description {
__u8 header_type; /* NULL, iw_point or other */
- __u8 token_type; /* Future */
+ __u8 flags; /* Special handling of the request */
__u16 token_size; /* Granularity of payload */
__u16 min_tokens; /* Min acceptable token number */
__u16 max_tokens; /* Max acceptable token number */
- __u32 flags; /* Special handling of the request */
};
/* Need to think of short header translation table. Later. */
@@ -404,26 +401,6 @@ struct iw_spy_data {
u_char spy_thr_under[IW_MAX_SPY];
};
-/* --------------------- DEVICE WIRELESS DATA --------------------- */
-/*
- * This is all the wireless data specific to a device instance that
- * is managed by the core of Wireless Extensions or the 802.11 layer.
- * We only keep pointer to those structures, so that a driver is free
- * to share them between instances.
- * This structure should be initialised before registering the device.
- * Access to this data follow the same rules as any other struct net_device
- * data (i.e. valid as long as struct net_device exist, same locking rules).
- */
-/* Forward declaration */
-struct libipw_device;
-/* The struct */
-struct iw_public_data {
- /* Driver enhanced spy support */
- struct iw_spy_data * spy_data;
- /* Legacy structure managed by the ipw2x00-specific IEEE 802.11 layer */
- struct libipw_device * libipw;
-};
-
/**************************** PROTOTYPES ****************************/
/*
* Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c).
@@ -443,23 +420,7 @@ static inline void wireless_nlevent_flush(void) {}
/* We may need a function to send a stream of events to user space.
* More on that later... */
-/* Standard handler for SIOCSIWSPY */
-int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCGIWSPY */
-int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCSIWTHRSPY */
-int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCGIWTHRSPY */
-int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Driver call to update spy records */
-void wireless_spy_update(struct net_device *dev, unsigned char *address,
- struct iw_quality *wstats);
-
-/************************* INLINE FUNTIONS *************************/
+/************************* INLINE FUNCTIONS *************************/
/*
* Function that are so simple that it's more efficient inlining them
*/
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 90279e5e09a5..441e993be634 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -70,6 +70,7 @@ struct kcm_sock {
struct work_struct tx_work;
struct list_head wait_psock_list;
struct sk_buff *seq_skb;
+ struct mutex tx_mutex;
u32 tx_stopped : 1;
/* Don't use bit fields here, these are set under different locks */
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 031c661aa14d..1eb8dad18f7e 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -59,6 +59,20 @@ int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+static inline
+bool l3mdev_fib_rule_iif_match(const struct flowi *fl, int iifindex)
+{
+ return !(fl->flowi_flags & FLOWI_FLAG_L3MDEV_OIF) &&
+ fl->flowi_l3mdev == iifindex;
+}
+
+static inline
+bool l3mdev_fib_rule_oif_match(const struct flowi *fl, int oifindex)
+{
+ return fl->flowi_flags & FLOWI_FLAG_L3MDEV_OIF &&
+ fl->flowi_l3mdev == oifindex;
+}
+
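/* Illustrative sketch, not part of this change: how a FIB rule backend
 * might use the pair. The real core logic differs; this only shows the
 * iif/oif split (the iif/oif indices are hypothetical rule state).
 */
static inline bool example_rule_match(const struct flowi *fl, int iif, int oif)
{
        if (iif)
                return l3mdev_fib_rule_iif_match(fl, iif);
        if (oif)
                return l3mdev_fib_rule_oif_match(fl, oif);

        return false;
}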
void l3mdev_update_flow(struct net *net, struct flowi *fl);
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
@@ -78,7 +92,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
struct net_device *dev;
int rc = 0;
- if (likely(ifindex)) {
+ if (ifindex) {
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
@@ -198,10 +212,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
if (netif_is_l3_slave(dev)) {
struct net_device *master;
+ rcu_read_lock();
master = netdev_master_upper_dev_get_rcu(dev);
if (master && master->l3mdev_ops->l3mdev_l3_out)
skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
skb, proto);
+ rcu_read_unlock();
}
return skb;
@@ -325,6 +341,19 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+
+static inline
+bool l3mdev_fib_rule_iif_match(const struct flowi *fl, int iifindex)
+{
+ return false;
+}
+
+static inline
+bool l3mdev_fib_rule_oif_match(const struct flowi *fl, int oifindex)
+{
+ return false;
+}
+
static inline
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
diff --git a/include/net/lapb.h b/include/net/lapb.h
index 124ee122f2c8..6c07420644e4 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -4,7 +4,7 @@
#include <linux/lapb.h>
#include <linux/refcount.h>
-#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
+#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */
#define LAPB_ACK_PENDING_CONDITION 0x01
#define LAPB_REJECT_CONDITION 0x02
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
deleted file mode 100644
index 8b47d3a51cf8..000000000000
--- a/include/net/lib80211.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * lib80211.h -- common bits for IEEE802.11 wireless drivers
- *
- * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
- *
- * Some bits copied from old ieee80211 component, w/ original copyright
- * notices below:
- *
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <j@w1.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- *
- * Copyright (c) 2004, Intel Corporation
- *
- */
-
-#ifndef LIB80211_H
-#define LIB80211_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-#include <linux/if.h>
-#include <linux/skbuff.h>
-#include <linux/ieee80211.h>
-#include <linux/timer.h>
-#include <linux/seq_file.h>
-
-#define NUM_WEP_KEYS 4
-
-enum {
- IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
-};
-
-struct module;
-
-struct lib80211_crypto_ops {
- const char *name;
- struct list_head list;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void *(*init) (int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit) (void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
- int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
- int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
- void *priv);
-
- int (*set_key) (void *key, int len, u8 * seq, void *priv);
- int (*get_key) (void *key, int len, u8 * seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- void (*print_stats) (struct seq_file *m, void *priv);
-
- /* Crypto specific flag get/set for configuration settings */
- unsigned long (*get_flags) (void *priv);
- unsigned long (*set_flags) (unsigned long flags, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the buffer and correct
- * length must be returned */
- int extra_mpdu_prefix_len, extra_mpdu_postfix_len;
- int extra_msdu_prefix_len, extra_msdu_postfix_len;
-
- struct module *owner;
-};
-
-struct lib80211_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct lib80211_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-struct lib80211_crypt_info {
- char *name;
- /* Most clients will already have a lock,
- so just point to that. */
- spinlock_t *lock;
-
- struct lib80211_crypt_data *crypt[NUM_WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct list_head crypt_deinit_list;
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
-};
-
-int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
- spinlock_t *lock);
-void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
-int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops);
-int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
-struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
-void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
- struct lib80211_crypt_data **crypt);
-
-#endif /* LIB80211_H */
diff --git a/include/net/libeth/cache.h b/include/net/libeth/cache.h
new file mode 100644
index 000000000000..bdb0c043ce61
--- /dev/null
+++ b/include/net/libeth/cache.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_CACHE_H
+#define __LIBETH_CACHE_H
+
+#include <linux/cache.h>
+
+/**
+ * libeth_cacheline_group_assert - make sure cacheline group size is expected
+ * @type: type of the structure containing the group
+ * @grp: group name inside the struct
+ * @sz: expected group size
+ */
+#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) == \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) == (sz))
+#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) <= \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) <= (sz))
+#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+
+#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1)
+#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2))
+#define __libeth_cls3(sz1, sz2, sz3) \
+ (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3))
+#define __libeth_cls(...) \
+ CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+/**
+ * libeth_cacheline_struct_assert - make sure CL-based struct size is expected
+ * @type: type of the struct
+ * @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold)
+ *
+ * When a struct contains several CL groups, it's difficult to predict its size
+ * on different architectures. The macro instead takes sizes of all of the
+ * groups the structure contains and generates the final struct size.
+ */
+#define libeth_cacheline_struct_assert(type, ...) \
+ __libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \
+ static_assert(__alignof(type) >= SMP_CACHE_BYTES)
+
+/**
+ * libeth_cacheline_set_assert - make sure CL-based struct layout is expected
+ * @type: type of the struct
+ * @ro: expected size of the read-mostly group
+ * @rw: expected size of the read-write group
+ * @c: expected size of the cold group
+ *
+ * Check that each group size is expected and then do final struct size check.
+ */
+#define libeth_cacheline_set_assert(type, ro, rw, c) \
+ libeth_cacheline_group_assert(type, read_mostly, ro); \
+ libeth_cacheline_group_assert(type, read_write, rw); \
+ libeth_cacheline_group_assert(type, cold, c); \
+ libeth_cacheline_struct_assert(type, ro, rw, c)
+
+#endif /* __LIBETH_CACHE_H */
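/* Illustrative sketch, not part of this change: pinning a hypothetical ring
 * layout with the asserts above. On 64-bit with 64-byte cachelines the
 * check is exact; elsewhere it degrades to <=, so it passes either way.
 */
struct example_rxq {
        __cacheline_group_begin(read_mostly);
        void *desc_ring;                /* 8 bytes on 64-bit */
        u32 desc_count;                 /* 4 bytes */
        u32 buf_stride;                 /* 4 bytes */
        __cacheline_group_end(read_mostly);
};
libeth_cacheline_group_assert(struct example_rxq, read_mostly, 16);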
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
new file mode 100644
index 000000000000..ab05024be518
--- /dev/null
+++ b/include/net/libeth/rx.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_RX_H
+#define __LIBETH_RX_H
+
+#include <linux/if_vlan.h>
+
+#include <net/page_pool/helpers.h>
+#include <net/xdp.h>
+
+/* Rx buffer management */
+
+/* Space reserved in front of each frame */
+#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+/* Maximum headroom for worst-case calculations */
+#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
+/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
+#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
+/* Maximum supported L2-L4 header length */
+#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256))
+
+/* Always use order-0 pages */
+#define LIBETH_RX_PAGE_ORDER 0
+/* Pick a sane buffer stride and align to a cacheline boundary */
+#define LIBETH_RX_BUF_STRIDE SKB_DATA_ALIGN(128)
+/* HW-writeable space in one buffer: truesize - headroom/tailroom, aligned */
+#define LIBETH_RX_PAGE_LEN(hr) \
+ ALIGN_DOWN(SKB_MAX_ORDER(hr, LIBETH_RX_PAGE_ORDER), \
+ LIBETH_RX_BUF_STRIDE)
+
+/**
+ * struct libeth_fqe - structure representing an Rx buffer (fill queue element)
+ * @page: page holding the buffer
+ * @offset: offset from the page start (to the headroom)
+ * @truesize: total space occupied by the buffer (w/ headroom and tailroom)
+ *
+ * Depending on the MTU, API switches between one-page-per-frame and shared
+ * page model (to conserve memory on bigger-page platforms). In case of the
+ * former, @offset is always 0 and @truesize is always ```PAGE_SIZE```.
+ */
+struct libeth_fqe {
+ struct page *page;
+ u32 offset;
+ u32 truesize;
+} __aligned_largest;
+
+/**
+ * enum libeth_fqe_type - enum representing types of Rx buffers
+ * @LIBETH_FQE_MTU: buffer size is determined by MTU
+ * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames
+ * @LIBETH_FQE_HDR: buffer size is ```LIBETH_MAX_HEAD```-sized, for headers
+ */
+enum libeth_fqe_type {
+ LIBETH_FQE_MTU = 0U,
+ LIBETH_FQE_SHORT,
+ LIBETH_FQE_HDR,
+};
+
+/**
+ * struct libeth_fq - structure representing a buffer (fill) queue
+ * @fp: hotpath part of the structure
+ * @pp: &page_pool for buffer management
+ * @fqes: array of Rx buffers
+ * @truesize: size to allocate per buffer, w/overhead
+ * @count: number of descriptors/buffers the queue has
+ * @type: type of the buffers this queue has
+ * @hsplit: flag whether header split is enabled
+ * @buf_len: HW-writeable length per buffer
+ * @nid: ID of the closest NUMA node with memory
+ */
+struct libeth_fq {
+ struct_group_tagged(libeth_fq_fp, fp,
+ struct page_pool *pp;
+ struct libeth_fqe *fqes;
+
+ u32 truesize;
+ u32 count;
+ );
+
+ /* Cold fields */
+ enum libeth_fqe_type type:2;
+ bool hsplit:1;
+
+ u32 buf_len;
+ int nid;
+};
+
+int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
+void libeth_rx_fq_destroy(struct libeth_fq *fq);
+
+/**
+ * libeth_rx_alloc - allocate a new Rx buffer
+ * @fq: fill queue to allocate for
+ * @i: index of the buffer within the queue
+ *
+ * Return: DMA address to be passed to HW for Rx on successful allocation,
+ * ```DMA_MAPPING_ERROR``` otherwise.
+ */
+static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
+{
+ struct libeth_fqe *buf = &fq->fqes[i];
+
+ buf->truesize = fq->truesize;
+ buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);
+ if (unlikely(!buf->page))
+ return DMA_MAPPING_ERROR;
+
+ return page_pool_get_dma_addr(buf->page) + buf->offset +
+ fq->pp->p.offset;
+}
+
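/* Illustrative sketch, not part of this change: a refill loop. Writing the
 * returned address into the HW descriptor ring is driver-specific.
 */
static inline void example_refill(struct libeth_fq *fq)
{
        for (u32 i = 0; i < fq->count; i++) {
                dma_addr_t addr = libeth_rx_alloc(&fq->fp, i);

                if (addr == DMA_MAPPING_ERROR)
                        break;

                /* my_write_rx_desc(i, addr); -- hypothetical driver helper */
        }
}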
+void libeth_rx_recycle_slow(struct page *page);
+
+/**
+ * libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA
+ * @fqe: buffer to process
+ * @len: frame length from the descriptor
+ *
+ * Process the buffer after it's written by HW. The regular path is to
+ * synchronize DMA for CPU, but in case of no data it will be immediately
+ * recycled back to its PP.
+ *
+ * Return: true when there's data to process, false otherwise.
+ */
+static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
+ u32 len)
+{
+ struct page *page = fqe->page;
+
+ /* Very rare, but possible case. The most common reason:
+ * the last fragment contained FCS only, which was then
+ * stripped by the HW.
+ */
+ if (unlikely(!len)) {
+ libeth_rx_recycle_slow(page);
+ return false;
+ }
+
+ page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
+
+ return true;
+}
+
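/* Illustrative sketch, not part of this change: the Rx poll step. buf_id
 * and len are assumed to be decoded from a driver-specific completion
 * descriptor.
 */
static inline bool example_rx_process(struct libeth_fq *fq, u32 buf_id,
                                      u32 len)
{
        struct libeth_fqe *fqe = &fq->fqes[buf_id];

        if (!libeth_rx_sync_for_cpu(fqe, len))
                return false;   /* no data, buffer already recycled */

        net_prefetch(page_address(fqe->page) + fqe->offset);
        return true;
}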
+/* Converting abstract packet type numbers into a software structure with
+ * the packet parameters to do O(1) lookup on Rx.
+ */
+
+enum {
+ LIBETH_RX_PT_OUTER_L2 = 0U,
+ LIBETH_RX_PT_OUTER_IPV4,
+ LIBETH_RX_PT_OUTER_IPV6,
+};
+
+enum {
+ LIBETH_RX_PT_NOT_FRAG = 0U,
+ LIBETH_RX_PT_FRAG,
+};
+
+enum {
+ LIBETH_RX_PT_TUNNEL_IP_NONE = 0U,
+ LIBETH_RX_PT_TUNNEL_IP_IP,
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT,
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC,
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC_VLAN,
+};
+
+enum {
+ LIBETH_RX_PT_TUNNEL_END_NONE = 0U,
+ LIBETH_RX_PT_TUNNEL_END_IPV4,
+ LIBETH_RX_PT_TUNNEL_END_IPV6,
+};
+
+enum {
+ LIBETH_RX_PT_INNER_NONE = 0U,
+ LIBETH_RX_PT_INNER_UDP,
+ LIBETH_RX_PT_INNER_TCP,
+ LIBETH_RX_PT_INNER_SCTP,
+ LIBETH_RX_PT_INNER_ICMP,
+ LIBETH_RX_PT_INNER_TIMESYNC,
+};
+
+#define LIBETH_RX_PT_PAYLOAD_NONE PKT_HASH_TYPE_NONE
+#define LIBETH_RX_PT_PAYLOAD_L2 PKT_HASH_TYPE_L2
+#define LIBETH_RX_PT_PAYLOAD_L3 PKT_HASH_TYPE_L3
+#define LIBETH_RX_PT_PAYLOAD_L4 PKT_HASH_TYPE_L4
+
+struct libeth_rx_pt {
+ u32 outer_ip:2;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:3;
+ enum pkt_hash_types payload_layer:2;
+
+ u32 pad:2;
+ enum xdp_rss_hash_type hash_type:16;
+};
+
+/**
+ * struct libeth_rx_csum - checksum offload bits decoded from the Rx descriptor
+ * @l3l4p: detectable L3 and L4 integrity check is processed by the hardware
+ * @ipe: IP checksum error
+ * @eipe: external (outermost) IP header checksum error (only for tunnels)
+ * @eudpe: external (outermost) UDP checksum error (only for tunnels)
+ * @ipv6exadd: IPv6 header with extension headers
+ * @l4e: L4 integrity error
+ * @pprs: set for packets that skip checksum calculation in the HW pre parser
+ * @nat: the packet is a UDP tunneled packet
+ * @raw_csum_valid: set if raw checksum is valid
+ * @pad: padding to naturally align raw_csum field
+ * @raw_csum: raw checksum
+ */
+struct libeth_rx_csum {
+ u32 l3l4p:1;
+ u32 ipe:1;
+ u32 eipe:1;
+ u32 eudpe:1;
+ u32 ipv6exadd:1;
+ u32 l4e:1;
+ u32 pprs:1;
+ u32 nat:1;
+
+ u32 raw_csum_valid:1;
+ u32 pad:7;
+ u32 raw_csum:16;
+};
+
+/**
+ * struct libeth_rqe_info - receive queue element info
+ * @len: packet length
+ * @ptype: packet type based on types programmed into the device
+ * @eop: whether it's the last fragment of the packet
+ * @rxe: MAC errors: CRC, alignment, oversize, undersize, length error
+ * @vlan: C-VLAN or S-VLAN tag depending on the VLAN offload configuration
+ */
+struct libeth_rqe_info {
+ u32 len;
+
+ u32 ptype:14;
+ u32 eop:1;
+ u32 rxe:1;
+
+ u32 vlan:16;
+};
+
+void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt);
+
+/**
+ * libeth_rx_pt_get_ip_ver - get IP version from a packet type structure
+ * @pt: packet type params
+ *
+ * Wrapper to compile out the IPv6 code from the drivers when not supported
+ * by the kernel.
+ *
+ * Return: @pt.outer_ip or stub for IPv6 when not compiled-in.
+ */
+static inline u32 libeth_rx_pt_get_ip_ver(struct libeth_rx_pt pt)
+{
+#if !IS_ENABLED(CONFIG_IPV6)
+ switch (pt.outer_ip) {
+ case LIBETH_RX_PT_OUTER_IPV4:
+ return LIBETH_RX_PT_OUTER_IPV4;
+ default:
+ return LIBETH_RX_PT_OUTER_L2;
+ }
+#else
+ return pt.outer_ip;
+#endif
+}
+
+/* libeth_has_*() can be used to quickly check whether the HW metadata is
+ * available to avoid further expensive processing such as descriptor reads.
+ * They already check for the corresponding netdev feature to be enabled,
+ * thus can be used as drop-in replacements.
+ */
+
+static inline bool libeth_rx_pt_has_checksum(const struct net_device *dev,
+ struct libeth_rx_pt pt)
+{
+ /* Non-zero _INNER* is only possible when _OUTER_IPV* is set,
+ * it is enough to check only for the L4 type.
+ */
+ return likely(pt.inner_prot > LIBETH_RX_PT_INNER_NONE &&
+ (dev->features & NETIF_F_RXCSUM));
+}
+
+static inline bool libeth_rx_pt_has_hash(const struct net_device *dev,
+ struct libeth_rx_pt pt)
+{
+ return likely(pt.payload_layer > LIBETH_RX_PT_PAYLOAD_NONE &&
+ (dev->features & NETIF_F_RXHASH));
+}
+
+/**
+ * libeth_rx_pt_set_hash - fill in skb hash value based on the PT
+ * @skb: skb to fill the hash in
+ * @hash: 32-bit hash value from the descriptor
+ * @pt: packet type
+ */
+static inline void libeth_rx_pt_set_hash(struct sk_buff *skb, u32 hash,
+ struct libeth_rx_pt pt)
+{
+ skb_set_hash(skb, hash, pt.payload_layer);
+}
+
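/* Illustrative sketch, not part of this change: tying the PT helpers
 * together on Rx. The hash would come from a driver descriptor
 * (hypothetical rx_hash field here).
 */
static inline void example_rx_hints(struct sk_buff *skb,
                                    const struct net_device *dev,
                                    struct libeth_rx_pt pt, __le32 rx_hash)
{
        if (libeth_rx_pt_has_hash(dev, pt))
                libeth_rx_pt_set_hash(skb, le32_to_cpu(rx_hash), pt);

        if (libeth_rx_pt_has_checksum(dev, pt))
                skb->ip_summed = CHECKSUM_UNNECESSARY;  /* assuming no HW errors */
}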
+#endif /* __LIBETH_RX_H */
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
new file mode 100644
index 000000000000..35614f9523f6
--- /dev/null
+++ b/include/net/libeth/tx.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TX_H
+#define __LIBETH_TX_H
+
+#include <linux/skbuff.h>
+
+#include <net/libeth/types.h>
+
+/* Tx buffer completion */
+
+/**
+ * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
+ * @LIBETH_SQE_EMPTY: unused/empty, no action required
+ * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
+ * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
+ * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
+ * @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ */
+enum libeth_sqe_type {
+ LIBETH_SQE_EMPTY = 0U,
+ LIBETH_SQE_CTX,
+ LIBETH_SQE_SLAB,
+ LIBETH_SQE_FRAG,
+ LIBETH_SQE_SKB,
+};
+
+/**
+ * struct libeth_sqe - represents a Send Queue Element / Tx buffer
+ * @type: type of the buffer, see the enum above
+ * @rs_idx: index of the last buffer from the batch this one was sent in
+ * @raw: slab buffer to free via kfree()
+ * @skb: &sk_buff to consume
+ * @dma: DMA address to unmap
+ * @len: length of the mapped region to unmap
+ * @nr_frags: number of frags in the frame this buffer belongs to
+ * @packets: number of physical packets sent for this frame
+ * @bytes: number of physical bytes sent for this frame
+ * @priv: driver-private scratchpad
+ */
+struct libeth_sqe {
+ enum libeth_sqe_type type:32;
+ u32 rs_idx;
+
+ union {
+ void *raw;
+ struct sk_buff *skb;
+ };
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+
+ u32 nr_frags;
+ u32 packets;
+ u32 bytes;
+
+ unsigned long priv;
+} __aligned_largest;
+
+/**
+ * LIBETH_SQE_CHECK_PRIV - check the driver's private SQE data
+ * @p: type or name of the object the driver wants to fit into &libeth_sqe
+ *
+ * Make sure the driver's private data fits into libeth_sqe::priv. To be used
+ * right after its declaration.
+ */
+#define LIBETH_SQE_CHECK_PRIV(p) \
+ static_assert(sizeof(p) <= sizeof_field(struct libeth_sqe, priv))
+
+/**
+ * struct libeth_cq_pp - completion queue poll params
+ * @dev: &device to perform DMA unmapping
+ * @ss: onstack NAPI stats to fill
+ * @napi: whether it's called from the NAPI context
+ *
+ * libeth uses this structure to access objects needed for performing full
+ * Tx complete operation without passing lots of arguments and changing the
+ * prototypes each time a new one is added.
+ */
+struct libeth_cq_pp {
+ struct device *dev;
+ struct libeth_sq_napi_stats *ss;
+
+ bool napi;
+};
+
+/**
+ * libeth_tx_complete - perform Tx completion for one SQE
+ * @sqe: SQE to complete
+ * @cp: poll params
+ *
+ * Do Tx complete for all the types of buffers, incl. freeing, unmapping,
+ * updating the stats etc.
+ */
+static inline void libeth_tx_complete(struct libeth_sqe *sqe,
+ const struct libeth_cq_pp *cp)
+{
+ switch (sqe->type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_SKB:
+ case LIBETH_SQE_FRAG:
+ case LIBETH_SQE_SLAB:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (sqe->type) {
+ case LIBETH_SQE_SKB:
+ cp->ss->packets += sqe->packets;
+ cp->ss->bytes += sqe->bytes;
+
+ napi_consume_skb(sqe->skb, cp->napi);
+ break;
+ case LIBETH_SQE_SLAB:
+ kfree(sqe->raw);
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
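/* Illustrative sketch, not part of this change: a completion loop. The ring
 * layout (sqes/count) and the ntc/pending bookkeeping are hypothetical
 * driver state.
 */
static inline void example_tx_clean(struct device *dev,
                                    struct libeth_sqe *sqes, u32 count,
                                    u32 ntc, u32 pending, int budget)
{
        struct libeth_sq_napi_stats ss = { };
        const struct libeth_cq_pp cp = {
                .dev    = dev,          /* device that mapped the buffers */
                .ss     = &ss,
                .napi   = budget,       /* NAPI context iff budget != 0 */
        };

        while (pending--)
                libeth_tx_complete(&sqes[ntc++ % count], &cp);
}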
+#endif /* __LIBETH_TX_H */
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
new file mode 100644
index 000000000000..603825e45133
--- /dev/null
+++ b/include/net/libeth/types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TYPES_H
+#define __LIBETH_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_sq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+#endif /* __LIBETH_TYPES_H */
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 53823d61d8b6..a4bea0f33188 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -44,8 +44,8 @@ struct llc_conn_state_trans {
};
struct llc_conn_state {
- u8 current_state;
- struct llc_conn_state_trans **transitions;
+ u8 current_state;
+ const struct llc_conn_state_trans **transitions;
};
extern struct llc_conn_state llc_conn_state_table[];
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 1d55ba7c45be..86681f29bda7 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -254,7 +254,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
}
/**
- * llc_pdu_decode_sa - extracs source address (MAC) of input frame
+ * llc_pdu_decode_sa - extracts source address (MAC) of input frame
* @skb: input skb that source address must be extracted from it.
* @sa: pointer to source address (6 byte array).
*
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index ed5b2fa40d32..fca49d483d20 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -29,8 +29,8 @@ struct llc_sap_state_trans {
};
struct llc_sap_state {
- u8 curr_state;
- struct llc_sap_state_trans **transitions;
+ u8 curr_state;
+ const struct llc_sap_state_trans **transitions;
};
/* only access to SAP state table */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 53bd2d02a4f0..c306ebe379a0 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -206,6 +206,7 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type,
NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel");
return -EOPNOTSUPP;
}
+
static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
struct netlink_ext_ack *extack)
{
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 2d7f87bc5324..82617579d910 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
*/
#ifndef MAC80211_H
@@ -22,7 +22,7 @@
#include <net/cfg80211.h>
#include <net/codel.h>
#include <net/ieee80211_radiotap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* DOC: Introduction
@@ -213,7 +213,7 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
- * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_MIN_DEF: The min chandef changed
* @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
* bandwidth) OFDMA settings need to be changed
* @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
@@ -224,7 +224,7 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
- IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_MIN_DEF = BIT(4),
IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
};
@@ -250,6 +250,7 @@ struct ieee80211_chan_req {
* @min_def: the minimum channel definition currently required.
* @ap: the channel definition the AP actually is operating as,
* for use with (wider bandwidth) OFDMA
+ * @radio_idx: index of the wiphy radio used for this channel
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -264,6 +265,7 @@ struct ieee80211_chanctx_conf {
struct cfg80211_chan_def min_def;
struct cfg80211_chan_def ap;
+ int radio_idx;
u8 rx_chains_static, rx_chains_dynamic;
bool radar_enabled;
@@ -361,7 +363,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
- * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed
+ * @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed
+ * @BSS_CHANGED_TPE: transmit power envelope changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -395,9 +398,10 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
- BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT_ULL(31),
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
+ BSS_CHANGED_TPE = BIT_ULL(35),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -550,6 +554,39 @@ struct ieee80211_fils_discovery {
u32 max_interval;
};
+#define IEEE80211_TPE_EIRP_ENTRIES_320MHZ 5
+struct ieee80211_parsed_tpe_eirp {
+ bool valid;
+ s8 power[IEEE80211_TPE_EIRP_ENTRIES_320MHZ];
+ u8 count;
+};
+
+#define IEEE80211_TPE_PSD_ENTRIES_320MHZ 16
+struct ieee80211_parsed_tpe_psd {
+ bool valid;
+ s8 power[IEEE80211_TPE_PSD_ENTRIES_320MHZ];
+ u8 count, n;
+};
+
+/**
+ * struct ieee80211_parsed_tpe - parsed transmit power envelope information
+ * @max_local: maximum local EIRP, one value for 20, 40, 80, 160, 320 MHz each
+ * (indexed by TX power category)
+ * @max_reg_client: maximum regulatory client EIRP, one value for 20, 40, 80,
+ * 160, 320 MHz each
+ * (indexed by TX power category)
+ * @psd_local: maximum local power spectral density, one value for each 20 MHz
+ * subchannel per bss_conf's chanreq.oper
+ * (indexed by TX power category)
+ * @psd_reg_client: maximum regulatory power spectral density, one value for
+ * each 20 MHz subchannel per bss_conf's chanreq.oper
+ * (indexed by TX power category)
+ */
+struct ieee80211_parsed_tpe {
+ struct ieee80211_parsed_tpe_eirp max_local[2], max_reg_client[2];
+ struct ieee80211_parsed_tpe_psd psd_local[2], psd_reg_client[2];
+};
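/*
 * Editorial sketch (not part of the patch): one way a driver might consume
 * the parsed TPE data above. The use of index 0 to select the TX power
 * category in effect is an assumption; the function name is hypothetical.
 */
static s8 example_tpe_min_eirp(const struct ieee80211_bss_conf *conf)
{
	const struct ieee80211_parsed_tpe_eirp *e = &conf->tpe.max_local[0];
	s8 limit = 127;	/* no constraint */
	int i;

	if (!e->valid)
		return limit;

	/* take the most restrictive EIRP value across the reported widths */
	for (i = 0; i < e->count; i++)
		if (e->power[i] < limit)
			limit = e->power[i];

	return limit;
}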
+
/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
@@ -645,6 +682,9 @@ struct ieee80211_fils_discovery {
* responder functionality.
* @ftmr_params: configurable lci/civic parameter when enabling FTM responder.
* @nontransmitted: this BSS is a nontransmitted BSS profile
+ * @tx_bss_conf: Pointer to the BSS configuration of the transmitting interface
+ * if MBSSID is enabled. This pointer is RCU-protected due to CSA finish
+ * and BSS color change flows accessing it.
 * @transmitter_bssid: the address of the transmitter AP
* @bssid_index: index inside the multiple BSSID set
* @bssid_indicator: 2^bssid_indicator is the maximum number of APs in set
@@ -662,10 +702,10 @@ struct ieee80211_fils_discovery {
* @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
* to driver when rate control is offloaded to firmware.
* @power_type: power type of BSS for 6 GHz
- * @tx_pwr_env: transmit power envelope array of BSS.
- * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @tpe: transmit power envelope information
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
+ * @epcs_support: does this BSS support EPCS
* @csa_active: marks whether a channel switch is going on.
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -701,6 +741,23 @@ struct ieee80211_fils_discovery {
* beamformee
* @eht_mu_beamformer: in AP-mode, does this BSS enable operation as an EHT MU
* beamformer
+ * @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the
+ * reception of an EHT TB PPDU on an RU that spans the entire PPDU
+ * bandwidth
+ * @eht_disable_mcs15: disable EHT-MCS 15 reception capability.
+ * @bss_param_ch_cnt: in BSS-mode, the BSS params change count. This
+ * information is the latest known value. It can come from this link's
+ * beacon or from a beacon sent by another link.
+ * @bss_param_ch_cnt_link_id: in BSS-mode, the link_id to which the beacon
+ * that updated &bss_param_ch_cnt belongs. E.g. if link 1 doesn't hear
+ * its beacons, and link 2 sent a beacon with an RNR element that updated
+ * link 1's BSS params change count, then, link 1's
+ * bss_param_ch_cnt_link_id will be 2. That means that link 1 knows that
+ * link 2 was the link that updated its bss_param_ch_cnt value.
+ * In case link 1 hears its beacon again, bss_param_ch_cnt_link_id will
+ * be updated to 1, even if bss_param_ch_cnt didn't change. This allows
+ * the link to know that it heard the latest value from its own beacon
+ * (as opposed to hearing its value from another link's beacon).
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
@@ -751,6 +808,7 @@ struct ieee80211_bss_conf {
struct ieee80211_ftm_responder_params *ftmr_params;
/* Multiple BSSID data */
bool nontransmitted;
+ struct ieee80211_bss_conf __rcu *tx_bss_conf;
u8 transmitter_bssid[ETH_ALEN];
u8 bssid_index;
u8 bssid_indicator;
@@ -766,11 +824,12 @@ struct ieee80211_bss_conf {
u32 unsol_bcast_probe_resp_interval;
struct cfg80211_bitrate_mask beacon_tx_rate;
enum ieee80211_ap_reg_power power_type;
- struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
- u8 tx_pwr_env_num;
+
+ struct ieee80211_parsed_tpe tpe;
+
u8 pwr_reduction;
bool eht_support;
-
+ bool epcs_support;
bool csa_active;
bool mu_mimo_owner;
@@ -793,6 +852,11 @@ struct ieee80211_bss_conf {
bool eht_su_beamformer;
bool eht_su_beamformee;
bool eht_mu_beamformer;
+ bool eht_80mhz_full_bw_ul_mumimo;
+ bool eht_disable_mcs15;
+
+ u8 bss_param_ch_cnt;
+ u8 bss_param_ch_cnt_link_id;
};
/**
@@ -953,8 +1017,9 @@ enum mac80211_tx_info_flags {
* of their QoS TID or other priority field values.
* @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
* for sequence number assignment
- * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
- * due to scanning, not in normal operation on the interface.
+ * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame
+ * which is transmitted due to scanning or offchannel TX, not in normal
+ * operation on the interface.
* @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
* frame should be transmitted on the specific link. This really is
* only relevant for frames that do not have data present, and is
@@ -975,7 +1040,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
- IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
+ IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10),
IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
};
@@ -1406,8 +1471,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
* @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
* on this subframe
- * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
- * is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
* done by the hardware
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
@@ -1479,7 +1542,7 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_LAST_KNOWN = BIT(12),
RX_FLAG_AMPDU_IS_LAST = BIT(13),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
- RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
+ /* one free bit at 15 */
RX_FLAG_MACTIME = BIT(16) | BIT(17),
RX_FLAG_MACTIME_PLCP_START = 1 << 16,
RX_FLAG_MACTIME_START = 2 << 16,
@@ -1576,7 +1639,6 @@ enum mac80211_rx_encoding {
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
- * @ampdu_delimiter_crc: A-MPDU delimiter CRC
* @zero_length_psdu_type: radiotap type of the 0-length PSDU
* @link_valid: if the link which is identified by @link_id is valid. This flag
* is set only when connection is MLO.
@@ -1614,7 +1676,6 @@ struct ieee80211_rx_status {
s8 signal;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
- u8 ampdu_delimiter_crc;
u8 zero_length_psdu_type;
u8 link_valid:1, link_id:4;
};
@@ -1802,6 +1863,9 @@ struct ieee80211_channel_switch {
* operation on this interface and request a channel context without
* the AP definition. Use this e.g. because the device is able to
* handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
+ * @IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC: indicates that the AP sta should
+ *	be removed only after setting the vif as unassociated, not the other
+ *	way around. Only relevant for STA vifs.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
@@ -1810,6 +1874,7 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
IEEE80211_VIF_EML_ACTIVE = BIT(4),
IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
+ IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC = BIT(6),
};
@@ -1924,14 +1989,18 @@ enum ieee80211_neg_ttlm_res {
* @active_links: The bitmap of active links, or 0 for non-MLO.
* The driver shouldn't change this directly, but use the
* API calls meant for that purpose.
- * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
- * Must be a subset of valid_links.
+ * @dormant_links: subset of the valid links that are disabled/suspended
+ * due to advertised or negotiated TTLM respectively.
+ * 0 for non-MLO.
* @suspended_links: subset of dormant_links representing links that are
- * suspended.
+ * suspended due to negotiated TTLM, and could be activated in the
+ * future by tearing down the TTLM negotiation.
* 0 for non-MLO.
* @neg_ttlm: negotiated TID to link mapping info.
* see &struct ieee80211_neg_ttlm.
* @addr: address of this interface
+ * @addr_valid: indicates if the address is actively used. Set to false for
+ * passive monitor interfaces, true in all other cases.
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
* @netdev_features: tx netdev features supported by the hardware for this
@@ -1961,7 +2030,6 @@ enum ieee80211_neg_ttlm_res {
* @txq: the multicast data TX queue
* @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
* &enum ieee80211_offload_flags.
- * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1971,6 +2039,7 @@ struct ieee80211_vif {
u16 valid_links, active_links, dormant_links, suspended_links;
struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
+ bool addr_valid;
bool p2p;
u8 cab_queue;
@@ -1989,8 +2058,6 @@ struct ieee80211_vif {
bool probe_req_reg;
bool rx_mcast_action_reg;
- struct ieee80211_vif *mbssid_tx_vif;
-
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -2052,7 +2119,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
* This can be used by mac80211 drivers with direct cfg80211 APIs
* (like the vendor commands) that get a wdev.
*
- * Note that this function may return %NULL if the given wdev isn't
+ * Return: pointer to the wdev, or %NULL if the given wdev isn't
* associated with a vif that the driver knows about (e.g. monitor
* or AP_VLAN interfaces.)
*/
@@ -2065,6 +2132,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* This can be used by mac80211 drivers with direct cfg80211 APIs
* (like the vendor commands) that needs to get the wdev for a vif.
* This can also be useful to get the netdev associated to a vif.
+ *
+ * Return: pointer to the wdev
*/
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
@@ -2123,8 +2192,8 @@ static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
* @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
* @IEEE80211_KEY_FLAG_NO_AUTO_TX: Key needs explicit Tx activation.
* @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver
- * for a AES_CMAC key to indicate that it requires sequence number
- * generation only
+ * for an AES_CMAC or an AES_GMAC key to indicate that it requires sequence
+ * number generation only
* @IEEE80211_KEY_FLAG_SPP_AMSDU: SPP A-MSDUs can be used with this key
* (set by mac80211 from the sta->spp_amsdu flag)
*/
@@ -2156,7 +2225,7 @@ enum ieee80211_key_flags {
* @tx_pn: PN used for TX keys, may be used by the driver as well if it
* needs to do software PN assignment by itself (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
- * @keyidx: the key index (0-3)
+ * @keyidx: the key index (0-7)
* @keylen: key material length
* @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte)
* data block:
@@ -2165,7 +2234,7 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
- * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
+ * @link_id: the link ID, 0 for non-MLO, or -1 for pairwise keys
*/
struct ieee80211_key_conf {
atomic64_t tx_pn;
@@ -2276,6 +2345,8 @@ enum ieee80211_sta_rx_bandwidth {
IEEE80211_STA_RX_BW_320,
};
+#define IEEE80211_STA_RX_BW_MAX IEEE80211_STA_RX_BW_320
+
/**
* struct ieee80211_sta_rates - station rate selection table
*
@@ -2421,6 +2492,7 @@ struct ieee80211_link_sta {
* @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
* A-MSDU. Taken from the Extended Capabilities element. 0 means
* unlimited.
+ * @eml_cap: EML capabilities of this MLO station
* @cur: currently valid data as aggregated from the active links
* For non MLO STA it will point to the deflink data. For MLO STA
* ieee80211_sta_recalc_aggregates() must be called to update it.
@@ -2442,7 +2514,7 @@ struct ieee80211_link_sta {
* @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
- u8 addr[ETH_ALEN];
+ u8 addr[ETH_ALEN] __aligned(2);
u16 aid;
u16 max_rx_aggregation_subframes;
bool wme;
@@ -2455,6 +2527,7 @@ struct ieee80211_sta {
bool mlo;
bool spp_amsdu;
u8 max_amsdu_subframes;
+ u16 eml_cap;
struct ieee80211_sta_aggregates *cur;
@@ -2637,6 +2710,11 @@ struct ieee80211_txq {
* a virtual monitor interface when monitor interfaces are the only
* active interfaces.
*
+ * @IEEE80211_HW_NO_VIRTUAL_MONITOR: The driver would like to be informed
+ * of all monitor interfaces, as well as their configured channels.
+ * This is useful for supporting multiple monitor interfaces on different
+ * channels.
+ *
* @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to
* be created. It is expected user-space will create vifs as
* desired (and thus have them named as desired).
@@ -2724,14 +2802,6 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
* TDLS links.
*
- * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the
- * mgd_prepare_tx() callback to be called before transmission of a
- * deauthentication frame in case the association was completed but no
- * beacon was heard. This is required in multi-channel scenarios, where the
- * virtual interface might not be given air time for the transmission of
- * the frame, as it is not synced with the AP/P2P GO yet, and thus the
- * deauthentication frame might not be transmitted.
- *
* @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
* support QoS NDP for AP probing - that's most likely a driver bug.
*
@@ -2780,12 +2850,19 @@ struct ieee80211_txq {
*
* @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
* and connecting with a lower bandwidth instead
+ * @IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ: HW requires disabling puncturing in
+ * EHT in 5 GHz and connecting with a lower bandwidth instead
*
* @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so
* no need to stop queues. This really should be set by a driver that
* implements MLO, so operation can continue on other links when one
* link is switching.
*
+ * @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec
+ * but otherwise ignored/worked around for interoperability. This is a
+ * HW flag so drivers can opt in according to their own control, e.g. in
+ * testing.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2802,6 +2879,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
IEEE80211_HW_MFP_CAPABLE,
IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_VIRTUAL_MONITOR,
IEEE80211_HW_NO_AUTO_VIF,
IEEE80211_HW_SW_CRYPTO_CONTROL,
IEEE80211_HW_SUPPORT_FAST_XMIT,
@@ -2829,7 +2907,6 @@ enum ieee80211_hw_flags {
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
- IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
IEEE80211_HW_BUFF_MMPDU_TXQ,
IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
@@ -2844,7 +2921,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
IEEE80211_HW_DISALLOW_PUNCTURING,
+ IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
IEEE80211_HW_HANDLES_QUIET_CSA,
+ IEEE80211_HW_STRICT,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3142,6 +3221,19 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
/**
+ * ieee80211_purge_tx_queue - purge TX skb queue
+ * @hw: the hardware
+ * @skbs: the skbs
+ *
+ * Free a set of transmit skbs. Use this function when the device is going to
+ * stop but some transmit skbs without TX status are still queued.
+ * This function does not take the list lock and the caller must hold the
+ * relevant locks to use it.
+ */
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs);
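/*
 * Editorial sketch (not part of the patch): a hypothetical driver stop path
 * freeing skbs that will never get TX status. The example_drv structure and
 * its txq_skbs field are assumptions for illustration.
 */
struct example_drv {
	struct sk_buff_head txq_skbs;	/* hypothetical pending-TX queue */
};

static void example_drv_stop(struct ieee80211_hw *hw, struct example_drv *drv)
{
	/* caller must serialize against the TX path, per the doc above */
	ieee80211_purge_tx_queue(hw, &drv->txq_skbs);
}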
+
+/**
* DOC: Hardware crypto acceleration
*
* mac80211 is capable of taking advantage of many hardware
@@ -3741,13 +3833,15 @@ enum ieee80211_reconfig_type {
* @success: whether the frame exchange was successful, only
* used with the mgd_complete_tx() method, and then only
* valid for auth and (re)assoc.
+ * @was_assoc: set if this call is due to deauth/disassoc
+ * while just having been associated
* @link_id: the link id on which the frame will be TX'ed.
- * Only used with the mgd_prepare_tx() method.
+ * 0 for a non-MLO connection.
*/
struct ieee80211_prep_tx_info {
u16 duration;
u16 subtype;
- u8 success:1;
+ u8 success:1, was_assoc:1;
int link_id;
};
@@ -4020,8 +4114,8 @@ struct ieee80211_prep_tx_info {
* in @sta_state.
* The callback can sleep.
*
- * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
- * used to transmit to the station. The changes are advertised with bits
+ * @link_sta_rc_update: Notifies the driver of changes to the bitrates that can
+ * be used to transmit to the station. The changes are advertised with bits
* from &enum ieee80211_rate_control_changed and the values are reflected
* in the station data. This callback should only be used when the driver
* uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
@@ -4196,12 +4290,9 @@ struct ieee80211_prep_tx_info {
* yet it need not necessarily be given airtime, in particular since any
* transmission to a P2P GO needs to be synchronized against the GO's
* powersave state. mac80211 will call this function before transmitting a
- * management frame prior to having successfully associated to allow the
- * driver to give it channel time for the transmission, to get a response
- * and to be able to synchronize with the GO.
- * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211
- * would also call this function before transmitting a deauthentication
- * frame in case that no beacon was heard from the AP/P2P GO.
+ * management frame prior to transmitting that frame to allow the driver
+ * to give it channel time for the transmission, to get a response and be
+ * able to synchronize with the GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
* Additional information is passed in the &struct ieee80211_prep_tx_info
@@ -4392,13 +4483,19 @@ struct ieee80211_prep_tx_info {
* if the requested TID-To-Link mapping can be accepted or not.
* If it's not accepted the driver may suggest a preferred mapping and
* modify @ttlm parameter with the suggested TID-to-Link mapping.
+ * @prep_add_interface: prepare for interface addition. This can be used by
+ * drivers to prepare for the addition of a new interface, e.g., allocate
+ * the needed resources etc. This callback doesn't guarantee that an
+ * interface with the specified type will be added, and thus drivers that
+ * implement this callback need to handle such cases. The type is the full
+ * &enum nl80211_iftype.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
- void (*stop)(struct ieee80211_hw *hw);
+ void (*stop)(struct ieee80211_hw *hw, bool suspend);
#ifdef CONFIG_PM
int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
int (*resume)(struct ieee80211_hw *hw);
@@ -4508,10 +4605,10 @@ struct ieee80211_ops {
void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_rc_update)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u32 changed);
+ void (*link_sta_rc_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed);
void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -4684,7 +4781,7 @@ struct ieee80211_ops {
u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta);
int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm);
+ unsigned int link_id, int *dbm);
int (*tdls_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4776,6 +4873,8 @@ struct ieee80211_ops {
enum ieee80211_neg_ttlm_res
(*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_neg_ttlm *ttlm);
+ void (*prep_add_interface)(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
};
/**
@@ -5254,22 +5353,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
int max_rates);
/**
- * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
- *
- * Call this function to notify mac80211 about a change in expected throughput
- * to a station. A driver for a device that does rate control in firmware can
- * call this function when the expected throughput estimate towards a station
- * changes. The information is used to tune the CoDel AQM applied to traffic
- * going towards that station (which can otherwise be too aggressive and cause
- * slow stations to starve).
- *
- * @pubsta: the station to set throughput for.
- * @thr: the current expected throughput in kbps.
- */
-void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
- u32 thr);
-
-/**
* ieee80211_tx_rate_update - transmit rate update callback
*
* Drivers should call this functions with a non-NULL pub sta
@@ -5600,7 +5683,7 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id);
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @link_id: valid link_id during MLO or 0 for non-MLO
*
- * This function returns whether the countdown reached zero.
+ * Return: %true if the countdown reached 1, %false otherwise
*/
bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
unsigned int link_id);
@@ -5608,12 +5691,13 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
/**
* ieee80211_color_change_finish - notify mac80211 about color change
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* After a color change announcement was scheduled and the counter in this
* announcement hits 1, this function must be called by the driver to
* notify mac80211 that the color can be changed
*/
-void ieee80211_color_change_finish(struct ieee80211_vif *vif);
+void ieee80211_color_change_finish(struct ieee80211_vif *vif, u8 link_id);
/**
* ieee80211_proberesp_get - retrieve a Probe Response template
@@ -5945,8 +6029,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* key(s) will be available. These will be needed by mac80211 for proper
* RX processing, so this function allows setting them.
*
- * The function returns the newly allocated key structure, which will
- * have similar contents to the passed key configuration but point to
+ * Return: the newly allocated key structure, which will have
+ * similar contents to the passed key configuration but point to
* mac80211-owned memory. In case of errors, the function returns an
* ERR_PTR(), use IS_ERR() etc.
*
@@ -6203,6 +6287,24 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
void (*iterator)(void *data,
struct ieee80211_sta *sta),
void *data);
+
+/**
+ * ieee80211_iterate_stations_mtx - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them. This version can only be used while holding the wiphy
+ * mutex.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
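/*
 * Editorial sketch (not part of the patch): counting uploaded stations with
 * the new iterator; the wiphy mutex must be held. Names are hypothetical.
 */
static void example_count_sta(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int example_num_stations(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_stations_mtx(hw, example_count_sta, &count);
	return count;
}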
+
/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
@@ -6345,6 +6447,8 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
* may be %NULL if the link ID is not needed
*
* Obtain the STA by link address, must use RCU protection.
+ *
+ * Return: pointer to STA if found, otherwise %NULL.
*/
struct ieee80211_sta *
ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw,
@@ -6474,8 +6578,8 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
* @hw: pointer obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
*
- * Return true if the AQL's airtime limit has not been reached and the txq can
- * continue to send more packets to the device. Otherwise return false.
+ * Return: %true if the AQL's airtime limit has not been reached and the txq can
+ * continue to send more packets to the device. Otherwise return %false.
*/
bool
ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
@@ -6558,6 +6662,31 @@ void ieee80211_iter_chan_contexts_atomic(
void *iter_data);
/**
+ * ieee80211_iter_chan_contexts_mtx - iterate channel contexts
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @iter: iterator function
+ * @iter_data: data passed to iterator function
+ *
+ * Iterate all active channel contexts. This function can only be used while
+ * holding the wiphy mutex.
+ *
+ * The iterator will not find a context that's being added (during
+ * the driver callback to add it) but will find it while it's being
+ * removed.
+ *
+ * Note that during hardware restart, all contexts that existed
+ * before the restart are considered already present so will be
+ * found while iterating, whether they've been re-added already
+ * or not.
+ */
+void ieee80211_iter_chan_contexts_mtx(
+ struct ieee80211_hw *hw,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data),
+ void *iter_data);
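/*
 * Editorial sketch (not part of the patch): finding a channel context with
 * radar detection enabled, under the wiphy mutex. Names are hypothetical.
 */
static void example_find_radar_ctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *conf,
				   void *data)
{
	struct ieee80211_chanctx_conf **ret = data;

	if (conf->radar_enabled)
		*ret = conf;
}

static struct ieee80211_chanctx_conf *example_radar_ctx(struct ieee80211_hw *hw)
{
	struct ieee80211_chanctx_conf *ctx = NULL;

	/* wiphy mutex must be held by the caller */
	ieee80211_iter_chan_contexts_mtx(hw, example_find_radar_ctx, &ctx);
	return ctx;
}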
+
+/**
* ieee80211_ap_probereq_get - retrieve a Probe Request template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -6675,8 +6804,11 @@ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @chanctx_conf: channel context on which the radar was detected. A valid
+ * pointer is mandatory during MLO; for non-MLO, %NULL can be passed.
*/
-void ieee80211_radar_detected(struct ieee80211_hw *hw);
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf);
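/*
 * Editorial sketch (not part of the patch): reporting radar under the new
 * signature. The example_dfs structure is a hypothetical driver field set.
 */
struct example_dfs {
	struct ieee80211_hw *hw;
	struct ieee80211_chanctx_conf *ctx;	/* affected context, or NULL */
};

static void example_report_radar(struct example_dfs *dfs)
{
	/* non-MLO drivers may pass %NULL instead of a context */
	ieee80211_radar_detected(dfs->hw, dfs->ctx);
}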
/**
* ieee80211_chswitch_done - Complete channel switch process
@@ -6694,14 +6826,12 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
/**
* ieee80211_channel_switch_disconnect - disconnect due to channel switch error
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
- * @block_tx: if %true, do not send deauth frame.
*
 * Instruct mac80211 to disconnect due to a channel switch error. The channel
 * switch can request to block TX, so we need to make sure we do not send a
 * deauth frame in this case.
*/
-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
- bool block_tx);
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif);
/**
* ieee80211_request_smps - request SM PS transition
@@ -6978,6 +7108,8 @@ bool rate_usable_index_exists(struct ieee80211_supported_band *sband,
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @pubsta: &struct ieee80211_sta pointer to the target destination.
* @rates: new tx rate set to be used for this station.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int rate_control_set_rates(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta,
@@ -7138,6 +7270,8 @@ void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
* @band: the band to transmit on
* @sta: optional pointer to get the station to send the frame to
*
+ * Return: %true if the skb was prepared, %false otherwise
+ *
* Note: must be called under RCU lock
*/
bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
@@ -7154,6 +7288,8 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
*
* @skb: packet injected by userspace
* @dev: the &struct device of this 802.11 device
+ *
+ * Return: %true if the radiotap header was parsed, %false otherwise
*/
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct net_device *dev);
@@ -7263,7 +7399,7 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* @txq: pointer obtained from station or virtual interface, or from
* ieee80211_next_txq()
*
- * Returns the skb if successful, %NULL if no frame was available.
+ * Return: the skb if successful, %NULL if no frame was available.
*
* Note that this must be called in an rcu_read_lock() critical section,
* which can only be released after the SKB was handled. Some pointers in
@@ -7289,6 +7425,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface, or from
* ieee80211_next_txq()
+ *
+ * Return: the skb if successful, %NULL if no frame was available.
*/
static inline struct sk_buff *ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
@@ -7320,7 +7458,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to return packets from.
*
- * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
+ * Return: the next txq if successful, %NULL if no queue is eligible. If a txq
* is returned, it should be returned with ieee80211_return_txq() after the
* driver has finished scheduling it.
*/
@@ -7403,6 +7541,8 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
+ *
+ * Return: %true if transmission is allowed, %false otherwise
*/
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
@@ -7463,6 +7603,8 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif,
* @status: &struct ieee80211_rx_status containing the transmission rate
* information.
* @len: frame length in bytes
+ *
+ * Return: the airtime estimate
*/
u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
struct ieee80211_rx_status *status,
@@ -7477,23 +7619,13 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @info: &struct ieee80211_tx_info of the frame.
* @len: frame length in bytes
+ *
+ * Return: the airtime estimate
*/
u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
int len);
/**
- * ieee80211_set_hw_80211_encap - enable hardware encapsulation offloading.
- *
- * This function is used to notify mac80211 that a vif can be passed raw 802.3
- * frames. The driver needs to then handle the 802.11 encapsulation inside the
- * hardware or firmware.
- *
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
- * @enable: indicate if the feature should be turned on or off
- */
-bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
-
-/**
* ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -7522,6 +7654,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
/**
* ieee80211_obss_color_collision_notify - notify userland about a BSS color
* collision.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
@@ -7529,7 +7662,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
*/
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap);
+ u64 color_bitmap, u8 link_id);
/**
* ieee80211_is_tx_data - check if frame is a data frame
@@ -7538,6 +7671,8 @@ ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
* hardware encapsulation enabled are data frames.
*
* @skb: the frame to be transmitted.
+ *
+ * Return: %true if @skb is a data frame, %false otherwise
*/
static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
{
@@ -7566,13 +7701,15 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
*
* - change_vif_links(0x11)
* - unassign_vif_chanctx(link_id=0)
+ * - assign_vif_chanctx(link_id=4)
* - change_sta_links(0x11) for each affected STA (the AP)
* (TDLS connections on now inactive links should be torn down)
* - remove group keys on the old link (link_id 0)
* - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
* - change_sta_links(0x10) for each affected STA (the AP)
- * - assign_vif_chanctx(link_id=4)
* - change_vif_links(0x10)
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
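/*
 * Editorial sketch (not part of the patch): requesting the link switch
 * described above in one call; mac80211 performs the documented callback
 * sequence internally. The function name is hypothetical.
 */
static int example_switch_to_link4(struct ieee80211_vif *vif)
{
	/* old active link 0 -> new active link 4 */
	return ieee80211_set_active_links(vif, BIT(4));
}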
@@ -7589,6 +7726,86 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
u16 active_links);
+/**
+ * ieee80211_send_teardown_neg_ttlm - tear down a negotiated TTLM request
+ * @vif: the interface on which the tear down request should be sent.
+ *
+ * This function can be used to tear down a previously accepted negotiated
+ * TTLM request.
+ */
+void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_chan_width_to_rx_bw - convert channel width to STA RX bandwidth
+ * @width: the channel width value to convert
+ * Return: the STA RX bandwidth value for the channel width
+ */
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
+{
+ switch (width) {
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_STA_RX_BW_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_STA_RX_BW_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_STA_RX_BW_80;
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ return IEEE80211_STA_RX_BW_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_STA_RX_BW_320;
+ }
+}
+
+/**
+ * ieee80211_prepare_rx_omi_bw - prepare for sending BW RX OMI
+ * @link_sta: the link STA the OMI is going to be sent to
+ * @bw: the bandwidth requested
+ *
+ * When the driver decides to do RX OMI to change bandwidth with a STA
+ * it calls this function to prepare, then sends the OMI, and finally
+ * calls ieee80211_finalize_rx_omi_bw().
+ *
+ * Note that the (link) STA rate control is updated accordingly as well,
+ * but the chanctx might not be updated if there are other users.
+ * If the intention is to reduce the listen bandwidth, the driver must
+ * ensure there are no TDLS stations nor other uses of the chanctx.
+ *
+ * Also note that in order to sequence correctly, narrowing bandwidth
+ * will only happen in ieee80211_finalize_rx_omi_bw(), whereas widening
+ * again (e.g. going back to normal) will happen here.
+ *
+ * Note that we treat this symmetrically, so if the driver calls this
+ * and tells the peer to only send with a lower bandwidth, we assume
+ * that the driver also wants to only send at that lower bandwidth, to
+ * allow narrowing of the chanctx request for this station/interface.
+ *
+ * Finally, the driver must ensure that if the function returned %true,
+ * ieee80211_finalize_rx_omi_bw() is also called, even for example in
+ * case of HW restart.
+ *
+ * Context: Must be called with wiphy mutex held, and will call back
+ * into the driver, so ensure no driver locks are held.
+ *
+ * Return: %true if changes are going to be made, %false otherwise
+ */
+bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *link_sta,
+ enum ieee80211_sta_rx_bandwidth bw);
+
+/**
+ * ieee80211_finalize_rx_omi_bw - finalize BW RX OMI update
+ * @link_sta: the link STA the OMI was sent to
+ *
+ * See ieee80211_prepare_rx_omi_bw(). Context is the same here
+ * as well.
+ */
+void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *link_sta);
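/*
 * Editorial sketch (not part of the patch): the prepare/send/finalize
 * sequence for RX OMI. example_send_omi() stands in for the driver's own
 * transmission of the OM control frame and is only declared here.
 */
static void example_send_omi(struct ieee80211_link_sta *link_sta,
			     enum ieee80211_sta_rx_bandwidth bw);

static void example_rx_omi_narrow(struct ieee80211_link_sta *link_sta)
{
	/* wiphy mutex held; no driver locks, as documented above */
	if (!ieee80211_prepare_rx_omi_bw(link_sta, IEEE80211_STA_RX_BW_80))
		return;	/* no change needed */

	example_send_omi(link_sta, IEEE80211_STA_RX_BW_80);

	/* mandatory once prepare returned %true, even across HW restart */
	ieee80211_finalize_rx_omi_bw(link_sta);
}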
+
/* for older drivers - let's not document these ... */
int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx);
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 4a3a9de9da73..d72006a85f02 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -7,7 +7,7 @@
#ifndef NET_MAC802154_H
#define NET_MAC802154_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
@@ -140,7 +140,7 @@ enum ieee802154_hw_flags {
*
* xmit_sync:
* Handler that 802.15.4 module calls for each transmitted frame.
- * skb cntains the buffer starting from the IEEE 802.15.4 header.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration. This is called by a workqueue and useful for
* synchronous 802.15.4 drivers.
@@ -152,7 +152,7 @@ enum ieee802154_hw_flags {
*
* xmit_async:
* Handler that 802.15.4 module calls for each transmitted frame.
- * skb cntains the buffer starting from the IEEE 802.15.4 header.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration.
* This function should return zero or negative errno.
diff --git a/include/net/macsec.h b/include/net/macsec.h
index de216cbc6b05..bc7de5b53e54 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -38,8 +38,8 @@ struct metadata_dst;
typedef union salt {
struct {
- u32 ssci;
- u64 pn;
+ ssci_t ssci;
+ __be64 pn;
} __packed;
u8 bytes[MACSEC_SALT_LEN];
} __packed salt_t;
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 27684135bb4d..3ce56a816425 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -60,6 +60,8 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_DONE = 131,
GDMA_EQE_HWC_SOC_RECONFIG = 132,
GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
+ GDMA_EQE_HWC_SOC_SERVICE = 134,
+ GDMA_EQE_RNIC_QP_FATAL = 176,
};
enum {
@@ -69,6 +71,18 @@ enum {
GDMA_DEVICE_MANA_IB = 3,
};
+enum gdma_service_type {
+ GDMA_SERVICE_TYPE_NONE = 0,
+ GDMA_SERVICE_TYPE_RDMA_SUSPEND = 1,
+ GDMA_SERVICE_TYPE_RDMA_RESUME = 2,
+};
+
+struct mana_service_work {
+ struct work_struct work;
+ struct gdma_dev *gdma_dev;
+ enum gdma_service_type event;
+};
+
struct gdma_resource {
/* Protect the bitmap */
spinlock_t lock;
@@ -151,6 +165,7 @@ struct gdma_general_req {
#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
+#define GDMA_MESSAGE_V4 4
struct gdma_general_resp {
struct gdma_resp_hdr hdr;
@@ -222,9 +237,19 @@ struct gdma_dev {
void *driver_data;
struct auxiliary_device *adev;
+ bool is_suspended;
+ bool rdma_teardown;
};
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE
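/*
 * Editorial sketch (not part of the patch): rounding a queue size to the
 * MANA DMA unit; e.g. a 10000-byte request becomes 12288 bytes (3 MANA
 * pages) and already satisfies the MANA_MIN_QSIZE floor.
 */
static u32 example_mana_qsize(u32 requested)
{
	return max_t(u32, MANA_PAGE_ALIGN(requested), MANA_MIN_QSIZE);
}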
#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
@@ -258,7 +283,8 @@ struct gdma_event {
struct gdma_queue;
struct mana_eq {
- struct gdma_queue *eq;
+ struct gdma_queue *eq;
+ struct dentry *mana_eq_debugfs;
};
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
@@ -356,6 +382,7 @@ struct gdma_irq_context {
struct gdma_context {
struct device *dev;
+ struct dentry *mana_pci_debugfs;
/* Per-vPort max number of queues */
unsigned int max_num_queues;
@@ -395,9 +422,11 @@ struct gdma_context {
/* Azure RDMA adapter */
struct gdma_dev mana_ib;
-};
-#define MAX_NUM_GDMA_DEVICES 4
+ u64 pf_cap_flags1;
+
+ struct workqueue_struct *service_wq;
+};
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
@@ -543,11 +572,18 @@ enum {
*/
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
+#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+
+/* Driver can handle holes (zeros) in the device list */
+#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
- GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
+ GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)
#define GDMA_DRV_CAP_FLAGS2 0
@@ -608,11 +644,12 @@ struct gdma_query_max_resources_resp {
}; /* HW DATA */
/* GDMA_LIST_DEVICES */
+#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
struct gdma_resp_hdr hdr;
u32 num_of_devs;
u32 reserved;
- struct gdma_dev_id devs[64];
+ struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */
/* GDMA_REGISTER_DEVICE */
@@ -690,20 +727,6 @@ struct gdma_query_hwc_timeout_resp {
u32 reserved;
};
-enum atb_page_size {
- ATB_PAGE_SIZE_4K,
- ATB_PAGE_SIZE_8K,
- ATB_PAGE_SIZE_16K,
- ATB_PAGE_SIZE_32K,
- ATB_PAGE_SIZE_64K,
- ATB_PAGE_SIZE_128K,
- ATB_PAGE_SIZE_256K,
- ATB_PAGE_SIZE_512K,
- ATB_PAGE_SIZE_1M,
- ATB_PAGE_SIZE_2M,
- ATB_PAGE_SIZE_MAX,
-};
-
enum gdma_mr_access_flags {
GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
@@ -762,6 +785,7 @@ struct gdma_destroy_dma_region_req {
enum gdma_pd_flags {
GDMA_PD_FLAG_INVALID = 0,
+ GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};
struct gdma_create_pd_req {
@@ -787,11 +811,18 @@ struct gdma_destory_pd_resp {
};/* HW DATA */
enum gdma_mr_type {
+ /*
+ * Guest Physical Address - MRs of this type allow access
+ * to any DMA-mapped memory using a bus-logical address
+ */
+ GDMA_MR_TYPE_GPA = 1,
/* Guest Virtual Address - MRs of this type allow access
* to memory mapped by PTEs associated with this MR using a virtual
* address that is set up in the MST
*/
GDMA_MR_TYPE_GVA = 2,
+ /* Guest zero-based address MRs */
+ GDMA_MR_TYPE_ZBVA = 4,
};
struct gdma_create_mr_params {
@@ -803,6 +834,10 @@ struct gdma_create_mr_params {
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
+ struct {
+ u64 dma_region_handle;
+ enum gdma_mr_access_flags access_flags;
+ } zbva;
};
};
@@ -818,7 +853,10 @@ struct gdma_create_mr_request {
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
-
+ struct {
+ u64 dma_region_handle;
+ enum gdma_mr_access_flags access_flags;
+ } zbva;
};
u32 reserved_2;
};/* HW DATA */
@@ -867,5 +905,9 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
+void mana_register_debugfs(void);
+void mana_unregister_debugfs(void);
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);
#endif /* _GDMA_H */
diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h
index 158b125692c2..83cf93338eb3 100644
--- a/include/net/mana/hw_channel.h
+++ b/include/net/mana/hw_channel.h
@@ -49,6 +49,15 @@ union hwc_init_type_data {
};
}; /* HW DATA */
+union hwc_init_soc_service_type {
+ u32 as_uint32;
+
+ struct {
+ u32 value : 28;
+ u32 type : 4;
+ };
+}; /* HW DATA */
+
struct hwc_rx_oob {
u32 type : 6;
u32 eom : 1;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 4eeedf14711b..9abb66461211 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -30,19 +30,32 @@ enum TRI_STATE {
};
/* Number of entries for hardware indirection table must be in power of 2 */
-#define MANA_INDIRECT_TABLE_SIZE 64
-#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
+#define MANA_INDIRECT_TABLE_MAX_SIZE 512
+#define MANA_INDIRECT_TABLE_DEF_SIZE 64
/* The Toeplitz hash key's length in bytes: should be multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
#define COMP_ENTRY_SIZE 64
-#define RX_BUFFERS_PER_QUEUE 512
+/* This max value for RX buffers is derived from __alloc_page()'s max page
+ * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages. RX
+ * buffer sizes beyond this value are rejected by the __alloc_page() call.
+ */
+#define MAX_RX_BUFFERS_PER_QUEUE 8192
+#define DEF_RX_BUFFERS_PER_QUEUE 1024
+#define MIN_RX_BUFFERS_PER_QUEUE 128
+
+/* This max value for TX buffers was derived through testing as the maximum
+ * number of pages allocatable on the host per guest. TX buffer sizes beyond
+ * this value are rejected by the hardware.
+ */
+#define MAX_TX_BUFFERS_PER_QUEUE 16384
+#define DEF_TX_BUFFERS_PER_QUEUE 256
+#define MIN_TX_BUFFERS_PER_QUEUE 128
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
-#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3
#define MAX_PORTS_IN_MANA_DEV 256
@@ -97,6 +110,8 @@ struct mana_txq {
atomic_t pending_sends;
+ bool napi_initialized;
+
struct mana_stats_tx stats;
};
@@ -274,6 +289,7 @@ struct mana_cq {
/* NAPI data */
struct napi_struct napi;
int work_done;
+ int work_done_since_doorbell;
int budget;
};
@@ -284,7 +300,7 @@ struct mana_recv_buf_oob {
void *buf_va;
bool from_pool; /* allocated from a page pool */
- /* SGL of the buffer going to be sent has part of the work request. */
+ /* SGL of the buffer going to be sent as part of the work request. */
u32 num_sge;
struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
@@ -334,6 +350,7 @@ struct mana_rxq {
int xdp_rc; /* XDP redirect return code */
struct page_pool *page_pool;
+ struct dentry *mana_rx_debugfs;
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
@@ -347,6 +364,8 @@ struct mana_tx_qp {
struct mana_cq tx_cq;
mana_handle_t tx_object;
+
+ struct dentry *mana_tx_debugfs;
};
struct mana_ethtool_stats {
@@ -389,8 +408,10 @@ struct mana_context {
struct gdma_dev *gdma_dev;
u16 num_ports;
+ u8 bm_hostmode;
struct mana_eq *eqs;
+ struct dentry *mana_eqs_debugfs;
struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};
@@ -410,10 +431,11 @@ struct mana_port_context {
struct mana_tx_qp *tx_qp;
/* Indirection Table for RX & TX. The values are queue indexes */
- u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+ u32 *indir_table;
+ u32 indir_table_sz;
/* Indirection table containing RxObject Handles */
- mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+ mana_handle_t *rxobj_table;
/* Hash key used by the NIC */
u8 hashkey[MANA_HASH_KEY_SIZE];
@@ -435,6 +457,9 @@ struct mana_port_context {
unsigned int max_queues;
unsigned int num_queues;
+ unsigned int rx_queue_size;
+ unsigned int tx_queue_size;
+
mana_handle_t port_handle;
mana_handle_t pf_filter_handle;
@@ -448,6 +473,9 @@ struct mana_port_context {
bool port_st_save; /* Saved port state */
struct mana_ethtool_stats eth_stats;
+
+ /* Debugfs */
+ struct dentry *mana_port_debugfs;
};
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -461,6 +489,9 @@ int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
+int mana_rdma_probe(struct gdma_dev *gd);
+void mana_rdma_remove(struct gdma_dev *gd);
+
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
u32 flags);
@@ -470,8 +501,11 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
+int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
+void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
+extern struct dentry *mana_debugfs_root;
/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff
@@ -527,7 +561,8 @@ struct mana_query_device_cfg_resp {
u64 pf_cap_flags4;
u16 max_num_vports;
- u16 reserved;
+ u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
+ u8 reserved;
u32 max_num_eqs;
/* response v2: */
@@ -670,6 +705,7 @@ struct mana_cfg_rx_steer_req_v2 {
u8 hashkey[MANA_HASH_KEY_SIZE];
u8 cqe_coalescing_enable;
u8 reserved2[7];
+ mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */
struct mana_cfg_rx_steer_resp {
@@ -795,4 +831,8 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker);
#endif /* _MANA_H */
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 7b17c52e8ce2..07d458990113 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -212,7 +212,7 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
WARN_ON(cb->magic != 0x4d435450);
- return (void *)(skb->cb);
+ return cb;
}
/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
@@ -295,7 +295,25 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev);
int mctp_routes_init(void);
void mctp_routes_exit(void);
-void mctp_device_init(void);
+int mctp_device_init(void);
void mctp_device_exit(void);
+/* MCTP IDs and Codes from DMTF specification
+ * "DSP0239 Management Component Transport Protocol (MCTP) IDs and Codes"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0239_1.11.1.pdf
+ */
+enum mctp_phys_binding {
+ MCTP_PHYS_BINDING_UNSPEC = 0x00,
+ MCTP_PHYS_BINDING_SMBUS = 0x01,
+ MCTP_PHYS_BINDING_PCIE_VDM = 0x02,
+ MCTP_PHYS_BINDING_USB = 0x03,
+ MCTP_PHYS_BINDING_KCS = 0x04,
+ MCTP_PHYS_BINDING_SERIAL = 0x05,
+ MCTP_PHYS_BINDING_I3C = 0x06,
+ MCTP_PHYS_BINDING_MMBI = 0x07,
+ MCTP_PHYS_BINDING_PCC = 0x08,
+ MCTP_PHYS_BINDING_UCIE = 0x09,
+ MCTP_PHYS_BINDING_VENDOR = 0xFF,
+};
+
#endif /* __NET_MCTP_H */
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
index 5c0d04b5c12c..957d9ef924c5 100644
--- a/include/net/mctpdevice.h
+++ b/include/net/mctpdevice.h
@@ -22,6 +22,7 @@ struct mctp_dev {
refcount_t refs;
unsigned int net;
+ enum mctp_phys_binding binding;
const struct mctp_netdev_ops *ops;
@@ -44,7 +45,8 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
int mctp_register_netdev(struct net_device *dev,
- const struct mctp_netdev_ops *ops);
+ const struct mctp_netdev_ops *ops,
+ enum mctp_phys_binding binding);
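/*
 * Editorial sketch (not part of the patch): a transport driver passing its
 * DSP0239 binding type at registration. example_mctp_ops stands in for the
 * driver's real, initialized ops structure; names are hypothetical.
 */
static const struct mctp_netdev_ops example_mctp_ops;

static int example_i3c_register(struct net_device *ndev)
{
	return mctp_register_netdev(ndev, &example_mctp_ops,
				    MCTP_PHYS_BINDING_I3C);
}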
void mctp_unregister_netdev(struct net_device *dev);
void mctp_dev_hold(struct mctp_dev *mdev);
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index fb996124b3d5..f7263fe2a2e4 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -14,6 +14,7 @@
struct mptcp_info;
struct mptcp_sock;
+struct mptcp_pm_addr_entry;
struct seq_file;
/* MPTCP sk_buff extension data */
@@ -97,17 +98,12 @@ struct mptcp_out_options {
};
#define MPTCP_SCHED_NAME_MAX 16
-#define MPTCP_SUBFLOWS_MAX 8
-
-struct mptcp_sched_data {
- bool reinject;
- u8 subflows;
- struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
-};
+#define MPTCP_SCHED_MAX 128
+#define MPTCP_SCHED_BUF_MAX (MPTCP_SCHED_NAME_MAX * MPTCP_SCHED_MAX)
struct mptcp_sched_ops {
- int (*get_subflow)(struct mptcp_sock *msk,
- struct mptcp_sched_data *data);
+ int (*get_send)(struct mptcp_sock *msk);
+ int (*get_retrans)(struct mptcp_sock *msk);
char name[MPTCP_SCHED_NAME_MAX];
struct module *owner;
@@ -117,6 +113,19 @@ struct mptcp_sched_ops {
void (*release)(struct mptcp_sock *msk);
} ____cacheline_aligned_in_smp;
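/*
 * Editorial sketch (not part of the patch): the shape of a scheduler under
 * the new per-direction hooks, which replace the single get_subflow() with
 * its mptcp_sched_data argument. Subflow selection and the registration
 * call are elided; all names are hypothetical.
 */
static int example_get_send(struct mptcp_sock *msk)
{
	/* mark the subflow(s) to transmit on; return 0 on success */
	return 0;
}

static struct mptcp_sched_ops example_sched = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.get_send	= example_get_send,
	.get_retrans	= example_get_send,
};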
+#define MPTCP_PM_NAME_MAX 16
+#define MPTCP_PM_MAX 128
+#define MPTCP_PM_BUF_MAX (MPTCP_PM_NAME_MAX * MPTCP_PM_MAX)
+
+struct mptcp_pm_ops {
+ char name[MPTCP_PM_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+
+ void (*init)(struct mptcp_sock *msk);
+ void (*release)(struct mptcp_sock *msk);
+} ____cacheline_aligned_in_smp;
+
#ifdef CONFIG_MPTCP
void mptcp_init(void);
@@ -220,6 +229,8 @@ static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
return htonl(0u);
}
+
+void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
#else
static inline void mptcp_init(void)
@@ -304,6 +315,8 @@ static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct reques
}
static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
+
+static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 9bbdf6eaa942..3c88d5bc5eed 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -147,11 +147,6 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
- * int (*is_useropt)(u8 nd_opt_type):
- * This function is called when IPv6 decide RA userspace options. if
- * this function returns 1 then the option given by nd_opt_type will
- * be handled as userspace option additional to the IPv6 options.
- *
* int (*parse_options)(const struct net_device *dev,
* struct nd_opt_hdr *nd_opt,
* struct ndisc_options *ndopts):
@@ -200,7 +195,6 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
* addresses. E.g. 802.15.4 6LoWPAN.
*/
struct ndisc_ops {
- int (*is_useropt)(u8 nd_opt_type);
int (*parse_options)(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts);
@@ -224,15 +218,6 @@ struct ndisc_ops {
};
#if IS_ENABLED(CONFIG_IPV6)
-static inline int ndisc_ops_is_useropt(const struct net_device *dev,
- u8 nd_opt_type)
-{
- if (dev->ndisc_ops && dev->ndisc_ops->is_useropt)
- return dev->ndisc_ops->is_useropt(nd_opt_type);
- else
- return 0;
-}
-
static inline int ndisc_ops_parse_options(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts)
@@ -486,7 +471,7 @@ void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0d28172193fa..9a832cab5b1d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -29,6 +29,7 @@
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
+#include <net/neighbour_tables.h>
/*
* NUD stands for "neighbor unreachability detection"
@@ -135,7 +136,8 @@ struct neigh_statistics {
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
struct neighbour {
- struct neighbour __rcu *next;
+ struct hlist_node hash;
+ struct hlist_node dev_list;
struct neigh_table *tbl;
struct neigh_parms *parms;
unsigned long confirmed;
@@ -190,7 +192,7 @@ struct pneigh_entry {
#define NEIGH_NUM_HASH_RND 4
struct neigh_hash_table {
- struct neighbour __rcu **hash_buckets;
+ struct hlist_head *hash_heads;
unsigned int hash_shift;
__u32 hash_rnd[NEIGH_NUM_HASH_RND];
struct rcu_head rcu;
@@ -236,14 +238,6 @@ struct neigh_table {
struct pneigh_entry **phash_buckets;
};
-enum {
- NEIGH_ARP_TABLE = 0,
- NEIGH_ND_TABLE = 1,
- NEIGH_DN_TABLE = 2,
- NEIGH_NR_TABLES,
- NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
-};
-
static inline int neigh_parms_family(struct neigh_parms *p)
{
return p->tbl->family;
@@ -276,6 +270,12 @@ static inline void *neighbour_priv(const struct neighbour *n)
extern const struct nla_policy nda_policy[];
+#define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash)
+#define neigh_for_each_in_bucket_rcu(pos, head) \
+ hlist_for_each_entry_rcu(pos, head, hash)
+#define neigh_for_each_in_bucket_safe(pos, tmp, head) \
+ hlist_for_each_entry_safe(pos, tmp, head, hash)
+
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
@@ -304,12 +304,9 @@ static inline struct neighbour *___neigh_lookup_noref(
u32 hash_val;
hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference(n->next)) {
+ neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val])
if (n->dev == dev && key_eq(n, pkey))
return n;
- }
return NULL;
}
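
For illustration only (not part of this patch): the _safe variant of the new bucket iterator allows unlinking while walking, as in the sketch below. Real removal goes through neigh_remove_one(); the locking assumptions here are the caller's.

static void example_flush_bucket(struct hlist_head *bucket,
				 const struct net_device *dev)
{
	struct hlist_node *tmp;
	struct neighbour *n;

	/* @tmp keeps the walk valid while entries are unlinked */
	neigh_for_each_in_bucket_safe(n, tmp, bucket)
		if (n->dev == dev)
			hlist_del_rcu(&n->hash);
}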
@@ -350,7 +347,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
-bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
+bool neigh_remove_one(struct neighbour *ndel);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
@@ -412,12 +409,12 @@ void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
-int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write,
void *buffer,
size_t *lenp, loff_t *ppos);
-int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
diff --git a/include/net/neighbour_tables.h b/include/net/neighbour_tables.h
new file mode 100644
index 000000000000..bcffbe8f7601
--- /dev/null
+++ b/include/net/neighbour_tables.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_NEIGHBOUR_TABLES_H
+#define _NET_NEIGHBOUR_TABLES_H
+
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
+};
+
+#endif
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
index 1e74684cbbdb..47f7a4a878b9 100644
--- a/include/net/net_debug.h
+++ b/include/net/net_debug.h
@@ -27,7 +27,7 @@ void netdev_info(const struct net_device *dev, const char *format, ...);
#define netdev_level_once(level, dev, fmt, ...) \
do { \
- static bool __section(".data.once") __print_once; \
+ static bool __section(".data..once") __print_once; \
\
if (!__print_once) { \
__print_once = true; \
@@ -149,9 +149,11 @@ do { \
#if defined(CONFIG_DEBUG_NET)
-#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond))
+#define DEBUG_NET_WARN_ONCE(cond, format...) ((void)WARN_ONCE(cond, format))
#else
#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define DEBUG_NET_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
#endif /* _LINUX_NET_DEBUG_H */
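
DEBUG_NET_WARN_ONCE() is used like WARN_ONCE(); without CONFIG_DEBUG_NET it compiles down to a build-time type check of the condition. An illustrative caller (the helper name is hypothetical):

static void example_check_runt(const struct net_device *dev,
			       const struct sk_buff *skb)
{
	/* warns once per boot with CONFIG_DEBUG_NET, costs nothing otherwise */
	DEBUG_NET_WARN_ONCE(skb->len < ETH_HLEN,
			    "runt skb on %s: len=%u\n",
			    dev->name, skb->len);
}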
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 20c34bd7a077..025a7574b275 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -80,8 +80,12 @@ struct net {
* or to unregister pernet ops
* (pernet_ops_rwsem write locked).
*/
+ struct llist_node defer_free_list;
struct llist_node cleanup_list; /* namespaces on death row */
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
+
#ifdef CONFIG_KEYS
struct key_tag *key_domain; /* Key domain of operation tag */
#endif
@@ -188,6 +192,10 @@ struct net {
#if IS_ENABLED(CONFIG_SMC)
struct netns_smc smc;
#endif
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+ /* Move to a better place when the config guard is removed. */
+ struct mutex rtnl_mutex;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
@@ -205,6 +213,8 @@ void net_ns_barrier(void);
struct ns_common *get_net_ns(struct ns_common *ns);
struct net *get_net_ns_by_fd(int fd);
+extern struct task_struct *cleanup_net_task;
+
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -290,6 +300,7 @@ static inline int check_net(const struct net *net)
}
void net_drop_ns(void *);
+void net_passive_dec(struct net *net);
#else
@@ -319,8 +330,23 @@ static inline int check_net(const struct net *net)
}
#define net_drop_ns NULL
+
+static inline void net_passive_dec(struct net *net)
+{
+ refcount_dec(&net->passive);
+}
#endif
+static inline void net_passive_inc(struct net *net)
+{
+ refcount_inc(&net->passive);
+}
+
+/* Returns true if the netns initialization completed successfully */
+static inline bool net_initialized(const struct net *net)
+{
+ return READ_ONCE(net->list.next);
+}
static inline void __netns_tracker_alloc(struct net *net,
netns_tracker *tracker,
@@ -386,7 +412,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#endif
}
-static inline struct net *read_pnet_rcu(possible_net_t *pnet)
+static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
return rcu_dereference(pnet->net);
@@ -449,10 +475,10 @@ struct pernet_operations {
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
/* Following method is called with RTNL held. */
- void (*exit_batch_rtnl)(struct list_head *net_exit_list,
- struct list_head *dev_kill_list);
- unsigned int *id;
- size_t size;
+ void (*exit_rtnl)(struct net *net,
+ struct list_head *dev_kill_list);
+ unsigned int * const id;
+ const size_t size;
};
/*
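
A subsystem converting from ->exit_batch_rtnl to the per-netns ->exit_rtnl hook ends up with the shape sketched below; the link ops and the ownership test are hypothetical:

static struct rtnl_link_ops example_link_ops;	/* hypothetical */

static void example_exit_rtnl(struct net *net,
			      struct list_head *dev_kill_list)
{
	struct net_device *dev, *next;

	/* called once per dying netns, under RTNL; queue our devices so
	 * the core unregisters them in a single batch
	 */
	for_each_netdev_safe(net, dev, next)
		if (dev->rtnl_link_ops == &example_link_ops)
			unregister_netdevice_queue(dev, dev_kill_list);
}

static struct pernet_operations example_net_ops = {
	.exit_rtnl = example_exit_rtnl,
};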
diff --git a/include/net/net_shaper.h b/include/net/net_shaper.h
new file mode 100644
index 000000000000..5c3f49b52fe9
--- /dev/null
+++ b/include/net/net_shaper.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_SHAPER_H_
+#define _NET_SHAPER_H_
+
+#include <linux/types.h>
+
+#include <uapi/linux/net_shaper.h>
+
+struct net_device;
+struct devlink;
+struct netlink_ext_ack;
+
+enum net_shaper_binding_type {
+ NET_SHAPER_BINDING_TYPE_NETDEV,
+ /* NET_SHAPER_BINDING_TYPE_DEVLINK_PORT */
+};
+
+struct net_shaper_binding {
+ enum net_shaper_binding_type type;
+ union {
+ struct net_device *netdev;
+ struct devlink *devlink;
+ };
+};
+
+struct net_shaper_handle {
+ enum net_shaper_scope scope;
+ u32 id;
+};
+
+/**
+ * struct net_shaper - represents a shaping node on the NIC H/W
+ * zeroed fields are considered not set.
+ * @parent: Unique identifier for the shaper parent, usually implied
+ * @handle: Unique identifier for this shaper
+ * @metric: Specify if the rate limits refers to PPS or BPS
+ * @bw_min: Minimum guaranteed rate for this shaper
+ * @bw_max: Maximum peak rate allowed for this shaper
+ * @burst: Maximum burst for the peak rate of this shaper
+ * @priority: Scheduling priority for this shaper
+ * @weight: Scheduling weight for this shaper
+ */
+struct net_shaper {
+ struct net_shaper_handle parent;
+ struct net_shaper_handle handle;
+ enum net_shaper_metric metric;
+ u64 bw_min;
+ u64 bw_max;
+ u64 burst;
+ u32 priority;
+ u32 weight;
+
+ /* private: */
+ u32 leaves; /* accounted only for NODE scope */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct net_shaper_ops - Operations on device H/W shapers
+ *
+ * The operations apply to both net_device and devlink objects.
+ * The initial shaping configuration at device initialization is empty:
+ * it does not constrain the rate in any way.
+ * The network core keeps track of the applied user configuration in
+ * the net_device or devlink structure.
+ * The operations are serialized via a per-device lock.
+ *
+ * Devices not supporting any kind of nesting should not provide the
+ * @group operation.
+ *
+ * Each shaper is uniquely identified within the device with a 'handle'
+ * comprising the shaper scope and a scope-specific id.
+ */
+struct net_shaper_ops {
+ /**
+ * @group: create the specified shaper scheduling group
+ *
+ * Nest the @leaves shapers under the @node shaper.
+ * All the shapers belong to the device specified by @binding.
+ * The @leaves array size is specified by @leaves_count.
+ * Create both the @leaves and the @node shapers, or, if they already
+ * exist, link them together in the desired way.
+ * @leaves scope must be NET_SHAPER_SCOPE_QUEUE.
+ */
+ int (*group)(struct net_shaper_binding *binding, int leaves_count,
+ const struct net_shaper *leaves,
+ const struct net_shaper *node,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @set: Updates the specified shaper
+ *
+ * Updates or creates the @shaper on the device specified by @binding.
+ */
+ int (*set)(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @delete: Removes the specified shaper
+ *
+ * Removes the shaper configuration as identified by the given @handle
+ * on the device specified by @binding, restoring the default behavior.
+ */
+ int (*delete)(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @capabilities: get the shaper features supported by the device
+ *
+ * Fills the bitmask @cap with the supported capabilities for the
+ * specified @scope and device specified by @binding.
+ */
+ void (*capabilities)(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope, unsigned long *cap);
+};
+
+#endif
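
As a sketch of the contract above (illustrative, not a real driver): a device that can only rate-limit individual queues implements ->set and simply omits ->group:

static int example_hw_set_queue_rate(struct net_device *dev, u32 queue,
				     u64 bw_max);	/* hypothetical */

static int example_shaper_set(struct net_shaper_binding *binding,
			      const struct net_shaper *shaper,
			      struct netlink_ext_ack *extack)
{
	struct net_device *dev = binding->netdev;	/* _TYPE_NETDEV here */

	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_ERR_MSG(extack, "only queue scope is supported");
		return -EOPNOTSUPP;
	}

	/* program bw_max into the per-queue HW rate limiter */
	return example_hw_set_queue_rate(dev, shaper->handle.id,
					 shaper->bw_max);
}

static const struct net_shaper_ops example_shaper_ops = {
	.set	= example_shaper_set,
	/* no .group: this device cannot nest shapers */
};

The ops would be reachable via the driver's net_device_ops; note that providing them also opts the device into the per-instance lock (see netdev_need_ops_lock() in the next file).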
diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h
new file mode 100644
index 000000000000..3d3aef80beac
--- /dev/null
+++ b/include/net/netdev_lock.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_NETDEV_LOCK_H
+#define _NET_NETDEV_LOCK_H
+
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+static inline bool netdev_trylock(struct net_device *dev)
+{
+ return mutex_trylock(&dev->lock);
+}
+
+static inline void netdev_assert_locked(const struct net_device *dev)
+{
+ lockdep_assert_held(&dev->lock);
+}
+
+static inline void
+netdev_assert_locked_or_invisible(const struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING)
+ netdev_assert_locked(dev);
+}
+
+static inline bool netdev_need_ops_lock(const struct net_device *dev)
+{
+ bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ ret |= !!dev->netdev_ops->net_shaper_ops;
+#endif
+
+ return ret;
+}
+
+static inline void netdev_lock_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_lock(dev);
+}
+
+static inline void netdev_unlock_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
+}
+
+static inline void netdev_lock_ops_to_full(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_assert_locked(dev);
+ else
+ netdev_lock(dev);
+}
+
+static inline void netdev_unlock_full_to_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_assert_locked(dev);
+ else
+ netdev_unlock(dev);
+}
+
+static inline void netdev_ops_assert_locked(const struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ lockdep_assert_held(&dev->lock);
+ else
+ ASSERT_RTNL();
+}
+
+static inline void
+netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING)
+ netdev_ops_assert_locked(dev);
+}
+
+static inline void netdev_lock_ops_compat(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_lock(dev);
+ else
+ rtnl_lock();
+}
+
+static inline void netdev_unlock_ops_compat(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
+ else
+ rtnl_unlock();
+}
+
+static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
+ const struct lockdep_map *b)
+{
+ if (a == b)
+ return 0;
+
+ /* Allow locking multiple devices only under rtnl_lock,
+ * the exact order doesn't matter.
+ * Note that upper devices don't lock their ops, so nesting
+ * mostly happens in batched device removal for now.
+ */
+ return lockdep_rtnl_is_held() ? -1 : 1;
+}
+
+#define netdev_lockdep_set_classes(dev) \
+{ \
+ static struct lock_class_key qdisc_tx_busylock_key; \
+ static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
+ static struct lock_class_key dev_instance_lock_key; \
+ unsigned int i; \
+ \
+ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
+ lockdep_set_class(&(dev)->lock, \
+ &dev_instance_lock_key); \
+ lock_set_cmp_fn(&dev->lock, netdev_lock_cmp_fn, NULL); \
+ for (i = 0; i < (dev)->num_tx_queues; i++) \
+ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
+ &qdisc_xmit_lock_key); \
+}
+
+#define netdev_lock_dereference(p, dev) \
+ rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock))
+
+int netdev_debug_event(struct notifier_block *nb, unsigned long event,
+ void *ptr);
+
+#endif
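
Generic code that must run against both converted and legacy drivers brackets the operation with the _compat helpers, as in this sketch (example_do_reconfig() is hypothetical):

static int example_do_reconfig(struct net_device *dev);	/* hypothetical */

static int example_reconfig(struct net_device *dev)
{
	int err;

	netdev_lock_ops_compat(dev);	/* instance lock, or RTNL fallback */
	err = example_do_reconfig(dev);
	netdev_unlock_ops_compat(dev);

	return err;
}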
diff --git a/include/net/netdev_netlink.h b/include/net/netdev_netlink.h
new file mode 100644
index 000000000000..075962dbe743
--- /dev/null
+++ b/include/net/netdev_netlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NETDEV_NETLINK_H
+#define __NET_NETDEV_NETLINK_H
+
+#include <linux/list.h>
+
+struct netdev_nl_sock {
+ struct mutex lock;
+ struct list_head bindings;
+};
+
+#endif /* __NET_NETDEV_NETLINK_H */
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 1ec408585373..ba2eaf39089b 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -4,16 +4,57 @@
#include <linux/netdevice.h>
+/**
+ * struct netdev_config - queue-related configuration for a netdev
+ * @hds_thresh: HDS Threshold value.
+ * @hds_config: HDS value from userspace.
+ */
+struct netdev_config {
+ u32 hds_thresh;
+ u8 hds_config;
+};
+
/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
u64 bytes;
u64 packets;
u64 alloc_fail;
+
+ u64 hw_drops;
+ u64 hw_drop_overruns;
+
+ u64 csum_complete;
+ u64 csum_unnecessary;
+ u64 csum_none;
+ u64 csum_bad;
+
+ u64 hw_gro_packets;
+ u64 hw_gro_bytes;
+ u64 hw_gro_wire_packets;
+ u64 hw_gro_wire_bytes;
+
+ u64 hw_drop_ratelimits;
};
struct netdev_queue_stats_tx {
u64 bytes;
u64 packets;
+
+ u64 hw_drops;
+ u64 hw_drop_errors;
+
+ u64 csum_none;
+ u64 needs_csum;
+
+ u64 hw_gso_packets;
+ u64 hw_gso_bytes;
+ u64 hw_gso_wire_packets;
+ u64 hw_gso_wire_bytes;
+
+ u64 hw_drop_ratelimits;
+
+ u64 stop;
+ u64 wake;
};
/**
@@ -32,6 +73,8 @@ struct netdev_queue_stats_tx {
* statistics will not generally add up to the total number of events for
* the device. The @get_base_stats callback allows filling in the delta
* between events for currently live queues and overall device history.
+ * @get_base_stats can also be used to report any miscellaneous packets
+ * transferred outside of the main set of queues used by the networking stack.
* When the statistics for the entire device are queried, first @get_base_stats
* is issued to collect the delta, and then a series of per-queue callbacks.
* Only statistics which are set in @get_base_stats will be reported
@@ -42,9 +85,11 @@ struct netdev_queue_stats_tx {
* for some of the events is not maintained, and reliable "total" cannot
* be provided).
*
+ * Ops are called under the instance lock if netdev_need_ops_lock()
+ * returns true, otherwise under rtnl_lock.
* Device drivers can assume that when collecting total device stats,
* the @get_base_stats and subsequent per-queue calls are performed
- * "atomically" (without releasing the rtnl_lock).
+ * "atomically" (without releasing the relevant lock).
*
* Device drivers are encouraged to reset the per-queue statistics when
* number of queues change. This is because the primary use case for
@@ -60,6 +105,47 @@ struct netdev_stat_ops {
struct netdev_queue_stats_tx *tx);
};
+void netdev_stat_queue_sum(struct net_device *netdev,
+ int rx_start, int rx_end,
+ struct netdev_queue_stats_rx *rx_sum,
+ int tx_start, int tx_end,
+ struct netdev_queue_stats_tx *tx_sum);
+
+/**
+ * struct netdev_queue_mgmt_ops - netdev ops for queue management
+ *
+ * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
+ *
+ * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
+ * The new memory is written at the specified address.
+ *
+ * @ndo_queue_mem_free: Free memory from an RX queue.
+ *
+ * @ndo_queue_start: Start an RX queue with the specified memory and at the
+ * specified index.
+ *
+ * @ndo_queue_stop: Stop the RX queue at the specified index. The stopped
+ * queue's memory is written at the specified address.
+ *
+ * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
+ * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
+ * be called for an interface which is open.
+ */
+struct netdev_queue_mgmt_ops {
+ size_t ndo_queue_mem_size;
+ int (*ndo_queue_mem_alloc)(struct net_device *dev,
+ void *per_queue_mem,
+ int idx);
+ void (*ndo_queue_mem_free)(struct net_device *dev,
+ void *per_queue_mem);
+ int (*ndo_queue_start)(struct net_device *dev,
+ void *per_queue_mem,
+ int idx);
+ int (*ndo_queue_stop)(struct net_device *dev,
+ void *per_queue_mem,
+ int idx);
+};
+
/**
* DOC: Lockless queue stopping / waking helpers.
*
@@ -202,27 +288,27 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs) \
({ \
- struct netdev_queue *txq; \
+ struct netdev_queue *_txq; \
\
- txq = netdev_get_tx_queue(dev, idx); \
- netif_txq_try_stop(txq, get_desc, start_thrs); \
+ _txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_try_stop(_txq, get_desc, start_thrs); \
})
#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
({ \
- struct netdev_queue *txq; \
+ struct netdev_queue *_txq; \
\
- txq = netdev_get_tx_queue(dev, idx); \
- netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
+ _txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_maybe_stop(_txq, get_desc, stop_thrs, start_thrs); \
})
#define netif_subqueue_completed_wake(dev, idx, pkts, bytes, \
get_desc, start_thrs) \
({ \
- struct netdev_queue *txq; \
+ struct netdev_queue *_txq; \
\
- txq = netdev_get_tx_queue(dev, idx); \
- netif_txq_completed_wake(txq, pkts, bytes, \
+ _txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_completed_wake(_txq, pkts, bytes, \
get_desc, start_thrs); \
})
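
The underscore renaming only avoids shadowing a caller's own txq variable; usage is unchanged. A minimal xmit-path sketch, with every example_* identifier and both thresholds hypothetical:

struct example_ring;	/* driver-private, illustrative */
static struct example_ring *example_get_ring(struct net_device *dev,
					     unsigned int idx);
static unsigned int example_free_descs(struct example_ring *ring);
static netdev_tx_t example_post(struct example_ring *ring, struct sk_buff *skb);

#define EXAMPLE_STOP_THRS	(MAX_SKB_FRAGS + 2)
#define EXAMPLE_START_THRS	(2 * (MAX_SKB_FRAGS + 2))

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int idx = skb_get_queue_mapping(skb);
	struct example_ring *ring = example_get_ring(dev, idx);

	/* stop the subqueue when descriptors run low; the helper re-checks
	 * the count and wakes the queue if completions raced with the stop
	 */
	netif_subqueue_maybe_stop(dev, idx, example_free_descs(ring),
				  EXAMPLE_STOP_THRS, EXAMPLE_START_THRS);

	return example_post(ring, skb);
}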
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index aa1716fb0e53..8cdcd138b33f 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <net/xdp.h>
+#include <net/page_pool/types.h>
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
@@ -15,16 +16,18 @@ struct netdev_rx_queue {
struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
struct kobject kobj;
+ const struct attribute_group **groups;
struct net_device *dev;
netdevice_tracker dev_tracker;
+ /* All fields below are "ops protected",
+ * see comment about net_device::lock
+ */
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
- /* NAPI instance for the queue
- * Readers and writers must hold RTNL
- */
struct napi_struct *napi;
+ struct pp_memory_provider_params mp_params;
} ____cacheline_aligned_in_smp;
/*
@@ -43,7 +46,6 @@ __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
return dev->_rx + rxq;
}
-#ifdef CONFIG_SYSFS
static inline unsigned int
get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
{
@@ -53,5 +55,7 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
BUG_ON(index >= dev->num_rx_queues);
return index;
}
-#endif
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+
#endif
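
netdev_rx_queue_restart() is the core entry point that drives the ndo_queue_* callbacks: allocate new queue memory, stop the old queue, start the new one, free the old memory. A hedged caller sketch, assuming <net/netdev_lock.h> and an already-held instance lock:

static int example_apply_rxq_config(struct net_device *dev, unsigned int rxq)
{
	netdev_ops_assert_locked(dev);

	/* recreate the queue so new settings (e.g. mp_params) take effect */
	return netdev_rx_queue_restart(dev, rxq);
}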
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cba3ccf03fcc..3f02a45773e8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -204,8 +204,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);
void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- u32 extra_jiffies, bool do_acct);
+ u32 extra_jiffies, unsigned int bytes);
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -213,15 +212,14 @@ static inline void nf_ct_refresh_acct(struct nf_conn *ct,
const struct sk_buff *skb,
u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
+ __nf_ct_refresh_acct(ct, ctinfo, extra_jiffies, skb->len);
}
/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
- const struct sk_buff *skb,
u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
+ __nf_ct_refresh_acct(ct, 0, extra_jiffies, 0);
}
/* kill conntrack and do accounting */
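
Callers now drop the skb argument when no accounting is wanted; a hedged sketch of a converted helper:

static void example_extend_timeout(struct nf_conn *ct,
				   enum ip_conntrack_info ctinfo,
				   const struct sk_buff *skb, bool acct)
{
	if (acct)
		nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);	/* counts skb->len */
	else
		nf_ct_refresh(ct, 30 * HZ);			/* timeout only */
}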
@@ -314,16 +312,6 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
#define NF_CT_DAY (86400 * HZ)
-/* Set an arbitrary timeout large enough not to ever expire, this save
- * us a check for the IPS_OFFLOAD_BIT from the packet path via
- * nf_ct_is_expired().
- */
-static inline void nf_ct_offload_timeout(struct nf_conn *ct)
-{
- if (nf_ct_expires(ct) < NF_CT_DAY / 2)
- WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
-}
-
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index e227d997fc71..1b58b5b91ff6 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -15,10 +15,8 @@ struct nf_conncount_list {
unsigned int count; /* length of list */
};
-struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
- unsigned int keylen);
-void nf_conncount_destroy(struct net *net, unsigned int family,
- struct nf_conncount_data *data);
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen);
+void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data);
unsigned int nf_conncount_count(struct net *net,
struct nf_conncount_data *data,
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 0c1dac318e02..8dcf7c371ee9 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -12,6 +12,7 @@
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>
+#include <asm/local64.h>
enum nf_ct_ecache_state {
NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
@@ -20,6 +21,9 @@ enum nf_ct_ecache_state {
struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ local64_t timestamp; /* event timestamp, in nanoseconds */
+#endif
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
u32 missed; /* missed events */
@@ -108,6 +112,14 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
if (e == NULL)
return;
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ /* renew only if this is the first cached event, so that the
+ * timestamp reflects the first, not the last, generated event.
+ */
+ if (local64_read(&e->timestamp) && READ_ONCE(e->cache) == 0)
+ local64_set(&e->timestamp, ktime_get_real_ns());
+#endif
+
set_bit(event, &e->cache);
#endif
}
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 9abb7ee40d72..d711642e78b5 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -163,6 +163,7 @@ struct flow_offload_tuple_rhash {
enum nf_flow_flags {
NF_FLOW_SNAT,
NF_FLOW_DNAT,
+ NF_FLOW_CLOSING,
NF_FLOW_TEARDOWN,
NF_FLOW_HW,
NF_FLOW_HW_DYING,
@@ -305,11 +306,26 @@ struct flow_ports {
__be16 source, dest;
};
+struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev);
+int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+
unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
+#if (IS_BUILTIN(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+extern int nf_flow_register_bpf(void);
+#else
+static inline int nf_flow_register_bpf(void)
+{
+ return 0;
+}
+#endif
+
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h
index 52e27920f829..cef7a4eb8f97 100644
--- a/include/net/netfilter/nf_hooks_lwtunnel.h
+++ b/include/net/netfilter/nf_hooks_lwtunnel.h
@@ -2,6 +2,6 @@
#include <linux/types.h>
#ifdef CONFIG_SYSCTL
-int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3f1ed467f951..e4d8e451e935 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -2,7 +2,7 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -209,6 +209,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
+ * @reg_inited: bitmap of initialised registers
*/
struct nft_ctx {
struct net *net;
@@ -221,6 +222,7 @@ struct nft_ctx {
u8 family;
u8 level;
bool report;
+ DECLARE_BITMAP(reg_inited, NFT_REG32_NUM);
};
enum nft_data_desc_flags {
@@ -254,7 +256,8 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
-int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_load(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *sreg, u32 len);
int nft_parse_register_store(const struct nft_ctx *ctx,
const struct nlattr *attr, u8 *dreg,
const struct nft_data *data,
@@ -311,6 +314,7 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
/**
* enum nft_iter_type - nftables set iterator type
*
+ * @NFT_ITER_UNSPEC: unspecified, to catch errors
* @NFT_ITER_READ: read-only iteration over set elements
* @NFT_ITER_UPDATE: iteration under mutex to update set element state
*/
@@ -416,7 +420,7 @@ struct nft_expr_info;
int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
struct nft_expr_info *info);
-int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src, gfp_t gfp);
void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
const struct nft_expr *expr, bool reset);
@@ -438,6 +442,9 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @ksize: kernel set size
+ * @usize: userspace set size
+ * @adjust_maxsize: delta to adjust maximum set size
* @commit: commit set elements
* @abort: abort set elements
* @privsize: function to return size of set private data
@@ -491,6 +498,9 @@ struct nft_set_ops {
const struct nft_set *set,
const struct nft_set_elem *elem,
unsigned int flags);
+ u32 (*ksize)(u32 size);
+ u32 (*usize)(u32 size);
+ u32 (*adjust_maxsize)(const struct nft_set *set);
void (*commit)(struct nft_set *set);
void (*abort)(const struct nft_set *set);
u64 (*privsize)(const struct nlattr * const nla[],
@@ -619,6 +629,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
+{
+ return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
static inline bool nft_set_gc_is_pending(const struct nft_set *s)
{
return refcount_read(&s->refs) != 1;
@@ -678,9 +693,8 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* @NFT_SET_EXT_DATA: mapping data
* @NFT_SET_EXT_FLAGS: element flags
* @NFT_SET_EXT_TIMEOUT: element timeout
- * @NFT_SET_EXT_EXPIRATION: element expiration time
* @NFT_SET_EXT_USERDATA: user data associated with the element
- * @NFT_SET_EXT_EXPRESSIONS: expressions assiciated with the element
+ * @NFT_SET_EXT_EXPRESSIONS: expressions associated with the element
* @NFT_SET_EXT_OBJREF: stateful object reference associated with element
* @NFT_SET_EXT_NUM: number of extension types
*/
@@ -690,7 +704,6 @@ enum nft_set_extensions {
NFT_SET_EXT_DATA,
NFT_SET_EXT_FLAGS,
NFT_SET_EXT_TIMEOUT,
- NFT_SET_EXT_EXPIRATION,
NFT_SET_EXT_USERDATA,
NFT_SET_EXT_EXPRESSIONS,
NFT_SET_EXT_OBJREF,
@@ -726,15 +739,18 @@ struct nft_set_ext_tmpl {
/**
* struct nft_set_ext - set extensions
*
- * @genmask: generation mask
+ * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
* @offset: offsets of individual extension types
* @data: beginning of extension data
+ *
+ * This structure must be aligned to the word size, otherwise atomic bitops
+ * on the genmask field can cause alignment failures on some archs.
*/
struct nft_set_ext {
u8 genmask;
u8 offset[NFT_SET_EXT_NUM];
char data[];
-};
+} __aligned(BITS_PER_LONG / 8);
static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
{
@@ -802,14 +818,14 @@ static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
}
-static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
-{
- return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
-}
+struct nft_timeout {
+ u64 timeout;
+ u64 expiration;
+};
-static inline u64 *nft_set_ext_expiration(const struct nft_set_ext *ext)
+static inline struct nft_timeout *nft_set_ext_timeout(const struct nft_set_ext *ext)
{
- return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
}
static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
@@ -825,8 +841,11 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
u64 tstamp)
{
- return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+ if (!nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) ||
+ READ_ONCE(nft_set_ext_timeout(ext)->timeout) == 0)
+ return false;
+
+ return time_after_eq64(tstamp, READ_ONCE(nft_set_ext_timeout(ext)->expiration));
}
static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
@@ -935,7 +954,7 @@ struct nft_expr_ops {
struct nft_regs *regs,
const struct nft_pktinfo *pkt);
int (*clone)(struct nft_expr *dst,
- const struct nft_expr *src);
+ const struct nft_expr *src, gfp_t gfp);
unsigned int size;
int (*init)(const struct nft_ctx *ctx,
@@ -954,8 +973,7 @@ struct nft_expr_ops {
const struct nft_expr *expr,
bool reset);
int (*validate)(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
bool (*reduce)(struct nft_regs_track *track,
const struct nft_expr *expr);
bool (*gc)(struct net *net,
@@ -1124,6 +1142,11 @@ int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
+struct nft_hook;
+void nf_tables_chain_device_notify(const struct nft_chain *chain,
+ const struct nft_hook *hook,
+ const struct net_device *dev, int event);
+
enum nft_chain_types {
NFT_CHAIN_T_DEFAULT = 0,
NFT_CHAIN_T_ROUTE,
@@ -1171,7 +1194,7 @@ static inline bool nft_chain_is_bound(struct nft_chain *chain)
int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
void nft_chain_del(struct nft_chain *chain);
-void nf_tables_chain_destroy(struct nft_ctx *ctx);
+void nf_tables_chain_destroy(struct nft_chain *chain);
struct nft_stats {
u64 bytes;
@@ -1181,10 +1204,17 @@ struct nft_stats {
struct nft_hook {
struct list_head list;
- struct nf_hook_ops ops;
+ struct list_head ops_list;
struct rcu_head rcu;
+ char ifname[IFNAMSIZ];
+ u8 ifnamelen;
};
+struct nf_hook_ops *nft_hook_find_ops(const struct nft_hook *hook,
+ const struct net_device *dev);
+struct nf_hook_ops *nft_hook_find_ops_rcu(const struct nft_hook *hook,
+ const struct net_device *dev);
+
/**
* struct nft_base_chain - nf_tables base chain
*
@@ -1218,8 +1248,6 @@ static inline bool nft_is_base_chain(const struct nft_chain *chain)
return chain->flags & NFT_CHAIN_BASE;
}
-int __nft_release_basechain(struct nft_ctx *ctx);
-
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
static inline bool nft_use_inc(u32 *use)
@@ -1454,7 +1482,8 @@ struct nft_flowtable {
struct nf_flowtable data;
};
-struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+struct nft_flowtable *nft_flowtable_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla,
u8 genmask);
@@ -1608,41 +1637,68 @@ static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
}
/**
- * struct nft_trans - nf_tables object update in transaction
+ * struct nft_trans - nf_tables object update in transaction
*
- * @list: used internally
- * @binding_list: list of objects with possible bindings
- * @msg_type: message type
- * @put_net: ctx->net needs to be put
- * @ctx: transaction context
- * @data: internal information related to the transaction
+ * @list: used internally
+ * @net: struct net
+ * @table: struct nft_table the object resides in
+ * @msg_type: message type
+ * @seq: netlink sequence number
+ * @flags: modifiers to new request
+ * @report: notify via unicast netlink message
+ * @put_net: net needs to be put
+ *
+ * This is the information common to all objects in the transaction;
+ * it must always be the first member of derived sub-types.
*/
struct nft_trans {
struct list_head list;
- struct list_head binding_list;
+ struct net *net;
+ struct nft_table *table;
int msg_type;
- bool put_net;
- struct nft_ctx ctx;
- char data[];
+ u32 seq;
+ u16 flags;
+ u8 report:1;
+ u8 put_net:1;
+};
+
+/**
+ * struct nft_trans_binding - nf_tables object with binding support in transaction
+ * @nft_trans: base structure, MUST be first member
+ * @binding_list: list of objects with possible bindings
+ *
+ * This is the base type used by objects that can be bound to a chain.
+ */
+struct nft_trans_binding {
+ struct nft_trans nft_trans;
+ struct list_head binding_list;
};
struct nft_trans_rule {
+ struct nft_trans nft_trans;
struct nft_rule *rule;
+ struct nft_chain *chain;
struct nft_flow_rule *flow;
u32 rule_id;
bool bound;
};
-#define nft_trans_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->rule)
-#define nft_trans_flow_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->flow)
-#define nft_trans_rule_id(trans) \
- (((struct nft_trans_rule *)trans->data)->rule_id)
-#define nft_trans_rule_bound(trans) \
- (((struct nft_trans_rule *)trans->data)->bound)
+#define nft_trans_container_rule(trans) \
+ container_of(trans, struct nft_trans_rule, nft_trans)
+#define nft_trans_rule(trans) \
+ nft_trans_container_rule(trans)->rule
+#define nft_trans_flow_rule(trans) \
+ nft_trans_container_rule(trans)->flow
+#define nft_trans_rule_id(trans) \
+ nft_trans_container_rule(trans)->rule_id
+#define nft_trans_rule_bound(trans) \
+ nft_trans_container_rule(trans)->bound
+#define nft_trans_rule_chain(trans) \
+ nft_trans_container_rule(trans)->chain
struct nft_trans_set {
+ struct nft_trans_binding nft_trans_binding;
+ struct list_head list_trans_newset;
struct nft_set *set;
u32 set_id;
u32 gc_int;
@@ -1652,100 +1708,132 @@ struct nft_trans_set {
u32 size;
};
-#define nft_trans_set(trans) \
- (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans) \
- (((struct nft_trans_set *)trans->data)->set_id)
-#define nft_trans_set_bound(trans) \
- (((struct nft_trans_set *)trans->data)->bound)
-#define nft_trans_set_update(trans) \
- (((struct nft_trans_set *)trans->data)->update)
-#define nft_trans_set_timeout(trans) \
- (((struct nft_trans_set *)trans->data)->timeout)
-#define nft_trans_set_gc_int(trans) \
- (((struct nft_trans_set *)trans->data)->gc_int)
-#define nft_trans_set_size(trans) \
- (((struct nft_trans_set *)trans->data)->size)
+#define nft_trans_container_set(t) \
+ container_of(t, struct nft_trans_set, nft_trans_binding.nft_trans)
+#define nft_trans_set(trans) \
+ nft_trans_container_set(trans)->set
+#define nft_trans_set_id(trans) \
+ nft_trans_container_set(trans)->set_id
+#define nft_trans_set_bound(trans) \
+ nft_trans_container_set(trans)->bound
+#define nft_trans_set_update(trans) \
+ nft_trans_container_set(trans)->update
+#define nft_trans_set_timeout(trans) \
+ nft_trans_container_set(trans)->timeout
+#define nft_trans_set_gc_int(trans) \
+ nft_trans_container_set(trans)->gc_int
+#define nft_trans_set_size(trans) \
+ nft_trans_container_set(trans)->size
struct nft_trans_chain {
+ struct nft_trans_binding nft_trans_binding;
struct nft_chain *chain;
- bool update;
char *name;
struct nft_stats __percpu *stats;
u8 policy;
+ bool update;
bool bound;
u32 chain_id;
struct nft_base_chain *basechain;
struct list_head hook_list;
};
-#define nft_trans_chain(trans) \
- (((struct nft_trans_chain *)trans->data)->chain)
-#define nft_trans_chain_update(trans) \
- (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans) \
- (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans) \
- (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans) \
- (((struct nft_trans_chain *)trans->data)->policy)
-#define nft_trans_chain_bound(trans) \
- (((struct nft_trans_chain *)trans->data)->bound)
-#define nft_trans_chain_id(trans) \
- (((struct nft_trans_chain *)trans->data)->chain_id)
-#define nft_trans_basechain(trans) \
- (((struct nft_trans_chain *)trans->data)->basechain)
-#define nft_trans_chain_hooks(trans) \
- (((struct nft_trans_chain *)trans->data)->hook_list)
+#define nft_trans_container_chain(t) \
+ container_of(t, struct nft_trans_chain, nft_trans_binding.nft_trans)
+#define nft_trans_chain(trans) \
+ nft_trans_container_chain(trans)->chain
+#define nft_trans_chain_update(trans) \
+ nft_trans_container_chain(trans)->update
+#define nft_trans_chain_name(trans) \
+ nft_trans_container_chain(trans)->name
+#define nft_trans_chain_stats(trans) \
+ nft_trans_container_chain(trans)->stats
+#define nft_trans_chain_policy(trans) \
+ nft_trans_container_chain(trans)->policy
+#define nft_trans_chain_bound(trans) \
+ nft_trans_container_chain(trans)->bound
+#define nft_trans_chain_id(trans) \
+ nft_trans_container_chain(trans)->chain_id
+#define nft_trans_basechain(trans) \
+ nft_trans_container_chain(trans)->basechain
+#define nft_trans_chain_hooks(trans) \
+ nft_trans_container_chain(trans)->hook_list
struct nft_trans_table {
+ struct nft_trans nft_trans;
bool update;
};
-#define nft_trans_table_update(trans) \
- (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_container_table(trans) \
+ container_of(trans, struct nft_trans_table, nft_trans)
+#define nft_trans_table_update(trans) \
+ nft_trans_container_table(trans)->update
+
+enum nft_trans_elem_flags {
+ NFT_TRANS_UPD_TIMEOUT = (1 << 0),
+ NFT_TRANS_UPD_EXPIRATION = (1 << 1),
+};
+
+struct nft_elem_update {
+ u64 timeout;
+ u64 expiration;
+ u8 flags;
+};
+
+struct nft_trans_one_elem {
+ struct nft_elem_priv *priv;
+ struct nft_elem_update *update;
+};
struct nft_trans_elem {
+ struct nft_trans nft_trans;
struct nft_set *set;
- struct nft_elem_priv *elem_priv;
bool bound;
+ unsigned int nelems;
+ struct nft_trans_one_elem elems[] __counted_by(nelems);
};
-#define nft_trans_elem_set(trans) \
- (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem_priv(trans) \
- (((struct nft_trans_elem *)trans->data)->elem_priv)
-#define nft_trans_elem_set_bound(trans) \
- (((struct nft_trans_elem *)trans->data)->bound)
+#define nft_trans_container_elem(t) \
+ container_of(t, struct nft_trans_elem, nft_trans)
+#define nft_trans_elem_set(trans) \
+ nft_trans_container_elem(trans)->set
+#define nft_trans_elem_set_bound(trans) \
+ nft_trans_container_elem(trans)->bound
struct nft_trans_obj {
+ struct nft_trans nft_trans;
struct nft_object *obj;
struct nft_object *newobj;
bool update;
};
-#define nft_trans_obj(trans) \
- (((struct nft_trans_obj *)trans->data)->obj)
-#define nft_trans_obj_newobj(trans) \
- (((struct nft_trans_obj *)trans->data)->newobj)
-#define nft_trans_obj_update(trans) \
- (((struct nft_trans_obj *)trans->data)->update)
+#define nft_trans_container_obj(t) \
+ container_of(t, struct nft_trans_obj, nft_trans)
+#define nft_trans_obj(trans) \
+ nft_trans_container_obj(trans)->obj
+#define nft_trans_obj_newobj(trans) \
+ nft_trans_container_obj(trans)->newobj
+#define nft_trans_obj_update(trans) \
+ nft_trans_container_obj(trans)->update
struct nft_trans_flowtable {
+ struct nft_trans nft_trans;
struct nft_flowtable *flowtable;
- bool update;
struct list_head hook_list;
u32 flags;
+ bool update;
};
-#define nft_trans_flowtable(trans) \
- (((struct nft_trans_flowtable *)trans->data)->flowtable)
-#define nft_trans_flowtable_update(trans) \
- (((struct nft_trans_flowtable *)trans->data)->update)
-#define nft_trans_flowtable_hooks(trans) \
- (((struct nft_trans_flowtable *)trans->data)->hook_list)
-#define nft_trans_flowtable_flags(trans) \
- (((struct nft_trans_flowtable *)trans->data)->flags)
+#define nft_trans_container_flowtable(t) \
+ container_of(t, struct nft_trans_flowtable, nft_trans)
+#define nft_trans_flowtable(trans) \
+ nft_trans_container_flowtable(trans)->flowtable
+#define nft_trans_flowtable_update(trans) \
+ nft_trans_container_flowtable(trans)->update
+#define nft_trans_flowtable_hooks(trans) \
+ nft_trans_container_flowtable(trans)->hook_list
+#define nft_trans_flowtable_flags(trans) \
+ nft_trans_container_flowtable(trans)->flags
#define NFT_TRANS_GC_BATCHCOUNT 256
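
Open-coded, the container pattern behind the accessor macros above looks like this sketch (the function is illustrative):

static void example_log_new_rule(const struct nft_trans *trans)
{
	const struct nft_trans_rule *r;

	if (trans->msg_type != NFT_MSG_NEWRULE)
		return;

	/* what nft_trans_container_rule()/nft_trans_rule() expand to */
	r = container_of(trans, struct nft_trans_rule, nft_trans);
	pr_debug("new rule in chain %s\n", r->chain->name);
}

Keeping struct nft_trans as the first member of every sub-type is what makes this container_of() round-trip valid.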
@@ -1759,6 +1847,33 @@ struct nft_trans_gc {
struct rcu_head rcu;
};
+static inline void nft_ctx_update(struct nft_ctx *ctx,
+ const struct nft_trans *trans)
+{
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWRULE:
+ case NFT_MSG_DELRULE:
+ case NFT_MSG_DESTROYRULE:
+ ctx->chain = nft_trans_rule_chain(trans);
+ break;
+ case NFT_MSG_NEWCHAIN:
+ case NFT_MSG_DELCHAIN:
+ case NFT_MSG_DESTROYCHAIN:
+ ctx->chain = nft_trans_chain(trans);
+ break;
+ default:
+ ctx->chain = NULL;
+ break;
+ }
+
+ ctx->net = trans->net;
+ ctx->table = trans->table;
+ ctx->family = trans->table->family;
+ ctx->report = trans->report;
+ ctx->flags = trans->flags;
+ ctx->seq = trans->seq;
+}
+
struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
unsigned int gc_seq, gfp_t gfp);
void nft_trans_gc_destroy(struct nft_trans_gc *trans);
@@ -1786,7 +1901,7 @@ void nft_chain_filter_fini(void);
void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
-void nf_tables_trans_destroy_flush_work(void);
+void nf_tables_trans_destroy_flush_work(struct net *net);
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
__be64 nf_jiffies64_to_msecs(u64 input);
@@ -1800,6 +1915,8 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re
struct nftables_pernet {
struct list_head tables;
struct list_head commit_list;
+ struct list_head destroy_list;
+ struct list_head commit_set_list;
struct list_head binding_list;
struct list_head module_list;
struct list_head notify_list;
@@ -1809,6 +1926,7 @@ struct nftables_pernet {
unsigned int base_seq;
unsigned int gc_seq;
u8 validate_state;
+ struct work_struct destroy_work;
};
extern unsigned int nf_tables_net_id;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index ff27cb2e1662..03b6165756fc 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -161,6 +161,7 @@ enum {
};
struct nft_inner_tun_ctx {
+ unsigned long cookie;
u16 type;
u16 inner_tunoff;
u16 inner_lloff;
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 60a7d0ce3080..fcf967286e37 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -19,7 +19,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
struct iphdr *iph, _iph;
- u32 len, thoff;
+ u32 len, thoff, skb_len;
iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
sizeof(*iph), &_iph);
@@ -30,8 +30,10 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
return -1;
len = iph_totlen(pkt->skb, iph);
- thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
- if (pkt->skb->len < len)
+ thoff = iph->ihl * 4;
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+
+ if (skb_len < len)
return -1;
else if (len < thoff)
return -1;
@@ -40,7 +42,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
- pkt->thoff = thoff;
+ pkt->thoff = skb_network_offset(pkt->skb) + thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 467d59b9e533..a0633eeaec97 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -31,8 +31,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
struct ipv6hdr *ip6h, _ip6h;
unsigned int thoff = 0;
unsigned short frag_off;
+ u32 pkt_len, skb_len;
int protohdr;
- u32 pkt_len;
ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
sizeof(*ip6h), &_ip6h);
@@ -43,7 +43,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
return -1;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+ if (pkt_len + sizeof(*ip6h) > skb_len)
return -1;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index faa108b1ba67..06985530517b 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -36,6 +36,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
* nf_tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
+ * @net: The network namespace.
* @skb: The skb being processed.
* @laddr: IPv4 address to redirect to or zero.
* @lport: TCP port to redirect to or zero.
@@ -48,7 +49,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
*
* nf_tproxy_handle_time_wait4() consumes the socket reference passed in.
*
- * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * Returns: the listener socket if there's one, the TIME_WAIT socket if
* no such listener is found, or NULL if the TCP header is incomplete.
*/
struct sock *
@@ -107,7 +108,7 @@ nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
*
* nf_tproxy_handle_time_wait6() consumes the socket reference passed in.
*
- * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * Returns: the listener socket if there's one, the TIME_WAIT socket if
* no such listener is found, or NULL if the TCP header is incomplete.
*/
struct sock *
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 167640b843ef..7370fba844ef 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -2,6 +2,7 @@
#ifndef _NFT_FIB_H_
#define _NFT_FIB_H_
+#include <net/l3mdev.h>
#include <net/netfilter/nf_tables.h>
struct nft_fib {
@@ -18,12 +19,39 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
+static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt)
+{
+ const struct net_device *indev = nft_in(pkt);
+ const struct sock *sk;
+
+ switch (nft_hook(pkt)) {
+ case NF_INET_PRE_ROUTING:
+ case NF_INET_INGRESS:
+ case NF_INET_LOCAL_IN:
+ break;
+ default:
+ return false;
+ }
+
+ sk = pkt->skb->sk;
+ if (sk && sk_fullsock(sk))
+ return sk->sk_rx_dst_ifindex == indev->ifindex;
+
+ return nft_fib_is_loopback(pkt->skb, indev);
+}
+
+static inline int nft_fib_l3mdev_master_ifindex_rcu(const struct nft_pktinfo *pkt,
+ const struct net_device *iif)
+{
+ const struct net_device *dev = iif ? iif : pkt->skb->dev;
+
+ return l3mdev_master_ifindex_rcu(dev);
+}
+
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
-int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data);
-
+int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr);
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index ba1238f12a48..d602263590fe 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -41,8 +41,7 @@ void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr);
int nft_meta_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
bool nft_meta_get_reduce(struct nft_regs_track *track,
const struct nft_expr *expr);
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 6d9ba62efd75..19060212988a 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -15,8 +15,7 @@ struct nft_reject {
extern const struct nla_policy nft_reject_policy[];
int nft_reject_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index f3ab0b8a4b18..02914b1df38b 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -30,7 +30,7 @@ struct calipso_doi;
/*
* NetLabel - A management interface for maintaining network packet label
- * mapping tables for explicit packet labling protocols.
+ * mapping tables for explicit packet labeling protocols.
*
* Network protocols such as CIPSO and RIPSO require a label translation layer
* to convert the label on the packet into something meaningful on the host
@@ -97,7 +97,7 @@ struct calipso_doi;
/* NetLabel audit information */
struct netlbl_audit {
- u32 secid;
+ struct lsm_prop prop;
kuid_t loginuid;
unsigned int sessionid;
};
@@ -208,6 +208,7 @@ struct netlbl_lsm_secattr {
* struct netlbl_calipso_ops - NetLabel CALIPSO operations
* @doi_add: add a CALIPSO DOI
* @doi_free: free a CALIPSO DOI
+ * @doi_remove: remove a CALIPSO DOI
* @doi_getdef: returns a reference to a DOI
* @doi_putdef: releases a reference of a DOI
* @doi_walk: enumerate the DOI list
@@ -274,15 +275,17 @@ struct netlbl_calipso_ops {
* on success, NULL on failure.
*
*/
-static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
+static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc_noprof(gfp_t flags)
{
struct netlbl_lsm_cache *cache;
- cache = kzalloc(sizeof(*cache), flags);
+ cache = kzalloc_noprof(sizeof(*cache), flags);
if (cache)
refcount_set(&cache->refcount, 1);
return cache;
}
+#define netlbl_secattr_cache_alloc(...) \
+ alloc_hooks(netlbl_secattr_cache_alloc_noprof(__VA_ARGS__))
/**
* netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct
@@ -311,10 +314,11 @@ static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
* on failure.
*
*/
-static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc(gfp_t flags)
+static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc_noprof(gfp_t flags)
{
- return kzalloc(sizeof(struct netlbl_lsm_catmap), flags);
+ return kzalloc_noprof(sizeof(struct netlbl_lsm_catmap), flags);
}
+#define netlbl_catmap_alloc(...) alloc_hooks(netlbl_catmap_alloc_noprof(__VA_ARGS__))
/**
* netlbl_catmap_free - Free a LSM secattr catmap
@@ -376,10 +380,11 @@ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
* pointer on success, or NULL on failure.
*
*/
-static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags)
+static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc_noprof(gfp_t flags)
{
- return kzalloc(sizeof(struct netlbl_lsm_secattr), flags);
+ return kzalloc_noprof(sizeof(struct netlbl_lsm_secattr), flags);
}
+#define netlbl_secattr_alloc(...) alloc_hooks(netlbl_secattr_alloc_noprof(__VA_ARGS__))
/**
* netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
@@ -470,7 +475,8 @@ void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state);
int netlbl_enabled(void);
int netlbl_sock_setattr(struct sock *sk,
u16 family,
- const struct netlbl_lsm_secattr *secattr);
+ const struct netlbl_lsm_secattr *secattr,
+ bool sk_locked);
void netlbl_sock_delattr(struct sock *sk);
int netlbl_sock_getattr(struct sock *sk,
struct netlbl_lsm_secattr *secattr);
@@ -487,6 +493,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
u16 family,
struct netlbl_lsm_secattr *secattr);
void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway);
+bool netlbl_sk_lock_check(struct sock *sk);
/*
* LSM label mapping cache operations
@@ -614,7 +621,8 @@ static inline int netlbl_enabled(void)
}
static inline int netlbl_sock_setattr(struct sock *sk,
u16 family,
- const struct netlbl_lsm_secattr *secattr)
+ const struct netlbl_lsm_secattr *secattr,
+ bool sk_locked)
{
return -ENOSYS;
}
@@ -673,6 +681,11 @@ static inline struct audit_buffer *netlbl_audit_start(int type,
{
return NULL;
}
+
+static inline bool netlbl_sk_lock_check(struct sock *sk)
+{
+ return true;
+}
#endif /* CONFIG_NETLABEL */
const struct netlbl_calipso_ops *
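
The _noprof/alloc_hooks() split above follows the standard memory-allocation-profiling recipe: the inline helper calls the _noprof allocator and the wrapper macro re-attaches the allocation tag of the real call site. The same shape, reduced to a hypothetical object:

struct example_obj { int val; };	/* illustrative */

static inline struct example_obj *example_obj_alloc_noprof(gfp_t flags)
{
	return kzalloc_noprof(sizeof(struct example_obj), flags);
}
/* callers keep the old name; the allocation is accounted to the call
 * site rather than to this inline helper
 */
#define example_obj_alloc(...)	alloc_hooks(example_obj_alloc_noprof(__VA_ARGS__))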
diff --git a/include/net/netlink.h b/include/net/netlink.h
index c19ff921b661..90a560dc167a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -41,7 +41,8 @@
* nlmsg_get_pos() return current position in message
* nlmsg_trim() trim part of message
* nlmsg_cancel() cancel message construction
- * nlmsg_free() free a netlink message
+ * nlmsg_consume() free a netlink message (expected)
+ * nlmsg_free() free a netlink message (drop)
*
* Message Sending:
* nlmsg_multicast() multicast message to several groups
@@ -117,6 +118,7 @@
* nla_nest_start(skb, type) start a nested attribute
* nla_nest_end(skb, nla) finalize a nested attribute
* nla_nest_cancel(skb, nla) cancel nested attribute construction
+ * nla_put_empty_nest(skb, type) create an empty nest
*
* Attribute Length Calculations:
* nla_attr_size(payload) length of attribute w/o padding
@@ -141,6 +143,8 @@
* nla_get_flag(nla) return 1 if flag is true
* nla_get_msecs(nla) get payload for a msecs attribute
*
+ * The same functions also exist with _default().
+ *
* Attribute Misc:
* nla_memcpy(dest, nla, count) copy attribute into memory
* nla_memcmp(nla, data, size) compare attribute with memory area
@@ -157,7 +161,11 @@
* nla_parse() parse and validate stream of attrs
* nla_parse_nested() parse nested attributes
* nla_for_each_attr() loop over all attributes
+ * nla_for_each_attr_type() loop over all attributes with the
+ * given type
* nla_for_each_nested() loop over the nested attributes
+ * nla_for_each_nested_type() loop over the nested attributes with
+ * the given type
*=========================================================================
*/
@@ -313,7 +321,13 @@ enum nla_policy_validation {
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ * NLA_U8, NLA_U16,
+ * NLA_U32, NLA_U64,
+ * NLA_S8, NLA_S16,
+ * NLA_S32, NLA_S64,
+ * NLA_MSECS,
* NLA_BINARY Validation function called for the attribute.
+ *
* All other Unused - but note that it's a union
*
* Example:
@@ -464,6 +478,7 @@ struct nla_policy {
.max = _len \
}
#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+#define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len)
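NLA_POLICY_MAX_LEN() complements the existing NLA_POLICY_MIN_LEN() for
NLA_BINARY attributes. A small illustrative policy, with hypothetical
attribute names:

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_NAME,		/* variable-length, bounded above */
	MY_ATTR_COOKIE,		/* variable-length, bounded below */
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_NAME]   = NLA_POLICY_MAX_LEN(31),	/* at most 31 bytes */
	[MY_ATTR_COOKIE] = NLA_POLICY_MIN_LEN(8),	/* at least 8 bytes */
};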
/**
* struct nl_info - netlink source information
@@ -603,6 +618,22 @@ static inline int nlmsg_len(const struct nlmsghdr *nlh)
}
/**
+ * nlmsg_payload - message payload, if the message is long enough
+ * @nlh: netlink message header
+ * @len: struct length
+ *
+ * Returns: The netlink message payload/data if the length is sufficient,
+ * otherwise NULL.
+ */
+static inline void *nlmsg_payload(const struct nlmsghdr *nlh, size_t len)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(len))
+ return NULL;
+
+ return nlmsg_data(nlh);
+}
+
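A sketch of how a handler might use nlmsg_payload() to replace the usual
open-coded length check before nlmsg_data(); the handler and its fixed-size
family header are hypothetical:

#include <net/netlink.h>

struct my_family_hdr {		/* hypothetical family header */
	__u32 ifindex;
	__u32 flags;
};

static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct my_family_hdr *hdr;

	hdr = nlmsg_payload(nlh, sizeof(*hdr));
	if (!hdr)
		return -EINVAL;	/* message too short for our header */

	/* hdr is now known to lie fully within the message */
	return 0;
}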
+/**
* nlmsg_attrdata - head of attributes data
* @nlh: netlink message header
* @hdrlen: length of family specific header
@@ -641,7 +672,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
* @nlh: netlink message header
* @remaining: number of bytes remaining in message stream
*
- * Returns the next netlink message in the message stream and
+ * Returns: the next netlink message in the message stream and
* decrements remaining by the size of the current message.
*/
static inline struct nlmsghdr *
@@ -668,7 +699,7 @@ nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
* exceeding maxtype will be rejected, policy must be specified, attributes
* will be validated in the strictest way possible.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_parse(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
@@ -693,7 +724,7 @@ static inline int nla_parse(struct nlattr **tb, int maxtype,
* exceeding maxtype will be ignored and attributes from the policy are not
* always strictly validated (only for new attributes).
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
@@ -718,7 +749,7 @@ static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
* exceeding maxtype will be rejected as well as trailing data, but the
* policy is not completely strictly validated (only for new attributes).
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
const struct nlattr *head,
@@ -822,10 +853,10 @@ nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
/**
* nlmsg_find_attr - find a specific attribute in a netlink message
* @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @attrtype: type of attribute to look for
*
- * Returns the first attribute which matches the specified type.
+ * Returns: the first attribute which matches the specified type.
*/
static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
int hdrlen, int attrtype)
@@ -844,9 +875,9 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in liberal mode.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_validate_deprecated(const struct nlattr *head, int len,
int maxtype,
@@ -867,9 +898,9 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len,
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in strict mode.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
@@ -882,7 +913,7 @@ static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
/**
* nlmsg_validate_deprecated - validate a netlink message including attributes
 * @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
@@ -906,7 +937,7 @@ static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
* nlmsg_report - need to report back to application?
* @nlh: netlink message header
*
- * Returns 1 if a report back to the application is requested.
+ * Returns: 1 if a report back to the application is requested.
*/
static inline int nlmsg_report(const struct nlmsghdr *nlh)
{
@@ -917,7 +948,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
* nlmsg_seq - return the seq number of netlink message
* @nlh: netlink message header
*
- * Returns 0 if netlink message is NULL
+ * Returns: 0 if netlink message is NULL
*/
static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
{
@@ -928,7 +959,7 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
* nlmsg_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
@@ -944,7 +975,7 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
* @payload: length of message payload
* @flags: message flags
*
- * Returns NULL if the tailroom of the skb is insufficient to store
+ * Returns: NULL if the tailroom of the skb is insufficient to store
* the message header and payload.
*/
static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
@@ -963,7 +994,7 @@ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 se
*
* Append data to an existing nlmsg, used when constructing a message
* with multiple fixed-format headers (which is rare).
- * Returns NULL if the tailroom of the skb is insufficient to store
+ * Returns: NULL if the tailroom of the skb is insufficient to store
* the extra payload.
*/
static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
@@ -985,7 +1016,7 @@ static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
* @payload: length of message payload
* @flags: message flags
*
- * Returns NULL if the tailroom of the skb is insufficient to store
+ * Returns: NULL if the tailroom of the skb is insufficient to store
* the message header and payload.
*/
static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
@@ -1029,7 +1060,7 @@ static inline struct sk_buff *nlmsg_new_large(size_t payload)
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
*
- * Corrects the netlink message header to include the appeneded
+ * Corrects the netlink message header to include the appended
* attributes. Only necessary if attributes have been added to
* the message.
*/
@@ -1042,7 +1073,7 @@ static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
* nlmsg_get_pos - return current position in netlink message
* @skb: socket buffer the message is stored in
*
- * Returns a pointer to the current tail of the message.
+ * Returns: a pointer to the current tail of the message.
*/
static inline void *nlmsg_get_pos(struct sk_buff *skb)
{
@@ -1078,7 +1109,7 @@ static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
}
/**
- * nlmsg_free - free a netlink message
+ * nlmsg_free - drop a netlink message
* @skb: socket buffer of netlink message
*/
static inline void nlmsg_free(struct sk_buff *skb)
@@ -1087,6 +1118,15 @@ static inline void nlmsg_free(struct sk_buff *skb)
}
/**
+ * nlmsg_consume - free a netlink message
+ * @skb: socket buffer of netlink message
+ */
+static inline void nlmsg_consume(struct sk_buff *skb)
+{
+ consume_skb(skb);
+}
+
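nlmsg_free() maps to kfree_skb() and is therefore accounted as a drop by skb
tracing, while nlmsg_consume() maps to consume_skb() for the expected
end-of-life path. A sketch of choosing between them (the scenario is
hypothetical):

#include <net/netlink.h>

static int my_notify(bool have_listeners)
{
	struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	if (!msg)
		return -ENOMEM;

	if (!have_listeners) {
		/* Expected outcome, not an error: keep drop tracing quiet. */
		nlmsg_consume(msg);
		return 0;
	}

	/* ... suppose message construction failed mid-way: that is a real
	 * drop, so let kfree_skb()-based tracing see it ...
	 */
	nlmsg_free(msg);
	return -EMSGSIZE;
}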
+/**
* nlmsg_multicast_filtered - multicast a netlink message with filter function
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
@@ -1259,7 +1299,7 @@ static inline int nla_ok(const struct nlattr *nla, int remaining)
* @nla: netlink attribute
* @remaining: number of bytes remaining in attribute stream
*
- * Returns the next netlink attribute in the attribute stream and
+ * Returns: the next netlink attribute in the attribute stream and
* decrements remaining by the size of the current attribute.
*/
static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
@@ -1275,7 +1315,7 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
* @nla: attribute containing the nested attributes
* @attrtype: type of attribute to look for
*
- * Returns the first attribute which matches the specified type.
+ * Returns: the first attribute which matches the specified type.
*/
static inline struct nlattr *
nla_find_nested(const struct nlattr *nla, int attrtype)
@@ -1681,6 +1721,20 @@ static inline u32 nla_get_u32(const struct nlattr *nla)
}
/**
+ * nla_get_u32_default - return payload of u32 attribute or default
+ * @nla: u32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u32 nla_get_u32_default(const struct nlattr *nla, u32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u32(nla);
+}
+
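Each _default() getter folds the common "NULL check, then read" pattern into
one call. A sketch, assuming a hypothetical parsed attribute table tb[] and
attribute id:

#include <net/netlink.h>

#define MY_ATTR_TIMEOUT	1	/* hypothetical attribute id */

static u32 my_get_timeout_ms(struct nlattr **tb)
{
	/* Falls back to 1000 ms when userspace omitted the attribute. */
	return nla_get_u32_default(tb[MY_ATTR_TIMEOUT], 1000);
}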
+/**
* nla_get_be32 - return payload of __be32 attribute
* @nla: __be32 netlink attribute
*/
@@ -1690,6 +1744,21 @@ static inline __be32 nla_get_be32(const struct nlattr *nla)
}
/**
+ * nla_get_be32_default - return payload of be32 attribute or default
+ * @nla: __be32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be32 nla_get_be32_default(const struct nlattr *nla,
+ __be32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be32(nla);
+}
+
+/**
* nla_get_le32 - return payload of __le32 attribute
* @nla: __le32 netlink attribute
*/
@@ -1699,6 +1768,21 @@ static inline __le32 nla_get_le32(const struct nlattr *nla)
}
/**
+ * nla_get_le32_default - return payload of le32 attribute or default
+ * @nla: __le32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le32 nla_get_le32_default(const struct nlattr *nla,
+ __le32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le32(nla);
+}
+
+/**
* nla_get_u16 - return payload of u16 attribute
* @nla: u16 netlink attribute
*/
@@ -1708,6 +1792,20 @@ static inline u16 nla_get_u16(const struct nlattr *nla)
}
/**
+ * nla_get_u16_default - return payload of u16 attribute or default
+ * @nla: u16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u16 nla_get_u16_default(const struct nlattr *nla, u16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u16(nla);
+}
+
+/**
* nla_get_be16 - return payload of __be16 attribute
* @nla: __be16 netlink attribute
*/
@@ -1717,6 +1815,21 @@ static inline __be16 nla_get_be16(const struct nlattr *nla)
}
/**
+ * nla_get_be16_default - return payload of be16 attribute or default
+ * @nla: __be16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be16 nla_get_be16_default(const struct nlattr *nla,
+ __be16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be16(nla);
+}
+
+/**
* nla_get_le16 - return payload of __le16 attribute
* @nla: __le16 netlink attribute
*/
@@ -1726,6 +1839,21 @@ static inline __le16 nla_get_le16(const struct nlattr *nla)
}
/**
+ * nla_get_le16_default - return payload of le16 attribute or default
+ * @nla: __le16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le16 nla_get_le16_default(const struct nlattr *nla,
+ __le16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le16(nla);
+}
+
+/**
* nla_get_u8 - return payload of u8 attribute
* @nla: u8 netlink attribute
*/
@@ -1735,6 +1863,20 @@ static inline u8 nla_get_u8(const struct nlattr *nla)
}
/**
+ * nla_get_u8_default - return payload of u8 attribute or default
+ * @nla: u8 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u8 nla_get_u8_default(const struct nlattr *nla, u8 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u8(nla);
+}
+
+/**
* nla_get_u64 - return payload of u64 attribute
* @nla: u64 netlink attribute
*/
@@ -1748,6 +1890,20 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
}
/**
+ * nla_get_u64_default - return payload of u64 attribute or default
+ * @nla: u64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u64 nla_get_u64_default(const struct nlattr *nla, u64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u64(nla);
+}
+
+/**
* nla_get_uint - return payload of uint attribute
* @nla: uint netlink attribute
*/
@@ -1759,6 +1915,20 @@ static inline u64 nla_get_uint(const struct nlattr *nla)
}
/**
+ * nla_get_uint_default - return payload of uint attribute or default
+ * @nla: uint netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u64 nla_get_uint_default(const struct nlattr *nla, u64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_uint(nla);
+}
+
+/**
* nla_get_be64 - return payload of __be64 attribute
* @nla: __be64 netlink attribute
*/
@@ -1772,6 +1942,21 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
}
/**
+ * nla_get_be64_default - return payload of be64 attribute or default
+ * @nla: __be64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be64 nla_get_be64_default(const struct nlattr *nla,
+ __be64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be64(nla);
+}
+
+/**
* nla_get_le64 - return payload of __le64 attribute
* @nla: __le64 netlink attribute
*/
@@ -1781,6 +1966,21 @@ static inline __le64 nla_get_le64(const struct nlattr *nla)
}
/**
+ * nla_get_le64_default - return payload of le64 attribute or default
+ * @nla: __le64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le64 nla_get_le64_default(const struct nlattr *nla,
+ __le64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le64(nla);
+}
+
+/**
* nla_get_s32 - return payload of s32 attribute
* @nla: s32 netlink attribute
*/
@@ -1790,6 +1990,20 @@ static inline s32 nla_get_s32(const struct nlattr *nla)
}
/**
+ * nla_get_s32_default - return payload of s32 attribute or default
+ * @nla: s32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s32 nla_get_s32_default(const struct nlattr *nla, s32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s32(nla);
+}
+
+/**
* nla_get_s16 - return payload of s16 attribute
* @nla: s16 netlink attribute
*/
@@ -1799,6 +2013,20 @@ static inline s16 nla_get_s16(const struct nlattr *nla)
}
/**
+ * nla_get_s16_default - return payload of s16 attribute or default
+ * @nla: s16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s16 nla_get_s16_default(const struct nlattr *nla, s16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s16(nla);
+}
+
+/**
* nla_get_s8 - return payload of s8 attribute
* @nla: s8 netlink attribute
*/
@@ -1808,6 +2036,20 @@ static inline s8 nla_get_s8(const struct nlattr *nla)
}
/**
+ * nla_get_s8_default - return payload of s8 attribute or default
+ * @nla: s8 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s8 nla_get_s8_default(const struct nlattr *nla, s8 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s8(nla);
+}
+
+/**
* nla_get_s64 - return payload of s64 attribute
* @nla: s64 netlink attribute
*/
@@ -1821,6 +2063,20 @@ static inline s64 nla_get_s64(const struct nlattr *nla)
}
/**
+ * nla_get_s64_default - return payload of s64 attribute or default
+ * @nla: s64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s64 nla_get_s64_default(const struct nlattr *nla, s64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s64(nla);
+}
+
+/**
 * nla_get_sint - return payload of sint attribute
 * @nla: sint netlink attribute
*/
@@ -1832,6 +2088,20 @@ static inline s64 nla_get_sint(const struct nlattr *nla)
}
/**
+ * nla_get_sint_default - return payload of sint attribute or default
+ * @nla: sint netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s64 nla_get_sint_default(const struct nlattr *nla, s64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_sint(nla);
+}
+
+/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
@@ -1844,7 +2114,7 @@ static inline int nla_get_flag(const struct nlattr *nla)
* nla_get_msecs - return payload of msecs attribute
* @nla: msecs netlink attribute
*
- * Returns the number of milliseconds in jiffies.
+ * Returns: the number of milliseconds in jiffies.
*/
static inline unsigned long nla_get_msecs(const struct nlattr *nla)
{
@@ -1854,6 +2124,21 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
}
/**
+ * nla_get_msecs_default - return payload of msecs attribute or default
+ * @nla: msecs netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline unsigned long nla_get_msecs_default(const struct nlattr *nla,
+ unsigned long defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_msecs(nla);
+}
+
+/**
* nla_get_in_addr - return payload of IPv4 address attribute
* @nla: IPv4 address netlink attribute
*/
@@ -1863,6 +2148,21 @@ static inline __be32 nla_get_in_addr(const struct nlattr *nla)
}
/**
+ * nla_get_in_addr_default - return payload of IPv4 address attribute or default
+ * @nla: IPv4 address netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be32 nla_get_in_addr_default(const struct nlattr *nla,
+ __be32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_in_addr(nla);
+}
+
+/**
* nla_get_in6_addr - return payload of IPv6 address attribute
* @nla: IPv6 address netlink attribute
*/
@@ -1891,10 +2191,11 @@ static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla)
* @src: netlink attribute to duplicate from
* @gfp: GFP mask
*/
-static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
+static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp)
{
- return kmemdup(nla_data(src), nla_len(src), gfp);
+ return kmemdup_noprof(nla_data(src), nla_len(src), gfp);
}
+#define nla_memdup(...) alloc_hooks(nla_memdup_noprof(__VA_ARGS__))
/**
* nla_nest_start_noflag - Start a new level of nested attributes
@@ -1905,7 +2206,7 @@ static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
* marked their nest attributes with NLA_F_NESTED flag. New APIs should use
* nla_nest_start() which sets the flag.
*
- * Returns the container attribute or NULL on error
+ * Returns: the container attribute or NULL on error
*/
static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
int attrtype)
@@ -1926,7 +2227,7 @@ static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
* Unlike nla_nest_start_noflag(), mark the nest attribute with NLA_F_NESTED
* flag. This is the preferred function to use in new code.
*
- * Returns the container attribute or NULL on error
+ * Returns: the container attribute or NULL on error
*/
static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
@@ -1939,9 +2240,9 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
* @start: container attribute
*
* Corrects the container attribute header to include the all
- * appeneded attributes.
+ * appended attributes.
*
- * Returns the total data length of the skb.
+ * Returns: the total data length of the skb.
*/
static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
{
@@ -1963,6 +2264,20 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
}
/**
+ * nla_put_empty_nest - Create an empty nest
+ * @skb: socket buffer the message is stored in
+ * @attrtype: attribute type of the container
+ *
+ * This function is a helper for creating empty nests.
+ *
+ * Returns: 0 when successful or -EMSGSIZE on failure.
+ */
+static inline int nla_put_empty_nest(struct sk_buff *skb, int attrtype)
+{
+ return nla_nest_start(skb, attrtype) ? 0 : -EMSGSIZE;
+}
+
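An empty nest is handy when the nest's mere presence carries meaning (for
example, "feature enabled, no sub-attributes yet") and saves the usual
nla_nest_start()/nla_nest_end() pair. A sketch with a hypothetical
attribute:

#include <net/netlink.h>

#define MY_ATTR_FEATURE	1	/* hypothetical nest attribute id */

static int my_fill_feature(struct sk_buff *skb, bool feature_on)
{
	if (!feature_on)
		return 0;

	/* Presence of the (empty) nest signals the feature is on. */
	return nla_put_empty_nest(skb, MY_ATTR_FEATURE);
}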
+/**
* __nla_validate_nested - Validate a stream of nested attributes
* @start: container attribute
* @maxtype: maximum attribute type to be expected
@@ -1972,9 +2287,9 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
*
* Validates all attributes in the nested attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
- * ignored. See documenation of struct nla_policy for more details.
+ * ignored. See documentation of struct nla_policy for more details.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
@@ -2007,7 +2322,7 @@ nla_validate_nested_deprecated(const struct nlattr *start, int maxtype,
* nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
* @skb: socket buffer the message is stored in
*
- * Return true if padding is needed to align the next attribute (nla_data()) to
+ * Return: true if padding is needed to align the next attribute (nla_data()) to
* a 64-bit aligned area.
*/
static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
@@ -2034,7 +2349,7 @@ static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
* This will only be done in architectures which do not have
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
*
- * Returns zero on success or a negative error code.
+ * Returns: zero on success or a negative error code.
*/
static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
{
@@ -2071,6 +2386,18 @@ static inline int nla_total_size_64bit(int payload)
pos = nla_next(pos, &(rem)))
/**
+ * nla_for_each_attr_type - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @type: required attribute type for @pos
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_attr_type(pos, type, head, len, rem) \
+ nla_for_each_attr(pos, head, len, rem) \
+ if (nla_type(pos) == type)
+
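A sketch of the typed iterator over a raw attribute stream; head and len
would typically come from nlmsg_attrdata()/nlmsg_attrlen(), and the
attribute id is hypothetical:

#include <linux/printk.h>
#include <net/netlink.h>

#define MY_ATTR_ADDR	1	/* hypothetical attribute id */

static void my_walk_addrs(const struct nlattr *head, int len)
{
	const struct nlattr *pos;
	int rem;

	/* Visits only attributes whose type is MY_ATTR_ADDR. */
	nla_for_each_attr_type(pos, MY_ATTR_ADDR, head, len, rem)
		pr_debug("addr attribute, %d payload bytes\n", nla_len(pos));
}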
+/**
* nla_for_each_nested - iterate over nested attributes
* @pos: loop counter, set to current attribute
* @nla: attribute containing the nested attributes
@@ -2080,6 +2407,17 @@ static inline int nla_total_size_64bit(int payload)
nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
/**
+ * nla_for_each_nested_type - iterate over nested attributes
+ * @pos: loop counter, set to current attribute
+ * @type: required attribute type for @pos
+ * @nla: attribute containing the nested attributes
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_nested_type(pos, type, nla, rem) \
+ nla_for_each_nested(pos, nla, rem) \
+ if (nla_type(pos) == type)
+
+/**
* nla_is_last - Test if attribute is last in stream
* @nla: attribute to test
* @rem: bytes remaining in stream
diff --git a/include/net/netmem.h b/include/net/netmem.h
index d8b810245c1d..386164fb9c18 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -8,6 +8,83 @@
#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <net/net_debug.h>
+
+/* net_iov */
+
+DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
+/* We overload the LSB of the struct page pointer to indicate whether it's
+ * a page or net_iov.
+ */
+#define NET_IOV 0x01UL
+
+enum net_iov_type {
+ NET_IOV_DMABUF,
+ NET_IOV_IOURING,
+
+ /* Force size to unsigned long to make the NET_IOV_ASSERTS below pass.
+ */
+ NET_IOV_MAX = ULONG_MAX
+};
+
+struct net_iov {
+ enum net_iov_type type;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ struct net_iov_area *owner;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+struct net_iov_area {
+ /* Array of net_iovs for this area. */
+ struct net_iov *niovs;
+ size_t num_niovs;
+
+ /* Offset into the dma-buf where this chunk starts. */
+ unsigned long base_virtual;
+};
+
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these fields
+ * without worrying whether the underlying fields belong to a page or net_iov.
+ *
+ * The non-net stack fields of struct page are private to the mm stack and must
+ * never be mirrored to net_iov.
+ */
+#define NET_IOV_ASSERT_OFFSET(pg, iov) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
+NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
+NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NET_IOV_ASSERT_OFFSET
+
+static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
+{
+ return niov->owner;
+}
+
+static inline unsigned int net_iov_idx(const struct net_iov *niov)
+{
+ return niov - net_iov_owner(niov)->niovs;
+}
+
+/* netmem */
+
/**
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
@@ -19,23 +96,204 @@
*/
typedef unsigned long __bitwise netmem_ref;
-/* This conversion fails (returns NULL) if the netmem_ref is not struct page
- * backed.
+static inline bool netmem_is_net_iov(const netmem_ref netmem)
+{
+ return (__force unsigned long)netmem & NET_IOV;
+}
+
+/**
+ * __netmem_to_page - unsafely get pointer to the &page backing @netmem
+ * @netmem: netmem reference to convert
*
- * Currently struct page is the only possible netmem, and this helper never
- * fails.
+ * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
+ * e.g. when it's a header buffer, performs faster and generates smaller
+ * object code (no check for the LSB, no WARN). When @netmem points to IOV,
+ * provokes undefined behaviour.
+ *
+ * Return: pointer to the &page (garbage if @netmem is not page-backed).
*/
-static inline struct page *netmem_to_page(netmem_ref netmem)
+static inline struct page *__netmem_to_page(netmem_ref netmem)
{
return (__force struct page *)netmem;
}
-/* Converting from page to netmem is always safe, because a page can always be
- * a netmem.
+/* This conversion fails (returns NULL) if the netmem_ref is not struct page
+ * backed.
*/
+static inline struct page *netmem_to_page(netmem_ref netmem)
+{
+ if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+ return NULL;
+
+ return __netmem_to_page(netmem);
+}
+
+static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return (struct net_iov *)((__force unsigned long)netmem &
+ ~NET_IOV);
+
+ DEBUG_NET_WARN_ON_ONCE(true);
+ return NULL;
+}
+
+static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
+{
+ return (__force netmem_ref)((unsigned long)niov | NET_IOV);
+}
+
static inline netmem_ref page_to_netmem(struct page *page)
{
return (__force netmem_ref)page;
}
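The conversions above are a tag-and-mask round trip on the pointer's low
bit. A small sketch exercising the helpers defined in this file:

#include <net/netmem.h>

static void netmem_tag_demo(struct page *page, struct net_iov *niov)
{
	netmem_ref pm = page_to_netmem(page);		/* LSB clear */
	netmem_ref nm = net_iov_to_netmem(niov);	/* LSB set */

	WARN_ON(netmem_is_net_iov(pm));		/* page-backed */
	WARN_ON(!netmem_is_net_iov(nm));	/* net_iov-backed */
}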
+/**
+ * virt_to_netmem - convert virtual memory pointer to a netmem reference
+ * @data: host memory pointer to convert
+ *
+ * Return: netmem reference to the &page backing this virtual address.
+ */
+static inline netmem_ref virt_to_netmem(const void *data)
+{
+ return page_to_netmem(virt_to_page(data));
+}
+
+static inline int netmem_ref_count(netmem_ref netmem)
+{
+ /* The non-pp refcount of net_iov is always 1. On net_iov, we only
+ * support pp refcounting which uses the pp_ref_count field.
+ */
+ if (netmem_is_net_iov(netmem))
+ return 1;
+
+ return page_ref_count(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return 0;
+
+ return page_to_pfn(netmem_to_page(netmem));
+}
+
+static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
+{
+ return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
+}
+
+/**
+ * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
+ * @netmem: netmem reference to get the pointer from
+ *
+ * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
+ * e.g. when it's a header buffer, performs faster and generates smaller
+ * object code (avoids clearing the LSB). When @netmem points to IOV,
+ * provokes invalid memory access.
+ *
+ * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
+ */
+static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
+{
+ return __netmem_to_page(netmem)->pp;
+}
+
+static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->pp;
+}
+
+static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
+{
+ return &__netmem_clear_lsb(netmem)->pp_ref_count;
+}
+
+static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
+{
+ /* NUMA node preference only makes sense if we're allocating
+ * system memory. Memory providers (which give us net_iovs)
+ * choose for us.
+ */
+ if (netmem_is_net_iov(netmem))
+ return true;
+
+ return page_to_nid(netmem_to_page(netmem)) == pref_nid;
+}
+
+static inline netmem_ref netmem_compound_head(netmem_ref netmem)
+{
+ /* niov are never compounded */
+ if (netmem_is_net_iov(netmem))
+ return netmem;
+
+ return page_to_netmem(compound_head(netmem_to_page(netmem)));
+}
+
+/**
+ * __netmem_address - unsafely get pointer to the memory backing @netmem
+ * @netmem: netmem reference to get the pointer for
+ *
+ * Unsafe version of netmem_address(). When @netmem is always page-backed,
+ * e.g. when it's a header buffer, performs faster and generates smaller
+ * object code (no check for the LSB). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the memory (garbage if @netmem is not page-backed).
+ */
+static inline void *__netmem_address(netmem_ref netmem)
+{
+ return page_address(__netmem_to_page(netmem));
+}
+
+static inline void *netmem_address(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return NULL;
+
+ return __netmem_address(netmem);
+}
+
+/**
+ * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
+ * @netmem: netmem reference to check
+ *
+ * Return: true if @netmem is page-backed and the page was allocated under
+ * memory pressure, false otherwise.
+ */
+static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return false;
+
+ return page_is_pfmemalloc(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->dma_addr;
+}
+
+void get_netmem(netmem_ref netmem);
+void put_netmem(netmem_ref netmem);
+
+#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL) \
+ do { \
+ if (!netmem_is_net_iov(NETMEM)) \
+ dma_unmap_addr_set(PTR, ADDR_NAME, VAL); \
+ else \
+ dma_unmap_addr_set(PTR, ADDR_NAME, 0); \
+ } while (0)
+
+static inline void netmem_dma_unmap_page_attrs(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (!addr)
+ return;
+
+ dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+}
+
#endif /* _NET_NETMEM_H */
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 78214f1b43a2..9b36f0ff0c20 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -15,6 +15,7 @@ struct netns_core {
int sysctl_somaxconn;
int sysctl_optmem_max;
u8 sysctl_txrehash;
+ u8 sysctl_tstamp_allow_data;
#ifdef CONFIG_PROC_FS
struct prot_inuse __percpu *prot_inuse;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c356c458b340..6373e3f17da8 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,18 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+struct sysctl_fib_multipath_hash_seed {
+ u32 user_seed;
+ u32 mp_seed;
+};
+#endif
+
+struct udp_tunnel_gro {
+ struct sock __rcu *sk;
+ struct hlist_head list;
+};
+
struct netns_ipv4 {
/* Cacheline organization can be found documented in
* Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
@@ -69,6 +81,8 @@ struct netns_ipv4 {
__cacheline_group_begin(netns_ipv4_read_rx);
u8 sysctl_ip_early_demux;
u8 sysctl_tcp_early_demux;
+ u8 sysctl_tcp_l3mdev_accept;
+ /* 3 bytes hole, try to pack */
int sysctl_tcp_reordering;
int sysctl_tcp_rmem[3];
__cacheline_group_end(netns_ipv4_read_rx);
@@ -76,6 +90,11 @@ struct netns_ipv4 {
struct inet_timewait_death_row tcp_death_row;
struct udp_table *udp_table;
+#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+ /* Not in a pernet subsys because need to be available at GRO stage */
+ struct udp_tunnel_gro udp_tunnel_gro[2];
+#endif
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
@@ -102,6 +121,9 @@ struct netns_ipv4 {
#endif
struct hlist_head *fib_table_hash;
struct sock *fibnl;
+ struct hlist_head *fib_info_hash;
+ unsigned int fib_info_hash_bits;
+ unsigned int fib_info_cnt;
struct sock *mc_autojoin_sk;
@@ -115,7 +137,10 @@ struct netns_ipv4 {
u8 sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-
+ int sysctl_icmp_msgs_per_sec;
+ int sysctl_icmp_msgs_burst;
+ atomic_t icmp_global_credit;
+ u32 icmp_global_stamp;
u32 ip_rt_min_pmtu;
int ip_rt_mtu_expires;
int ip_rt_min_advmss;
@@ -141,9 +166,6 @@ struct netns_ipv4 {
u8 sysctl_fwmark_reflect;
u8 sysctl_tcp_fwmark_accept;
-#ifdef CONFIG_NET_L3_MASTER_DEV
- u8 sysctl_tcp_l3mdev_accept;
-#endif
u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
@@ -166,10 +188,13 @@ struct netns_ipv4 {
u8 sysctl_tcp_retries2;
u8 sysctl_tcp_orphan_retries;
u8 sysctl_tcp_tw_reuse;
+ unsigned int sysctl_tcp_tw_reuse_delay;
int sysctl_tcp_fin_timeout;
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
+ int sysctl_tcp_rto_min_us;
+ int sysctl_tcp_rto_max_ms;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
@@ -245,18 +270,22 @@ struct netns_ipv4 {
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ struct sysctl_fib_multipath_hash_seed sysctl_fib_multipath_hash_seed;
u32 sysctl_fib_multipath_hash_fields;
u8 sysctl_fib_multipath_use_neigh;
u8 sysctl_fib_multipath_hash_policy;
#endif
struct fib_notifier_ops *notifier_ops;
- unsigned int fib_seq; /* protected by rtnl_mutex */
+ unsigned int fib_seq; /* writes protected by rtnl_mutex */
struct fib_notifier_ops *ipmr_notifier_ops;
unsigned int ipmr_seq; /* protected by rtnl_mutex */
atomic_t rt_genid;
siphash_key_t ip_id_key;
+ struct hlist_head *inet_addr_lst;
+ struct delayed_work addr_chk_work;
};
+
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 5f2cfd84570a..47dc70d8100a 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -72,6 +72,7 @@ struct netns_ipv6 {
struct rt6_statistics *rt6_stats;
struct timer_list ip6_fib_timer;
struct hlist_head *fib_table_hash;
+ spinlock_t fib_table_hash_lock;
struct fib6_table *fib6_main_tbl;
struct list_head fib6_walkers;
rwlock_t fib6_walker_lock;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 02bbdc577f8e..a6a0bf4a247e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -15,6 +15,9 @@ struct netns_nf {
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
+#ifdef CONFIG_LWTUNNEL
+ struct ctl_table_header *nf_lwtnl_dir_header;
+#endif
#endif
struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 7eff3d981b89..d25cd7a9c5ff 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -125,14 +125,14 @@ struct netns_sctp {
int pf_expose;
/*
- * Policy for preforming sctp/socket accounting
+ * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
* 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
*/
int sndbuf_policy;
/*
- * Policy for preforming sctp/socket accounting
+ * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_rcvbuf
* 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
*/
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 423b52eca908..23dd647fe024 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -43,6 +43,7 @@ struct netns_xfrm {
struct hlist_head __rcu *state_bysrc;
struct hlist_head __rcu *state_byspi;
struct hlist_head __rcu *state_byseq;
+ struct hlist_head __percpu *state_cache_input;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
@@ -51,7 +52,6 @@ struct netns_xfrm {
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
unsigned int idx_generator;
- struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
@@ -83,6 +83,7 @@ struct netns_xfrm {
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
+ struct delayed_work nat_keepalive_work;
};
#endif
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 7ca315ad500e..572e69cda476 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -105,7 +105,7 @@ struct nh_grp_entry_stats {
struct nh_grp_entry {
struct nexthop *nh;
struct nh_grp_entry_stats __percpu *stats;
- u8 weight;
+ u16 weight;
union {
struct {
@@ -152,6 +152,8 @@ struct nexthop {
u8 protocol; /* app managing this nh */
u8 nh_flags;
bool is_group;
+ bool dead;
+ spinlock_t lock; /* protect dead and f6i_list */
refcount_t refcnt;
struct rcu_head rcu;
@@ -192,7 +194,7 @@ struct nh_notifier_single_info {
};
struct nh_notifier_grp_entry_info {
- u8 weight;
+ u16 weight;
struct nh_notifier_single_info nh;
};
@@ -267,7 +269,7 @@ static inline bool nexthop_get(struct nexthop *nh)
static inline void nexthop_put(struct nexthop *nh)
{
if (refcount_dec_and_test(&nh->refcnt))
- call_rcu(&nh->rcu, nexthop_free_rcu);
+ call_rcu_hurry(&nh->rcu, nexthop_free_rcu);
}
static inline bool nexthop_cmp(const struct nexthop *nh1,
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index e82f55f543bb..09efcaed7c3f 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -332,7 +332,7 @@ struct nci_core_init_rsp_1 {
__le32 nfcc_features;
__u8 num_supported_rf_interfaces;
__u8 supported_rf_interfaces[]; /* variable size array */
- /* continuted in nci_core_init_rsp_2 */
+ /* continued in nci_core_init_rsp_2 */
} __packed;
struct nci_core_init_rsp_2 {
@@ -475,7 +475,7 @@ struct nci_rf_discover_ntf {
#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
struct activation_params_nfca_poll_iso_dep {
__u8 rats_res_len;
- __u8 rats_res[20];
+ __u8 rats_res[NFC_ATS_MAXSIZE];
};
struct activation_params_nfcb_poll_iso_dep {
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index ea8595651c38..e180bdf2f82b 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -265,6 +265,10 @@ struct nci_dev {
/* stored during intf_activated_ntf */
__u8 remote_gb[NFC_MAX_GT_LEN];
__u8 remote_gb_len;
+
+ /* stored during intf_activated_ntf */
+ __u8 target_ats[NFC_ATS_MAXSIZE];
+ __u8 target_ats_len;
};
/* ----- NCI Devices ----- */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 3d07abacf08b..127e6c7d910d 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -80,12 +80,14 @@ struct nfc_ops {
#define NFC_ATR_REQ_GT_OFFSET 14
/**
- * struct nfc_target - NFC target descriptiom
+ * struct nfc_target - NFC target description
*
* @sens_res: 2 bytes describing the target SENS_RES response, if the target
* is a type A one. The %sens_res most significant byte must be byte 2
* as described by the NFC Forum digital specification (i.e. the platform
* configuration one) while %sens_res least significant byte is byte 1.
+ * @ats_len: length of Answer To Select in bytes
+ * @ats: Answer To Select returned by an ISO 14443 Type A target upon activation
*/
struct nfc_target {
u32 idx;
@@ -105,6 +107,8 @@ struct nfc_target {
u8 is_iso15693;
u8 iso15693_dsfid;
u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE];
+ u8 ats_len;
+ u8 ats[NFC_ATS_MAXSIZE];
};
/**
@@ -230,10 +234,10 @@ static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev,
}
/**
- * nfc_set_drvdata - set driver specifc data
+ * nfc_set_drvdata - set driver specific data
*
* @dev: The nfc device
- * @data: Pointer to driver specifc data
+ * @data: Pointer to driver specific data
*/
static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
{
@@ -241,7 +245,7 @@ static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
}
/**
- * nfc_get_drvdata - get driver specifc data
+ * nfc_get_drvdata - get driver specific data
*
* @dev: The nfc device
*/
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 4c752f799957..a994dea74596 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -192,7 +192,7 @@ enum nl802154_iftype {
* @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
* nl802154_wpan_phy_tx_power
* @NL802154_CAP_ATTR_MIN_CCA_ED_LEVEL: minimum value for cca_ed_level
- * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maxmimum value for cca_ed_level
+ * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maximum value for cca_ed_level
* @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
* @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
* @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
diff --git a/include/net/p8022.h b/include/net/p8022.h
deleted file mode 100644
index a29e224ac498..000000000000
--- a/include/net/p8022.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_P8022_H
-#define _NET_P8022_H
-
-struct net_device;
-struct packet_type;
-struct sk_buff;
-
-struct datalink_proto *
-register_8022_client(unsigned char type,
- int (*func)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev));
-void unregister_8022_client(struct datalink_proto *proto);
-#endif
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 1d397c1a0043..93f2c31baf9b 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -52,13 +52,17 @@
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H
+#include <linux/dma-mapping.h>
+
#include <net/page_pool/types.h>
+#include <net/net_debug.h>
+#include <net/netmem.h>
#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);
bool page_pool_get_stats(const struct page_pool *pool,
struct page_pool_stats *stats);
@@ -73,7 +77,7 @@ static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
return data;
}
-static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
return data;
}
@@ -100,8 +104,7 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
*
* Get a page fragment from the page allocator or page_pool caches.
*
- * Return:
- * Return allocated page fragment, otherwise return NULL.
+ * Return: allocated page fragment, otherwise return NULL.
*/
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
unsigned int *offset,
@@ -112,22 +115,22 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
return page_pool_alloc_frag(pool, offset, size, gfp);
}
-static inline struct page *page_pool_alloc(struct page_pool *pool,
- unsigned int *offset,
- unsigned int *size, gfp_t gfp)
+static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
{
unsigned int max_size = PAGE_SIZE << pool->p.order;
- struct page *page;
+ netmem_ref netmem;
if ((*size << 1) > max_size) {
*size = max_size;
*offset = 0;
- return page_pool_alloc_pages(pool, gfp);
+ return page_pool_alloc_netmems(pool, gfp);
}
- page = page_pool_alloc_frag(pool, offset, *size, gfp);
- if (unlikely(!page))
- return NULL;
+ netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
+ if (unlikely(!netmem))
+ return 0;
/* There is very likely not enough space for another fragment, so append
* the remaining size to the current fragment to avoid truesize
@@ -138,7 +141,23 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
pool->frag_offset = max_size;
}
- return page;
+ return netmem;
+}
+
+static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+
+ return page_pool_alloc_netmem(pool, offset, size, gfp);
+}
+
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
}
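A sketch of the intended calling pattern: the pool decides between a whole
page and a fragment, reporting where the caller may write via *offset and
*size (the driver context is hypothetical):

#include <net/page_pool/helpers.h>

static netmem_ref my_alloc_rx_buf(struct page_pool *pool,
				  unsigned int *offset, unsigned int *size)
{
	*size = 2048;	/* requested size; the pool may enlarge it */

	return page_pool_dev_alloc_netmem(pool, offset, size);
}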
/**
@@ -151,8 +170,7 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
* depending on the requested size in order to allocate memory with least memory
* utilization and performance penalty.
*
- * Return:
- * Return allocated page or page fragment, otherwise return NULL.
+ * Return: allocated page or page fragment, otherwise return NULL.
*/
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
unsigned int *offset,
@@ -186,8 +204,7 @@ static inline void *page_pool_alloc_va(struct page_pool *pool,
* This is just a thin wrapper around the page_pool_alloc() API, and
* it returns va of the allocated page or page fragment.
*
- * Return:
- * Return the va for the allocated page or page fragment, otherwise return NULL.
+ * Return: the va for the allocated page or page fragment, otherwise return NULL.
*/
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
unsigned int *size)
@@ -204,12 +221,17 @@ static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
* Get the stored dma direction. A driver might decide to store this locally
* and avoid the extra cache line from page_pool to determine the direction.
*/
-static
-inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+static inline enum dma_data_direction
+page_pool_get_dma_dir(const struct page_pool *pool)
{
return pool->p.dma_dir;
}
+static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
+{
+ atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
+}
+
/**
* page_pool_fragment_page() - split a fresh page into fragments
* @page: page to split
@@ -230,11 +252,12 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
*/
static inline void page_pool_fragment_page(struct page *page, long nr)
{
- atomic_long_set(&page->pp_ref_count, nr);
+ page_pool_fragment_netmem(page_to_netmem(page), nr);
}
-static inline long page_pool_unref_page(struct page *page, long nr)
+static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
+ atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
long ret;
/* If nr == pp_ref_count then we have cleared all remaining
@@ -251,19 +274,19 @@ static inline long page_pool_unref_page(struct page *page, long nr)
* initially, and only overwrite it when the page is partitioned into
* more than one piece.
*/
- if (atomic_long_read(&page->pp_ref_count) == nr) {
+ if (atomic_long_read(pp_ref_count) == nr) {
/* As we have ensured nr is always one for constant case using
* the BUILD_BUG_ON(), only need to handle the non-constant case
* here for pp_ref_count draining, which is a rare case.
*/
BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
if (!__builtin_constant_p(nr))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return 0;
}
- ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ ret = atomic_long_sub_return(nr, pp_ref_count);
WARN_ON(ret < 0);
/* We are the last user here too, reset pp_ref_count back to 1 to
@@ -272,20 +295,46 @@ static inline long page_pool_unref_page(struct page *page, long nr)
* page_pool_unref_page() currently.
*/
if (unlikely(!ret))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return ret;
}
+static inline long page_pool_unref_page(struct page *page, long nr)
+{
+ return page_pool_unref_netmem(page_to_netmem(page), nr);
+}
+
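A sketch of the fragment accounting described above: one fresh page-pool
page split between two users, each dropping its reference independently
(the driver context is hypothetical):

#include <net/page_pool/helpers.h>

static void my_frag_demo(struct page_pool *pool, struct page *page)
{
	page_pool_fragment_page(page, 2);	/* two outstanding users */

	page_pool_put_page(pool, page, -1, false);	/* first user done */
	page_pool_put_page(pool, page, -1, false);	/* last ref: recycle */
}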
+static inline void page_pool_ref_netmem(netmem_ref netmem)
+{
+ atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
+}
+
static inline void page_pool_ref_page(struct page *page)
{
- atomic_long_inc(&page->pp_ref_count);
+ page_pool_ref_netmem(page_to_netmem(page));
}
-static inline bool page_pool_is_last_ref(struct page *page)
+static inline bool page_pool_unref_and_test(netmem_ref netmem)
{
/* If page_pool_unref_page() returns 0, we were the last user */
- return page_pool_unref_page(page, 1) == 0;
+ return page_pool_unref_netmem(netmem, 1) == 0;
+}
+
+static inline void page_pool_put_netmem(struct page_pool *pool,
+ netmem_ref netmem,
+ unsigned int dma_sync_size,
+ bool allow_direct)
+{
+ /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+ * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ if (!page_pool_unref_and_test(netmem))
+ return;
+
+ page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
+#endif
}
/**
@@ -306,15 +355,15 @@ static inline void page_pool_put_page(struct page_pool *pool,
unsigned int dma_sync_size,
bool allow_direct)
{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
-#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_ref(page))
- return;
+ page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
+ allow_direct);
+}
- page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
-#endif
+static inline void page_pool_put_full_netmem(struct page_pool *pool,
+ netmem_ref netmem,
+ bool allow_direct)
+{
+ page_pool_put_netmem(pool, netmem, -1, allow_direct);
}
/**
@@ -329,7 +378,7 @@ static inline void page_pool_put_page(struct page_pool *pool,
static inline void page_pool_put_full_page(struct page_pool *pool,
struct page *page, bool allow_direct)
{
- page_pool_put_page(pool, page, -1, allow_direct);
+ page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}
/**
@@ -346,6 +395,12 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
page_pool_put_full_page(pool, page, true);
}
+static inline void page_pool_recycle_direct_netmem(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ page_pool_put_full_netmem(pool, netmem, true);
+}
+
#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
(sizeof(dma_addr_t) > sizeof(unsigned long))
@@ -363,6 +418,16 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}
+static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
+{
+ dma_addr_t ret = netmem_get_dma_addr(netmem);
+
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
+
+ return ret;
+}
+
/**
* page_pool_get_dma_addr() - Retrieve the stored DMA address.
* @page: page allocated from a page pool
@@ -370,7 +435,7 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
* Fetch the DMA address of the page. The page pool to which the page belongs
* must had been created with PP_FLAG_DMA_MAP.
*/
-static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
dma_addr_t ret = page->dma_addr;
@@ -380,19 +445,46 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
return ret;
}
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+ const dma_addr_t dma_addr,
+ u32 offset, u32 dma_sync_size)
{
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
- page->dma_addr = addr >> PAGE_SHIFT;
+ dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
+ offset + pool->p.offset, dma_sync_size,
+ page_pool_get_dma_dir(pool));
+}
- /* We assume page alignment to shave off bottom bits,
- * if this "compression" doesn't work we need to drop.
- */
- return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
- }
+/**
+ * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
+ * @pool: &page_pool the @page belongs to
+ * @page: page to sync
+ * @offset: offset from page start to "hard" start if using PP frags
+ * @dma_sync_size: size of the data written to the page
+ *
+ * Can be used as a shorthand to sync Rx pages before accessing them in the
+ * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
+ * Note that this version performs DMA sync unconditionally, even if the
+ * associated PP doesn't perform sync-for-device.
+ */
+static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+ const struct page *page,
+ u32 offset, u32 dma_sync_size)
+{
+ __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
+ dma_sync_size);
+}
- page->dma_addr = addr;
- return false;
+static inline void
+page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
+ const netmem_ref netmem, u32 offset,
+ u32 dma_sync_size)
+{
+ if (!pool->dma_sync_for_cpu)
+ return;
+
+ __page_pool_dma_sync_for_cpu(pool,
+ page_pool_get_dma_addr_netmem(netmem),
+ offset, dma_sync_size);
}
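A sketch of the Rx-path usage: sync only the bytes the device wrote before
the CPU reads the frame. This assumes the pool was created with
PP_FLAG_DMA_MAP; the surrounding driver context is hypothetical:

#include <net/page_pool/helpers.h>

static void my_rx_frame(struct page_pool *pool, struct page *page,
			u32 frame_len)
{
	/* Make the device-written bytes visible to the CPU. */
	page_pool_dma_sync_for_cpu(pool, page, 0, frame_len);

	/* The frame may now be read via page_address(page) + pool offset. */
}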
static inline bool page_pool_put(struct page_pool *pool)
@@ -406,4 +498,9 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
page_pool_update_nid(pool, new_nid);
}
+static inline bool page_pool_is_unreadable(struct page_pool *pool)
+{
+ return !!pool->mp_ops;
+}
+
#endif /* _NET_PAGE_POOL_HELPERS_H */
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
new file mode 100644
index 000000000000..ada4f968960a
--- /dev/null
+++ b/include/net/page_pool/memory_provider.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
+#define _NET_PAGE_POOL_MEMORY_PROVIDER_H
+
+#include <net/netmem.h>
+#include <net/page_pool/types.h>
+
+struct netdev_rx_queue;
+struct netlink_ext_ack;
+struct sk_buff;
+
+struct memory_provider_ops {
+ netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
+ bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
+ int (*init)(struct page_pool *pool);
+ void (*destroy)(struct page_pool *pool);
+ int (*nl_fill)(void *mp_priv, struct sk_buff *rsp,
+ struct netdev_rx_queue *rxq);
+ void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
+};
+
+bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
+void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
+void net_mp_niov_clear_page_pool(struct net_iov *niov);
+
+int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+ struct pp_memory_provider_params *p);
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack);
+void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+ struct pp_memory_provider_params *old_p);
+void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *old_p);
+
+/**
+ * net_mp_netmem_place_in_cache() - give a netmem to a page pool
+ * @pool: the page pool to place the netmem into
+ * @netmem: netmem to give
+ *
+ * Push an accounted netmem into the page pool's allocation cache. The caller
+ * must ensure that there is space in the cache. It should only be called off
+ * the mp_ops->alloc_netmems() path.
+ */
+static inline void net_mp_netmem_place_in_cache(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ pool->alloc.cache[pool->alloc.count++] = netmem;
+}
+
+#endif
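As a rough illustration of how these hooks fit together, the sketch below wires a hypothetical allocator (my_mp, my_mp_get_free_niov(), my_mp_put_niov()) into struct memory_provider_ops; the net_iov/netmem conversion helpers are assumed to come from <net/netmem.h>, and the optional hooks are left out.

static netmem_ref my_mp_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
        struct my_mp *mp = pool->mp_priv;       /* hypothetical state */
        struct net_iov *niov = my_mp_get_free_niov(mp);

        if (!niov)
                return 0;

        net_mp_niov_set_page_pool(pool, niov);
        return net_iov_to_netmem(niov);
}

static bool my_mp_release_netmem(struct page_pool *pool, netmem_ref netmem)
{
        struct net_iov *niov = netmem_to_net_iov(netmem);

        net_mp_niov_clear_page_pool(niov);
        my_mp_put_niov(pool->mp_priv, niov);
        return false;   /* not recycled back into the pool */
}

static const struct memory_provider_ops my_mp_ops = {
        .alloc_netmems  = my_mp_alloc_netmems,
        .release_netmem = my_mp_release_netmem,
        /* .init, .destroy, .nl_fill and .uninstall omitted here */
};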
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 5e43a08d3231..431b593de709 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -6,6 +6,8 @@
#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
+#include <linux/xarray.h>
+#include <net/netmem.h>
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
* map/unmap
@@ -19,8 +21,21 @@
* device driver responsibility
*/
#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+
+/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
+ * this must be able to support unreadable netmem, where netmem_address() would
+ * return NULL. This flag should not be set for header page_pools.
+ *
+ * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
+ * page_pool_params.slow.queue_idx.
+ */
+#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3)
+
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
- PP_FLAG_SYSTEM_POOL)
+ PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
+
+/* Index limit to stay within PP_DMA_INDEX_BITS for DMA indices */
+#define PP_DMA_INDEX_LIMIT XA_LIMIT(1, BIT(PP_DMA_INDEX_BITS) - 1)
/*
* Fast allocation side cache array/stack
@@ -40,25 +55,28 @@
#define PP_ALLOC_CACHE_REFILL 64
struct pp_alloc_cache {
u32 count;
- struct page *cache[PP_ALLOC_CACHE_SIZE];
+ netmem_ref cache[PP_ALLOC_CACHE_SIZE];
};
/**
* struct page_pool_params - page pool parameters
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
+ * @fast: params accessed frequently on hotpath
* @order: 2^order pages on allocation
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate pages from
* @dev: device, for DMA pre-mapping purposes
- * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ * @slow: params with slowpath access only (initialization and Netlink)
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
+ * @queue_idx: queue idx this page_pool is being created for.
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
+ * PP_FLAG_ALLOW_UNREADABLE_NETMEM.
*/
struct page_pool_params {
struct_group_tagged(page_pool_params_fast, fast,
- unsigned int flags;
unsigned int order;
unsigned int pool_size;
int nid;
@@ -70,8 +88,10 @@ struct page_pool_params {
);
struct_group_tagged(page_pool_params_slow, slow,
struct net_device *netdev;
+ unsigned int queue_idx;
+ unsigned int flags;
/* private: used by test code only */
- void (*init_callback)(struct page *page, void *arg);
+ void (*init_callback)(netmem_ref netmem, void *arg);
void *init_arg;
);
};
@@ -126,16 +146,42 @@ struct page_pool_stats {
};
#endif
+/* The whole frag API block must stay within one cacheline. On 32-bit systems,
+ * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
+ * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
+ * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that
+ * one for simplicity.
+ * Having it aligned to a cacheline boundary may be excessive and brings no
+ * benefit.
+ */
+#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+
+struct memory_provider_ops;
+
+struct pp_memory_provider_params {
+ void *mp_priv;
+ const struct memory_provider_ops *mp_ops;
+};
+
struct page_pool {
struct page_pool_params_fast p;
int cpuid;
- bool has_init_callback;
+ u32 pages_state_hold_cnt;
+
+ bool has_init_callback:1; /* slow::init_callback is set */
+ bool dma_map:1; /* Perform DMA mapping */
+ bool dma_sync:1; /* Perform DMA sync for device */
+ bool dma_sync_for_cpu:1; /* Perform DMA sync for cpu */
+#ifdef CONFIG_PAGE_POOL_STATS
+ bool system:1; /* This is a global percpu pool */
+#endif
+ __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
long frag_users;
- struct page *frag_page;
+ netmem_ref frag_page;
unsigned int frag_offset;
- u32 pages_state_hold_cnt;
+ __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
struct delayed_work release_dw;
void (*disconnect)(void *pool);
@@ -176,6 +222,11 @@ struct page_pool {
*/
struct ptr_ring ring;
+ void *mp_priv;
+ const struct memory_provider_ops *mp_ops;
+
+ struct xarray dma_mapped;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
@@ -196,14 +247,17 @@ struct page_pool {
struct {
struct hlist_node list;
u64 detach_time;
- u32 napi_id;
u32 id;
} user;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
+netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
+ unsigned int *offset, unsigned int size,
+ gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
int cpuid);
@@ -211,11 +265,11 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
struct xdp_mem_info;
#ifdef CONFIG_PAGE_POOL
+void page_pool_disable_direct_recycling(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
- struct xdp_mem_info *mem);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count);
+ const struct xdp_mem_info *mem);
+void page_pool_put_netmem_bulk(netmem_ref *data, u32 count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -223,16 +277,18 @@ static inline void page_pool_destroy(struct page_pool *pool)
static inline void page_pool_use_xdp_mem(struct page_pool *pool,
void (*disconnect)(void *),
- struct xdp_mem_info *mem)
+ const struct xdp_mem_info *mem)
{
}
-static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count)
+static inline void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
}
#endif
+void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
+ unsigned int dma_sync_size,
+ bool allow_direct);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);
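Since @flags now lives in the slow params together with @netdev and @queue_idx, a driver creating a pool for an unreadable-netmem-capable queue might initialize it as in this sketch (the pdev, netdev and rxq variables are placeholders):

struct page_pool_params pp_params = {
        .order          = 0,
        .pool_size      = 1024,
        .nid            = NUMA_NO_NODE,
        .dev            = &pdev->dev,
        .napi           = &rxq->napi,
        .dma_dir        = DMA_FROM_DEVICE,
        .max_len        = PAGE_SIZE,
        .offset         = 0,
        /* slow params: */
        .netdev         = netdev,
        .queue_idx      = rxq->index,
        .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
                          PP_FLAG_ALLOW_UNREADABLE_NETMEM,
};
struct page_pool *pool = page_pool_create(&pp_params);

if (IS_ERR(pool))
        return PTR_ERR(pool);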
diff --git a/include/net/pfcp.h b/include/net/pfcp.h
new file mode 100644
index 000000000000..af14f970b80e
--- /dev/null
+++ b/include/net/pfcp.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PFCP_H_
+#define _PFCP_H_
+
+#include <uapi/linux/if_ether.h>
+#include <net/dst_metadata.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/ipv6.h>
+#include <net/udp_tunnel.h>
+#include <uapi/linux/udp.h>
+#include <uapi/linux/ip.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#define PFCP_PORT 8805
+
+/* PFCP protocol header */
+struct pfcphdr {
+ u8 flags;
+ u8 message_type;
+ __be16 message_length;
+};
+
+/* PFCP header flags */
+#define PFCP_SEID_FLAG BIT(0)
+#define PFCP_MP_FLAG BIT(1)
+
+#define PFCP_VERSION_MASK GENMASK(4, 0)
+
+#define PFCP_HLEN (sizeof(struct udphdr) + sizeof(struct pfcphdr))
+
+/* PFCP node related messages */
+struct pfcphdr_node {
+ u8 seq_number[3];
+ u8 reserved;
+};
+
+/* PFCP session related messages */
+struct pfcphdr_session {
+ __be64 seid;
+ u8 seq_number[3];
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u8 message_priority:4,
+ reserved:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:4,
+ message_priority:4;
+#else
+#error "Please fix <asm/byteorder>"
+#endif
+};
+
+struct pfcp_metadata {
+ u8 type;
+ __be64 seid;
+} __packed;
+
+enum {
+ PFCP_TYPE_NODE = 0,
+ PFCP_TYPE_SESSION = 1,
+};
+
+#define PFCP_HEADROOM (sizeof(struct iphdr) + sizeof(struct udphdr) + \
+ sizeof(struct pfcphdr) + sizeof(struct ethhdr))
+#define PFCP6_HEADROOM (sizeof(struct ipv6hdr) + sizeof(struct udphdr) + \
+ sizeof(struct pfcphdr) + sizeof(struct ethhdr))
+
+static inline struct pfcphdr *pfcp_hdr(struct sk_buff *skb)
+{
+ return (struct pfcphdr *)(udp_hdr(skb) + 1);
+}
+
+static inline struct pfcphdr_node *pfcp_hdr_node(struct sk_buff *skb)
+{
+ return (struct pfcphdr_node *)(pfcp_hdr(skb) + 1);
+}
+
+static inline struct pfcphdr_session *pfcp_hdr_session(struct sk_buff *skb)
+{
+ return (struct pfcphdr_session *)(pfcp_hdr(skb) + 1);
+}
+
+static inline bool netif_is_pfcp(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "pfcp");
+}
+
+#endif
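A minimal sketch of consuming these accessors on receive; my_handle_session() and my_handle_node() are placeholders and length validation is elided:

static int my_pfcp_rcv(struct sk_buff *skb)
{
        struct pfcphdr *hdr = pfcp_hdr(skb);

        if (ntohs(udp_hdr(skb)->dest) != PFCP_PORT)
                return -EPROTO;

        if (hdr->flags & PFCP_SEID_FLAG) {
                /* Session-related message: an 8-byte SEID precedes
                 * the sequence number.
                 */
                struct pfcphdr_session *sess = pfcp_hdr_session(skb);

                return my_handle_session(be64_to_cpu(sess->seid));
        }

        /* Node-related message */
        return my_handle_node(pfcp_hdr_node(skb));
}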
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index e9dc8dca5817..37a3e83531c6 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -11,13 +11,13 @@
#define PN_DEV_H
#include <linux/list.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
struct net;
struct phonet_device_list {
struct list_head list;
- struct mutex lock;
+ spinlock_t lock;
};
struct phonet_device_list *phonet_device_list(struct net *net);
@@ -38,11 +38,11 @@ int phonet_address_add(struct net_device *dev, u8 addr);
int phonet_address_del(struct net_device *dev, u8 addr);
u8 phonet_address_get(struct net_device *dev, u8 addr);
int phonet_address_lookup(struct net *net, u8 addr);
-void phonet_address_notify(int event, struct net_device *dev, u8 addr);
+void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr);
int phonet_route_add(struct net_device *dev, u8 daddr);
int phonet_route_del(struct net_device *dev, u8 daddr);
-void rtm_phonet_notify(int event, struct net_device *dev, u8 dst);
+void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst);
struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr);
struct net_device *phonet_route_output(struct net *net, u8 daddr);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a4ee43f493bb..c64fd896b1f9 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -74,6 +74,15 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block)
return block && block->index;
}
+#ifdef CONFIG_NET_CLS_ACT
+DECLARE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
+
+static inline bool tcf_block_bypass_sw(struct tcf_block *block)
+{
+ return block && !atomic_read(&block->useswcnt);
+}
+#endif
+
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
WARN_ON(tcf_block_shared(block));
@@ -310,7 +319,7 @@ tcf_exts_hw_stats_update(const struct tcf_exts *exts,
* tcf_exts_has_actions - check if at least one action is present
* @exts: tc filter extensions handle
*
- * Returns true if at least one action is present.
+ * Returns: true if at least one action is present.
*/
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
@@ -482,7 +491,7 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
struct tcf_pkt_info *);
/**
- * tcf_em_tree_match - evaulate an ematch tree
+ * tcf_em_tree_match - evaluate an ematch tree
*
* @skb: socket buffer of the packet in question
* @tree: ematch tree to be used for evaluation
@@ -492,7 +501,7 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
* through all ematches respecting their logic relations returning
* as soon as the result is obvious.
*
- * Returns 1 if the ematch tree as-one matches, no ematches are configured
+ * Returns: 1 if the ematch tree as-one matches, no ematches are configured
* or ematch is not enabled in the kernel, otherwise 0 is returned.
*/
static inline int tcf_em_tree_match(struct sk_buff *skb,
@@ -746,10 +755,20 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol;
cls_common->prio = tp->prio >> 16;
+ cls_common->skip_sw = tc_skip_sw(flags);
if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
cls_common->extack = extack;
}
+static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags)
+{
+ if (tp->usesw)
+ return;
+ if (tc_skip_sw(flags) && tc_in_hw(flags))
+ return;
+ tp->usesw = true;
+}
+
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
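A sketch of where tcf_proto_update_usesw() would sit in a classifier's change path; my_offload_filter() is hypothetical. Once every filter on a block is both skip_sw and in_hw, tcf_block_bypass_sw() above lets the core skip software classification entirely.

static int my_cls_change(struct tcf_proto *tp, void *fh, u32 flags)
{
        int err = my_offload_filter(tp, fh, flags);

        if (err)
                return err;

        /* Marks tp as needing the software datapath unless the filter
         * is both skip_sw and in_hw.
         */
        tcf_proto_update_usesw(tp, flags);
        return 0;
}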
diff --git a/include/net/proto_memory.h b/include/net/proto_memory.h
new file mode 100644
index 000000000000..a6ab2f4f5e28
--- /dev/null
+++ b/include/net/proto_memory.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PROTO_MEMORY_H
+#define _PROTO_MEMORY_H
+
+#include <net/sock.h>
+#include <net/hotdata.h>
+
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool
+proto_memory_pressure(const struct proto *prot)
+{
+ if (!prot->memory_pressure)
+ return false;
+ return !!READ_ONCE(*prot->memory_pressure);
+}
+
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+ return proto_memory_pressure(sk->sk_prot);
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+ if (!sk->sk_prot->memory_pressure)
+ return false;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
+static inline long
+proto_memory_allocated(const struct proto *prot)
+{
+ return max(0L, atomic_long_read(prot->memory_allocated));
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+ return proto_memory_allocated(sk->sk_prot);
+}
+
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+ int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+ if (val)
+ atomic_long_add(val, proto->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_add(const struct sock *sk, int val)
+{
+ struct proto *proto = sk->sk_prot;
+
+ val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
+}
+
+static inline void
+sk_memory_allocated_sub(const struct sock *sk, int val)
+{
+ struct proto *proto = sk->sk_prot;
+
+ val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
+}
+
+#endif /* _PROTO_MEMORY_H */
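To illustrate the batching: per-CPU updates stay local until they cross net_hotdata.sysctl_mem_pcpu_rsv, so the shared atomic is touched rarely and sk_memory_allocated() is approximate by design. The charge helper below is a simplified sketch, not the real reclaim logic.

static bool my_proto_charge(struct sock *sk, int pages)
{
        long limit = READ_ONCE(sk->sk_prot->sysctl_mem[2]);

        sk_memory_allocated_add(sk, pages);

        /* sk_memory_allocated() only reflects batches that have been
         * drained into proto->memory_allocated, so this limit check
         * is approximate by design.
         */
        if (sk_memory_allocated(sk) > limit) {
                sk_memory_allocated_sub(sk, pages);
                return false;
        }
        return true;
}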
diff --git a/include/net/psample.h b/include/net/psample.h
index 0509d2d6be67..5071b5fc2b59 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -24,7 +24,10 @@ struct psample_metadata {
u8 out_tc_valid:1,
out_tc_occ_valid:1,
latency_valid:1,
- unused:5;
+ rate_as_probability:1,
+ unused:4;
+ const u8 *user_cookie;
+ u32 user_cookie_len;
};
struct psample_group *psample_group_get(struct net *net, u32 group_num);
@@ -35,13 +38,15 @@ struct sk_buff;
#if IS_ENABLED(CONFIG_PSAMPLE)
-void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 sample_rate, const struct psample_metadata *md);
+void psample_sample_packet(struct psample_group *group,
+ const struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md);
#else
static inline void psample_sample_packet(struct psample_group *group,
- struct sk_buff *skb, u32 sample_rate,
+ const struct sk_buff *skb,
+ u32 sample_rate,
const struct psample_metadata *md)
{
}
diff --git a/include/net/red.h b/include/net/red.h
index 425364de0df7..159a09359fc0 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -40,7 +40,7 @@
max_P should be small (not 1), usually 0.01..0.02 is good value.
max_P is chosen as a number, so that max_P/(th_max-th_min)
- is a negative power of two in order arithmetics to contain
+ is a negative power of two in order arithmetic to contain
only shifts.
@@ -159,7 +159,7 @@ static inline u32 red_maxp(u8 Plog)
static inline void red_set_vars(struct red_vars *v)
{
/* Reset average queue length, the value is strictly bound
- * to the parameters below, reseting hurts a bit but leaving
+ * to the parameters below, resetting hurts a bit but leaving
* it might result in an unreasonable qavg for a while. --TGR
*/
v->qavg = 0;
@@ -233,10 +233,10 @@ static inline void red_set_parms(struct red_parms *p,
int delta = qth_max - qth_min;
u32 max_p_delta;
- p->qth_min = qth_min << Wlog;
- p->qth_max = qth_max << Wlog;
- p->Wlog = Wlog;
- p->Plog = Plog;
+ WRITE_ONCE(p->qth_min, qth_min << Wlog);
+ WRITE_ONCE(p->qth_max, qth_max << Wlog);
+ WRITE_ONCE(p->Wlog, Wlog);
+ WRITE_ONCE(p->Plog, Plog);
if (delta <= 0)
delta = 1;
p->qth_delta = delta;
@@ -244,7 +244,7 @@ static inline void red_set_parms(struct red_parms *p,
max_P = red_maxp(Plog);
max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
}
- p->max_P = max_P;
+ WRITE_ONCE(p->max_P, max_P);
max_p_delta = max_P / delta;
max_p_delta = max(max_p_delta, 1U);
p->max_P_reciprocal = reciprocal_value(max_p_delta);
@@ -257,7 +257,7 @@ static inline void red_set_parms(struct red_parms *p,
p->target_min = qth_min + 2*delta;
p->target_max = qth_min + 3*delta;
- p->Scell_log = Scell_log;
+ WRITE_ONCE(p->Scell_log, Scell_log);
p->Scell_max = (255 << Scell_log);
if (stab)
@@ -340,7 +340,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
{
/*
* NOTE: v->qavg is fixed point number with point at Wlog.
- * The formula below is equvalent to floating point
+ * The formula below is equivalent to floating point
* version:
*
* qavg = qavg*(1-W) + backlog*W;
@@ -375,7 +375,7 @@ static inline int red_mark_probability(const struct red_parms *p,
OK. qR is random number in the interval
(0..1/max_P)*(qth_max-qth_min)
i.e. 0..(2^Plog). If we used floating point
- arithmetics, it would be: (2^Plog)*rnd_num,
+ arithmetic, it would be: (2^Plog)*rnd_num,
where rnd_num is less than 1.
Taking into account, that qavg have fixed
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebf9e028d1ef..6633627f6e76 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -71,8 +71,6 @@ enum environment_cap {
* CRDA and can be used by other regulatory requests. When a
* the last request is not yet processed we must yield until it
* is processed before processing any new requests.
- * @country_ie_checksum: checksum of the last processed and accepted
- * country IE
* @country_ie_env: lets us know if the AP is telling us we are outdoor,
* indoor, or if it doesn't matter
* @list: used to insert into the reg_requests_list linked list
@@ -123,7 +121,7 @@ struct regulatory_request {
* @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to
* ensure that passive scan flags and beaconing flags may not be lifted by
* cfg80211 due to regulatory beacon hints. For more information on beacon
- * hints read the documenation for regulatory_hint_found_beacon()
+ * hints read the documentation for regulatory_hint_found_beacon()
* @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference
* that even though they may have programmed their own custom power
* setting prior to wiphy registration, they want to ensure their channel
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 004e651e6067..b07b1cd14e9f 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -18,6 +18,7 @@
#include <linux/refcount.h>
#include <net/sock.h>
+#include <net/rstreason.h>
struct request_sock;
struct sk_buff;
@@ -34,7 +35,8 @@ struct request_sock_ops {
void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
void (*send_reset)(const struct sock *sk,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ enum sk_rst_reason reason);
void (*destructor)(struct request_sock *req);
void (*syn_ack_timeout)(const struct request_sock *req);
};
@@ -126,38 +128,6 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb,
return sk;
}
-static inline struct request_sock *
-reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
- bool attach_listener)
-{
- struct request_sock *req;
-
- req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
- if (!req)
- return NULL;
- req->rsk_listener = NULL;
- if (attach_listener) {
- if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
- kmem_cache_free(ops->slab, req);
- return NULL;
- }
- req->rsk_listener = sk_listener;
- }
- req->rsk_ops = ops;
- req_to_sk(req)->sk_prot = sk_listener->sk_prot;
- sk_node_init(&req_to_sk(req)->sk_node);
- sk_tx_queue_clear(req_to_sk(req));
- req->saved_syn = NULL;
- req->syncookie = 0;
- req->timeout = 0;
- req->num_timeout = 0;
- req->num_retrans = 0;
- req->sk = NULL;
- refcount_set(&req->rsk_refcnt, 0);
-
- return req;
-}
-
static inline void __reqsk_free(struct request_sock *req)
{
req->rsk_ops->destructor(req);
@@ -169,14 +139,14 @@ static inline void __reqsk_free(struct request_sock *req)
static inline void reqsk_free(struct request_sock *req)
{
- WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
+ DEBUG_NET_WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
__reqsk_free(req);
}
static inline void reqsk_put(struct request_sock *req)
{
if (refcount_dec_and_test(&req->rsk_refcnt))
- reqsk_free(req);
+ __reqsk_free(req);
}
/*
@@ -282,4 +252,16 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
return atomic_read(&queue->young);
}
+/* RFC 7323 2.3 Using the Window Scale Option
+ * The window field (SEG.WND) of every outgoing segment, with the
+ * exception of <SYN> segments, MUST be right-shifted by
+ * Rcv.Wind.Shift bits.
+ *
+ * This means the SEG.WND carried in SYNACK cannot exceed 65535.
+ * We use this property to harden the TCP stack while in NEW_SYN_RECV state.
+ */
+static inline u32 tcp_synack_window(const struct request_sock *req)
+{
+ return min(req->rsk_rcv_wnd, 65535U);
+}
#endif /* _REQUEST_SOCK_H */
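A worked example of the clamp, assuming a request socket req in scope whose rsk_rcv_wnd has grown to 1048576: window scaling never applies to <SYN> segments, so the SYNACK still advertises 65535.

u32 synack_wnd = tcp_synack_window(req);  /* min(1048576, 65535) == 65535 */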
diff --git a/include/net/route.h b/include/net/route.h
index d4a0147942f1..8e39aa822cf9 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -27,6 +27,8 @@
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/inet_dscp.h>
+#include <net/sock.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -35,8 +37,6 @@
#include <linux/cache.h>
#include <linux/security.h>
-#define RTO_ONLINK 0x01
-
static inline __u8 ip_sock_rt_scope(const struct sock *sk)
{
if (sock_flag(sk, SOCK_LOCALROUTE))
@@ -47,7 +47,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk)
static inline __u8 ip_sock_rt_tos(const struct sock *sk)
{
- return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
+ return READ_ONCE(inet_sk(sk)->tos) & INET_DSCP_MASK;
}
struct ip_tunnel_info;
@@ -77,6 +77,17 @@ struct rtable {
rt_pmtu:31;
};
+#define dst_rtable(_ptr) container_of_const(_ptr, struct rtable, dst)
+
+/**
+ * skb_rtable - Returns the skb &rtable
+ * @skb: buffer
+ */
+static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+{
+ return dst_rtable(skb_dst(skb));
+}
+
static inline bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_is_input != 0;
@@ -119,6 +130,33 @@ struct in_device;
int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
+
+static inline void inet_sk_init_flowi4(const struct inet_sock *inet,
+ struct flowi4 *fl4)
+{
+ const struct ip_options_rcu *ip4_opt;
+ const struct sock *sk;
+ __be32 daddr;
+
+ rcu_read_lock();
+ ip4_opt = rcu_dereference(inet->inet_opt);
+
+ /* Source routing option overrides the socket destination address */
+ if (ip4_opt && ip4_opt->opt.srr)
+ daddr = ip4_opt->opt.faddr;
+ else
+ daddr = inet->inet_daddr;
+ rcu_read_unlock();
+
+ sk = &inet->sk;
+ flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
+ ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
+ sk->sk_protocol, inet_sk_flowi_flags(sk), daddr,
+ inet->inet_saddr, inet->inet_dport,
+ inet->inet_sport, sk->sk_uid);
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
+}
+
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
const struct sk_buff *skb);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
@@ -141,15 +179,22 @@ static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4
return ip_route_output_flow(net, flp, NULL);
}
+/* Simplistic IPv4 route lookup function.
+ * This is only suitable for some particular use cases: since the flowi4
+ * structure is only partially set, it may bypass some fib-rules.
+ */
static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
- __be32 saddr, u8 tos, int oif)
+ __be32 saddr, dscp_t dscp,
+ int oif, __u8 scope)
{
struct flowi4 fl4 = {
.flowi4_oif = oif,
- .flowi4_tos = tos,
+ .flowi4_tos = inet_dscp_to_dsfield(dscp),
+ .flowi4_scope = scope,
.daddr = daddr,
.saddr = saddr,
};
+
return ip_route_output_key(net, &fl4);
}
@@ -168,43 +213,34 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
return ip_route_output_flow(net, fl4, sk);
}
-static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4,
- __be32 daddr, __be32 saddr,
- __be32 gre_key, __u8 tos, int oif)
+enum skb_drop_reason
+ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag);
+enum skb_drop_reason
+ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev);
+enum skb_drop_reason
+ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ const struct sk_buff *hint);
+
+static inline enum skb_drop_reason
+ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, dscp_t dscp,
+ struct net_device *devin)
{
- memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = oif;
- fl4->daddr = daddr;
- fl4->saddr = saddr;
- fl4->flowi4_tos = tos;
- fl4->flowi4_proto = IPPROTO_GRE;
- fl4->fl4_gre_key = gre_key;
- return ip_route_output_key(net, fl4);
-}
-int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev,
- struct in_device *in_dev, u32 *itag);
-int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin);
-int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin,
- const struct sk_buff *hint);
-
-static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
-{
- int err;
+ enum skb_drop_reason reason;
rcu_read_lock();
- err = ip_route_input_noref(skb, dst, src, tos, devin);
- if (!err) {
+ reason = ip_route_input_noref(skb, dst, src, dscp, devin);
+ if (!reason) {
skb_dst_force(skb);
if (!skb_dst(skb))
- err = -EINVAL;
+ reason = SKB_DROP_REASON_NOT_SPECIFIED;
}
rcu_read_unlock();
- return err;
+ return reason;
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
@@ -249,8 +285,6 @@ static inline void ip_rt_put(struct rtable *rt)
dst_release(&rt->dst);
}
-#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
-
extern const __u8 ip_tos2prio[16];
static inline char rt_tos2priority(u8 tos)
@@ -292,6 +326,9 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
if (inet_test_bit(TRANSPARENT, sk))
flow_flags |= FLOWI_FLAG_ANYSRC;
+ if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !sport)
+ flow_flags |= FLOWI_FLAG_ANY_SPORT;
+
flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk), protocol, flow_flags, dst,
src, dport, sport, sk->sk_uid);
@@ -348,10 +385,15 @@ static inline int inet_iif(const struct sk_buff *skb)
static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
- struct net *net = dev_net(dst->dev);
- if (hoplimit == 0)
+ if (hoplimit == 0) {
+ const struct net *net;
+
+ rcu_read_lock();
+ net = dev_net_rcu(dst->dev);
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
+ rcu_read_unlock();
+ }
return hoplimit;
}
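The converted APIs in use, as a sketch: output lookups now take a dscp_t plus an explicit scope, and input lookups return a drop reason rather than an errno. The dscp conversion helpers (inet_dsfield_to_dscp(), ip4h_dscp()) are assumed available from <net/inet_dscp.h>/<net/route.h>; the address variables are placeholders.

static int my_output_lookup(struct net *net, __be32 daddr, __be32 saddr)
{
        struct rtable *rt;

        rt = ip_route_output(net, daddr, saddr,
                             inet_dsfield_to_dscp(0),
                             0 /* any oif */, RT_SCOPE_UNIVERSE);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        ip_rt_put(rt);
        return 0;
}

static void my_input_lookup(struct sk_buff *skb, struct net_device *dev)
{
        const struct iphdr *iph = ip_hdr(skb);
        enum skb_drop_reason reason;

        reason = ip_route_input(skb, iph->daddr, iph->saddr,
                                ip4h_dscp(iph), dev);
        if (reason)
                kfree_skb_reason(skb, reason);
}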
diff --git a/include/net/rps.h b/include/net/rps.h
index 7660243e905b..d8ab3a08bcc4 100644
--- a/include/net/rps.h
+++ b/include/net/rps.h
@@ -39,7 +39,7 @@ struct rps_dev_flow {
* The rps_dev_flow_table structure contains a table of flow mappings.
*/
struct rps_dev_flow_table {
- unsigned int mask;
+ u8 log;
struct rcu_head rcu;
struct rps_dev_flow flows[];
};
@@ -57,9 +57,10 @@ struct rps_dev_flow_table {
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
- u32 mask;
+ struct rcu_head rcu;
+ u32 mask;
- u32 ents[] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
@@ -122,4 +123,56 @@ static inline void sock_rps_record_flow(const struct sock *sk)
#endif
}
+static inline void sock_rps_delete_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *table;
+ u32 hash, index;
+
+ if (!static_branch_unlikely(&rfs_needed))
+ return;
+
+ hash = READ_ONCE(sk->sk_rxhash);
+ if (!hash)
+ return;
+
+ rcu_read_lock();
+ table = rcu_dereference(net_hotdata.rps_sock_flow_table);
+ if (table) {
+ index = hash & table->mask;
+ if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
+ WRITE_ONCE(table->ents[index], RPS_NO_CPU);
+ }
+ rcu_read_unlock();
+#endif
+}
+
+static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ return ++sd->input_queue_tail;
+#else
+ return 0;
+#endif
+}
+
+static inline void rps_input_queue_tail_save(u32 *dest, u32 tail)
+{
+#ifdef CONFIG_RPS
+ WRITE_ONCE(*dest, tail);
+#endif
+}
+
+static inline void rps_input_queue_head_add(struct softnet_data *sd, int val)
+{
+#ifdef CONFIG_RPS
+ WRITE_ONCE(sd->input_queue_head, sd->input_queue_head + val);
+#endif
+}
+
+static inline void rps_input_queue_head_incr(struct softnet_data *sd)
+{
+ rps_input_queue_head_add(sd, 1);
+}
+
#endif /* _NET_RPS_H */
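With rps_dev_flow_table now storing a log2 size instead of a mask, the mask is derived at the point of use; an illustrative lookup (not the in-tree one):

static struct rps_dev_flow *my_flow_lookup(struct rps_dev_flow_table *table,
                                           u32 hash)
{
        u32 mask = (1U << table->log) - 1;      /* e.g. log == 8 -> mask 0xff */

        return &table->flows[hash & mask];
}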
diff --git a/include/net/rstreason.h b/include/net/rstreason.h
new file mode 100644
index 000000000000..979ac87b5d99
--- /dev/null
+++ b/include/net/rstreason.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_RSTREASON_H
+#define _LINUX_RSTREASON_H
+#include <net/dropreason-core.h>
+#include <uapi/linux/mptcp.h>
+
+#define DEFINE_RST_REASON(FN, FNe) \
+ FN(NOT_SPECIFIED) \
+ FN(NO_SOCKET) \
+ FN(TCP_INVALID_ACK_SEQUENCE) \
+ FN(TCP_RFC7323_PAWS) \
+ FN(TCP_TOO_OLD_ACK) \
+ FN(TCP_ACK_UNSENT_DATA) \
+ FN(TCP_FLAGS) \
+ FN(TCP_OLD_ACK) \
+ FN(TCP_ABORT_ON_DATA) \
+ FN(TCP_TIMEWAIT_SOCKET) \
+ FN(INVALID_SYN) \
+ FN(TCP_ABORT_ON_CLOSE) \
+ FN(TCP_ABORT_ON_LINGER) \
+ FN(TCP_ABORT_ON_MEMORY) \
+ FN(TCP_STATE) \
+ FN(TCP_KEEPALIVE_TIMEOUT) \
+ FN(TCP_DISCONNECT_WITH_DATA) \
+ FN(MPTCP_RST_EUNSPEC) \
+ FN(MPTCP_RST_EMPTCP) \
+ FN(MPTCP_RST_ERESOURCE) \
+ FN(MPTCP_RST_EPROHIBIT) \
+ FN(MPTCP_RST_EWQ2BIG) \
+ FN(MPTCP_RST_EBADPERF) \
+ FN(MPTCP_RST_EMIDDLEBOX) \
+ FN(ERROR) \
+ FNe(MAX)
+
+/**
+ * enum sk_rst_reason - the reasons for a socket reset
+ *
+ * The reasons for an sk reset, used in the TCP/MPTCP protocols.
+ *
+ * There are three parts in order:
+ * 1) skb drop reasons: relying on drop reasons, e.g. for passive resets
+ * 2) independent reset reasons: such as active reset reasons
+ * 3) reset reasons in MPTCP: only for MPTCP use
+ */
+enum sk_rst_reason {
+ /* Refer to include/net/dropreason-core.h
+ * Rely on skb drop reasons because it indicates exactly why RST
+ * could happen.
+ */
+ /** @SK_RST_REASON_NOT_SPECIFIED: reset reason is not specified */
+ SK_RST_REASON_NOT_SPECIFIED,
+ /** @SK_RST_REASON_NO_SOCKET: no valid socket that can be used */
+ SK_RST_REASON_NO_SOCKET,
+ /**
+ * @SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
+ * field because ack sequence is not in the window between snd_una
+ * and snd_nxt
+ */
+ SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE,
+ /**
+ * @SK_RST_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
+ * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
+ */
+ SK_RST_REASON_TCP_RFC7323_PAWS,
+ /** @SK_RST_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */
+ SK_RST_REASON_TCP_TOO_OLD_ACK,
+ /**
+ * @SK_RST_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't
+ * sent yet
+ */
+ SK_RST_REASON_TCP_ACK_UNSENT_DATA,
+ /** @SK_RST_REASON_TCP_FLAGS: TCP flags invalid */
+ SK_RST_REASON_TCP_FLAGS,
+ /** @SK_RST_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */
+ SK_RST_REASON_TCP_OLD_ACK,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_DATA: abort on data
+ * corresponding to LINUX_MIB_TCPABORTONDATA
+ */
+ SK_RST_REASON_TCP_ABORT_ON_DATA,
+
+ /* The independent reset reasons start here */
+ /** @SK_RST_REASON_TCP_TIMEWAIT_SOCKET: happen on the timewait socket */
+ SK_RST_REASON_TCP_TIMEWAIT_SOCKET,
+ /**
+ * @SK_RST_REASON_INVALID_SYN: receive bad syn packet
+ * RFC 793 says if the state is not CLOSED/LISTEN/SYN-SENT then
+ * "fourth, check the SYN bit,...If the SYN is in the window it is
+ * an error, send a reset"
+ */
+ SK_RST_REASON_INVALID_SYN,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_CLOSE: abort on close
+ * corresponding to LINUX_MIB_TCPABORTONCLOSE
+ */
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_LINGER: abort on linger
+ * corresponding to LINUX_MIB_TCPABORTONLINGER
+ */
+ SK_RST_REASON_TCP_ABORT_ON_LINGER,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_MEMORY: abort on memory
+ * corresponding to LINUX_MIB_TCPABORTONMEMORY
+ */
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY,
+ /**
+ * @SK_RST_REASON_TCP_STATE: abort on tcp state
+ * Please see RFC 9293 for all possible reset conditions
+ */
+ SK_RST_REASON_TCP_STATE,
+ /**
+ * @SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT: keepalive timeout
+ * When all the keepalive chances have been exhausted, i.e. the
+ * keepalive timer has expired, we have to reset the connection
+ */
+ SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT,
+ /**
+ * @SK_RST_REASON_TCP_DISCONNECT_WITH_DATA: disconnect when write
+ * queue is not empty
+ * It means user has written data into the write queue when doing
+ * disconnecting, so we have to send an RST.
+ */
+ SK_RST_REASON_TCP_DISCONNECT_WITH_DATA,
+
+ /* Copied from include/uapi/linux/mptcp.h.
+ * These reset values will not change since they adhere to
+ * RFC 8684, so do not touch them. Each definition is listed
+ * below.
+ */
+ /**
+ * @SK_RST_REASON_MPTCP_RST_EUNSPEC: Unspecified error.
+ * This is the default error; it implies that the subflow is no
+ * longer available. The presence of this option shows that the
+ * RST was generated by an MPTCP-aware device.
+ */
+ SK_RST_REASON_MPTCP_RST_EUNSPEC,
+ /**
+ * @SK_RST_REASON_MPTCP_RST_EMPTCP: MPTCP-specific error.
+ * An error has been detected in the processing of MPTCP options.
+ * This is the usual reason code to return in the cases where a RST
+ * is being sent to close a subflow because of an invalid response.
+ */
+ SK_RST_REASON_MPTCP_RST_EMPTCP,
+ /**
+ * @SK_RST_REASON_MPTCP_RST_ERESOURCE: Lack of resources.
+ * This code indicates that the sending host does not have enough
+ * resources to support the terminated subflow.
+ */
+ SK_RST_REASON_MPTCP_RST_ERESOURCE,
+ /**
+ * @SK_RST_REASON_MPTCP_RST_EPROHIBIT: Administratively prohibited.
+ * This code indicates that the requested subflow is prohibited by
+ * the policies of the sending host.
+ */
+ SK_RST_REASON_MPTCP_RST_EPROHIBIT,
+ /**
+ * @SK_RST_REASON_MPTCP_RST_EWQ2BIG: Too much outstanding data.
+ * This code indicates that there is an excessive amount of data
+ * that needs to be transmitted over the terminated subflow while
+ * having already been acknowledged over one or more other subflows.
+ * This may occur if a path has been unavailable for a short period
+ * and it is more efficient to reset and start again than it is to
+ * retransmit the queued data.
+ */
+ SK_RST_REASON_MPTCP_RST_EWQ2BIG,
+ /**
+ * @SK_RST_REASON_MPTCP_RST_EBADPERF: Unacceptable performance.
+ * This code indicates that the performance of this subflow was
+ * too low compared to the other subflows of this Multipath TCP
+ * connection.
+ */
+ SK_RST_REASON_MPTCP_RST_EBADPERF,
+ /**
+ * @SK_RST_REASON_MPTCP_RST_EMIDDLEBOX: Middlebox interference.
+ * Middlebox interference has been detected over this subflow,
+ * making MPTCP signaling invalid. For example, this may be sent
+ * if the checksum does not validate.
+ */
+ SK_RST_REASON_MPTCP_RST_EMIDDLEBOX,
+
+ /** @SK_RST_REASON_ERROR: unexpected error happens */
+ SK_RST_REASON_ERROR,
+
+ /**
+ * @SK_RST_REASON_MAX: Maximum of socket reset reasons.
+ * It shouldn't be used as a real 'reason'.
+ */
+ SK_RST_REASON_MAX,
+};
+
+/* Convert skb drop reasons to enum sk_rst_reason type */
+static inline enum sk_rst_reason
+sk_rst_convert_drop_reason(enum skb_drop_reason reason)
+{
+ switch (reason) {
+ case SKB_DROP_REASON_NOT_SPECIFIED:
+ return SK_RST_REASON_NOT_SPECIFIED;
+ case SKB_DROP_REASON_NO_SOCKET:
+ return SK_RST_REASON_NO_SOCKET;
+ case SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE:
+ return SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE;
+ case SKB_DROP_REASON_TCP_RFC7323_PAWS:
+ return SK_RST_REASON_TCP_RFC7323_PAWS;
+ case SKB_DROP_REASON_TCP_TOO_OLD_ACK:
+ return SK_RST_REASON_TCP_TOO_OLD_ACK;
+ case SKB_DROP_REASON_TCP_ACK_UNSENT_DATA:
+ return SK_RST_REASON_TCP_ACK_UNSENT_DATA;
+ case SKB_DROP_REASON_TCP_FLAGS:
+ return SK_RST_REASON_TCP_FLAGS;
+ case SKB_DROP_REASON_TCP_OLD_ACK:
+ return SK_RST_REASON_TCP_OLD_ACK;
+ case SKB_DROP_REASON_TCP_ABORT_ON_DATA:
+ return SK_RST_REASON_TCP_ABORT_ON_DATA;
+ default:
+ /* If we don't have our own corresponding reason */
+ return SK_RST_REASON_NOT_SPECIFIED;
+ }
+}
+#endif
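Because DEFINE_RST_REASON() is an X-macro, consumers can derive tables from the list without repeating it. For instance, a name table could be generated as below (the table itself is an illustration, not something this header provides):

#undef FN
#undef FNe
#define FN(reason)      [SK_RST_REASON_##reason] = #reason,
#define FNe(reason)     [SK_RST_REASON_##reason] = #reason

static const char * const sk_rst_reason_names[] = {
        DEFINE_RST_REASON(FN, FNe)
};

#undef FN
#undef FNe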
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3bfb80bad173..ec65a8cebb99 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -3,6 +3,7 @@
#define __NET_RTNETLINK_H
#include <linux/rtnetlink.h>
+#include <linux/srcu.h>
#include <net/netlink.h>
typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
@@ -11,8 +12,11 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+#define RTNL_FLAG_DOIT_PERNET RTNL_FLAG_DOIT_UNLOCKED
+#define RTNL_FLAG_DOIT_PERNET_WIP RTNL_FLAG_DOIT_UNLOCKED
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */
};
enum rtnl_kinds {
@@ -28,13 +32,35 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
return msgtype & RTNL_KIND_MASK;
}
-void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
-int rtnl_register_module(struct module *owner, int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
-int rtnl_unregister(int protocol, int msgtype);
+/**
+ * struct rtnl_msg_handler - rtnetlink message type and handlers
+ *
+ * @owner: NULL for built-in, THIS_MODULE for module
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ */
+struct rtnl_msg_handler {
+ struct module *owner;
+ int protocol;
+ int msgtype;
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+ int flags;
+};
+
void rtnl_unregister_all(int protocol);
+int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n);
+void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n);
+
+#define rtnl_register_many(handlers) \
+ __rtnl_register_many(handlers, ARRAY_SIZE(handlers))
+#define rtnl_unregister_many(handlers) \
+ __rtnl_unregister_many(handlers, ARRAY_SIZE(handlers))
+
static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
{
if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
@@ -44,11 +70,47 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
}
/**
+ * struct rtnl_newlink_params - parameters of rtnl_link_ops::newlink()
+ *
+ * @src_net: Source netns of rtnetlink socket
+ * @link_net: Link netns by IFLA_LINK_NETNSID, NULL if not specified
+ * @peer_net: Peer netns
+ * @tb: IFLA_* attributes
+ * @data: IFLA_INFO_DATA attributes
+ */
+struct rtnl_newlink_params {
+ struct net *src_net;
+ struct net *link_net;
+ struct net *peer_net;
+ struct nlattr **tb;
+ struct nlattr **data;
+};
+
+/* Get effective link netns from newlink params. Generally, this is link_net
+ * and falls back to src_net. But for compatibility, a driver may choose to
+ * use dev_net(dev) instead.
+ */
+static inline struct net *rtnl_newlink_link_net(struct rtnl_newlink_params *p)
+{
+ return p->link_net ? : p->src_net;
+}
+
+/* Get peer netns from newlink params. Fall back to link netns if peer netns is
+ * not specified explicitly.
+ */
+static inline struct net *rtnl_newlink_peer_net(struct rtnl_newlink_params *p)
+{
+ return p->peer_net ? : rtnl_newlink_link_net(p);
+}
+
+/**
* struct rtnl_link_ops - rtnetlink link operations
*
- * @list: Used internally
+ * @list: Used internally, protected by link_ops_mutex and SRCU
+ * @srcu: Used internally
* @kind: Identifier
* @netns_refund: Physical device, move to init_net on netns exit
+ * @peer_type: Peer device specific netlink attribute number (e.g. VETH_INFO_PEER)
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
@@ -77,6 +139,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*/
struct rtnl_link_ops {
struct list_head list;
+ struct srcu_struct srcu;
const char *kind;
@@ -89,16 +152,15 @@ struct rtnl_link_ops {
void (*setup)(struct net_device *dev);
bool netns_refund;
+ const u16 peer_type;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack);
- int (*newlink)(struct net *src_net,
- struct net_device *dev,
- struct nlattr *tb[],
- struct nlattr *data[],
+ int (*newlink)(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
int (*changelink)(struct net_device *dev,
struct nlattr *tb[],
@@ -137,16 +199,14 @@ struct rtnl_link_ops {
int *prividx, int attr);
};
-int __rtnl_link_register(struct rtnl_link_ops *ops);
-void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-
int rtnl_link_register(struct rtnl_link_ops *ops);
void rtnl_link_unregister(struct rtnl_link_ops *ops);
/**
* struct rtnl_af_ops - rtnetlink address family operations
*
- * @list: Used internally
+ * @list: Used internally, protected by RTNL and SRCU
+ * @srcu: Used internally
* @family: Address family
* @fill_link_af: Function to fill IFLA_AF_SPEC with address family
* specific netlink attributes.
@@ -159,6 +219,8 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops);
*/
struct rtnl_af_ops {
struct list_head list;
+ struct srcu_struct srcu;
+
int family;
int (*fill_link_af)(struct sk_buff *skb,
@@ -178,7 +240,7 @@ struct rtnl_af_ops {
size_t (*get_stats_af_size)(const struct net_device *dev);
};
-void rtnl_af_register(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
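Typical use of the array-based registration, as a sketch; the doit/dumpit callbacks are placeholders, and modular users would also set .owner = THIS_MODULE:

static int my_newaddr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct netlink_ext_ack *extack);
static int my_addr_dumpit(struct sk_buff *skb, struct netlink_callback *cb);

static const struct rtnl_msg_handler my_rtnl_handlers[] = {
        {.protocol = PF_UNSPEC, .msgtype = RTM_NEWADDR,
         .doit = my_newaddr_doit},
        {.protocol = PF_UNSPEC, .msgtype = RTM_GETADDR,
         .dumpit = my_addr_dumpit, .flags = RTNL_FLAG_DUMP_UNLOCKED},
};

static int __init my_init(void)
{
        /* Registers every handler; earlier ones are unwound on error */
        return rtnl_register_many(my_rtnl_handlers);
}

static void __exit my_exit(void)
{
        rtnl_unregister_many(my_rtnl_handlers);
}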
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 41ca14e81d55..629368ab2787 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -128,6 +128,7 @@ struct Qdisc {
struct rcu_head rcu;
netdevice_tracker dev_tracker;
+ struct lock_class_key root_lock_key;
/* private data */
long privdata[] ____cacheline_aligned;
};
@@ -423,6 +424,8 @@ struct tcf_proto {
*/
spinlock_t lock;
bool deleting;
+ bool counted;
+ bool usesw;
refcount_t refcnt;
struct rcu_head rcu;
struct hlist_node destroy_ht_node;
@@ -472,6 +475,7 @@ struct tcf_block {
struct flow_block flow_block;
struct list_head owner_list;
bool keep_dst;
+ atomic_t useswcnt;
atomic_t offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
@@ -799,6 +803,14 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
return false;
}
+/* "noqueue" qdisc identified by not having any enqueue, see noqueue_init() */
+static inline bool qdisc_txq_has_no_queue(const struct netdev_queue *txq)
+{
+ struct Qdisc *qdisc = rcu_access_pointer(txq->qdisc);
+
+ return qdisc->enqueue == NULL;
+}
+
/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
@@ -843,12 +855,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch, to_free);
}
static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
- __u64 bytes, __u32 packets)
+ __u64 bytes, __u64 packets)
{
u64_stats_update_begin(&bstats->syncp);
u64_stats_add(&bstats->bytes, bytes);
@@ -1028,6 +1039,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
return skb;
}
+static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&sch->gso_skb);
+ if (skb) {
+ sch->q.qlen--;
+ return skb;
+ }
+ if (direct)
+ return __qdisc_dequeue_head(&sch->q);
+ else
+ return sch->dequeue(sch);
+}
+
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
@@ -1241,6 +1267,14 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_reason(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free,
+ enum skb_drop_reason reason)
+{
+ tcf_set_drop_reason(skb, reason);
+ return qdisc_drop(skb, sch, to_free);
+}
+
static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
diff --git a/include/net/scm.h b/include/net/scm.h
index 92276a2c5543..84c4707e78a5 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -23,10 +23,20 @@ struct scm_creds {
kgid_t gid;
};
+#ifdef CONFIG_UNIX
+struct unix_edge;
+#endif
+
struct scm_fp_list {
short count;
short count_unix;
short max;
+#ifdef CONFIG_UNIX
+ bool inflight;
+ bool dead;
+ struct list_head vertices;
+ struct unix_edge *edges;
+#endif
struct user_struct *user;
struct file *fp[SCM_MAX_FD];
};
@@ -92,123 +102,10 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
return __scm_send(sock, msg, scm);
}
-#ifdef CONFIG_SECURITY_NETWORK
-static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
-{
- char *secdata;
- u32 seclen;
- int err;
-
- if (test_bit(SOCK_PASSSEC, &sock->flags)) {
- err = security_secid_to_secctx(scm->secid, &secdata, &seclen);
-
- if (!err) {
- put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata);
- security_release_secctx(secdata, seclen);
- }
- }
-}
-
-static inline bool scm_has_secdata(struct socket *sock)
-{
- return test_bit(SOCK_PASSSEC, &sock->flags);
-}
-#else
-static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
-{ }
-
-static inline bool scm_has_secdata(struct socket *sock)
-{
- return false;
-}
-#endif /* CONFIG_SECURITY_NETWORK */
-
-static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
-{
- struct file *pidfd_file = NULL;
- int len, pidfd;
-
- /* put_cmsg() doesn't return an error if CMSG is truncated,
- * that's why we need to opencode these checks here.
- */
- if (msg->msg_flags & MSG_CMSG_COMPAT)
- len = sizeof(struct compat_cmsghdr) + sizeof(int);
- else
- len = sizeof(struct cmsghdr) + sizeof(int);
-
- if (msg->msg_controllen < len) {
- msg->msg_flags |= MSG_CTRUNC;
- return;
- }
-
- if (!scm->pid)
- return;
-
- pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file);
-
- if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) {
- if (pidfd_file) {
- put_unused_fd(pidfd);
- fput(pidfd_file);
- }
-
- return;
- }
-
- if (pidfd_file)
- fd_install(pidfd, pidfd_file);
-}
-
-static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
-{
- if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags) ||
- scm->fp || scm_has_secdata(sock))
- msg->msg_flags |= MSG_CTRUNC;
- scm_destroy(scm);
- return false;
- }
-
- if (test_bit(SOCK_PASSCRED, &sock->flags)) {
- struct user_namespace *current_ns = current_user_ns();
- struct ucred ucreds = {
- .pid = scm->creds.pid,
- .uid = from_kuid_munged(current_ns, scm->creds.uid),
- .gid = from_kgid_munged(current_ns, scm->creds.gid),
- };
- put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
- }
-
- scm_passec(sock, msg, scm);
-
- if (scm->fp)
- scm_detach_fds(msg, scm);
-
- return true;
-}
-
-static inline void scm_recv(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
-{
- if (!__scm_recv_common(sock, msg, scm, flags))
- return;
-
- scm_destroy_cred(scm);
-}
-
-static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
-{
- if (!__scm_recv_common(sock, msg, scm, flags))
- return;
-
- if (test_bit(SOCK_PASSPIDFD, &sock->flags))
- scm_pidfd_recv(msg, scm);
-
- scm_destroy_cred(scm);
-}
+void scm_recv(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags);
+void scm_recv_unix(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags);
static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
unsigned int flags)
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index f514a0aa849e..654d37ec0402 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -15,8 +15,6 @@
* Dinakaran Joseph
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Rewritten to use libcrc32c by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
*/
@@ -25,42 +23,18 @@
#include <linux/types.h>
#include <linux/sctp.h>
-#include <linux/crc32c.h>
-#include <linux/crc32.h>
-
-static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
-{
- /* This uses the crypto implementation of crc32c, which is either
- * implemented w/ hardware support or resolves to __crc32c_le().
- */
- return (__force __wsum)crc32c((__force __u32)sum, buff, len);
-}
-
-static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
- int offset, int len)
-{
- return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
- (__force __u32)csum2, len);
-}
-
-static const struct skb_checksum_ops sctp_csum_ops = {
- .update = sctp_csum_update,
- .combine = sctp_csum_combine,
-};
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
__le32 old = sh->checksum;
- __wsum new;
+ u32 new;
sh->checksum = 0;
- new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0,
- &sctp_csum_ops);
+ new = ~skb_crc32c(skb, offset, skb->len - offset, ~0);
sh->checksum = old;
-
- return cpu_to_le32((__force __u32)new);
+ return cpu_to_le32(new);
}
#endif /* __sctp_checksum_h__ */
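For symmetry, the transmit side can fill the checksum with the same skb_crc32c() primitive; an illustrative counterpart to the receive-side helper above:

static void my_sctp_fill_cksum(struct sk_buff *skb, unsigned int offset)
{
        struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);

        sh->checksum = 0;
        sh->checksum = cpu_to_le32(~skb_crc32c(skb, offset,
                                               skb->len - offset, ~0));
}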
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a2310fa995f6..e96d1bd087f6 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -28,7 +28,7 @@
#define __net_sctp_h__
/* Header Strategy.
- * Start getting some control over the header file depencies:
+ * Start getting some control over the header file dependencies:
* includes
* constants
* structs
@@ -364,8 +364,6 @@ sctp_assoc_to_state(const struct sctp_association *asoc)
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
-int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
-
/* A macro to walk a list of skbs. */
#define sctp_skb_for_each(pos, head, tmp) \
skb_queue_walk_safe(head, pos, tmp)
@@ -636,7 +634,7 @@ static inline void sctp_transport_pl_reset(struct sctp_transport *t)
}
} else {
if (t->pl.state != SCTP_PL_DISABLED) {
- if (del_timer(&t->probe_timer))
+ if (timer_delete(&t->probe_timer))
sctp_transport_put(t);
t->pl.state = SCTP_PL_DISABLED;
}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 64c42bd56bb2..3bfd261a53cc 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -161,7 +161,6 @@ const struct sctp_sm_table_entry *sctp_sm_lookup_event(
enum sctp_event_type event_type,
enum sctp_state state,
union sctp_subtype event_subtype);
-int sctp_chunk_iif(const struct sctp_chunk *);
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
struct sctp_chunk *,
gfp_t gfp);
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 572d73fdcd5e..8034bf5febbe 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -35,10 +35,10 @@ struct sctp_sched_ops {
struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
/* Called only if the chunk fit the packet */
void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
- /* Sched all chunks already enqueued */
- void (*sched_all)(struct sctp_stream *steam);
- /* Unched all chunks already enqueued */
- void (*unsched_all)(struct sctp_stream *steam);
+ /* Schedule all chunks already enqueued */
+ void (*sched_all)(struct sctp_stream *stream);
+ /* Unschedule all chunks already enqueued */
+ void (*unsched_all)(struct sctp_stream *stream);
};
int sctp_sched_set_sched(struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f24a1bbcb3ef..1ad7ce71d0a7 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -521,7 +521,7 @@ struct sctp_datamsg {
refcount_t refcnt;
/* When is this message no longer interesting to the peer? */
unsigned long expires_at;
- /* Did the messenge fail to send? */
+ /* Did the message fail to send? */
int send_error;
u8 send_failed:1,
can_delay:1, /* should this message be Nagle delayed */
@@ -775,6 +775,7 @@ struct sctp_transport {
/* Reference counting. */
refcount_t refcnt;
+ __u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@@ -784,7 +785,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
- __u32 rto_pending:1,
+ rto_pending:1,
/*
* hb_sent : a flag that signals that we have a pending
@@ -792,7 +793,7 @@ struct sctp_transport {
*/
hb_sent:1,
- /* Is the Path MTU update pending on this tranport */
+ /* Is the Path MTU update pending on this transport */
pmtu_pending:1,
dst_pending_confirm:1, /* need to confirm neighbour */
@@ -1223,7 +1224,7 @@ enum sctp_endpoint_type {
};
/*
- * A common base class to bridge the implmentation view of a
+ * A common base class to bridge the implementation view of a
* socket (usually listening) endpoint versus an association's
* local endpoint.
* This common structure is useful for several purposes:
@@ -1353,7 +1354,7 @@ struct sctp_endpoint {
struct rcu_head rcu;
};
-/* Recover the outter endpoint structure. */
+/* Recover the outer endpoint structure. */
static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
{
struct sctp_endpoint *ep;
@@ -1906,7 +1907,7 @@ struct sctp_association {
__u32 rwnd_over;
/* Keeps track of rwnd pressure. This happens when we have
- * a window, but not recevie buffer (i.e small packets). This one
+ * a window, but not receive buffer (i.e small packets). This one
* is released slowly (1 PMTU at a time).
*/
__u32 rwnd_press;
@@ -1994,7 +1995,7 @@ struct sctp_association {
/* ADDIP Section 5.2 Upon reception of an ASCONF Chunk.
*
- * This is needed to implement itmes E1 - E4 of the updated
+ * This is needed to implement items E1 - E4 of the updated
* spec. Here is the justification:
*
* Since the peer may bundle multiple ASCONF chunks toward us,
@@ -2005,7 +2006,7 @@ struct sctp_association {
/* These ASCONF chunks are waiting to be sent.
*
- * These chunaks can't be pushed to outqueue until receiving
+ * These chunks can't be pushed to outqueue until receiving
* ASCONF_ACK for the previous ASCONF indicated by
* addip_last_asconf, so as to guarantee that only one ASCONF
* is in flight at any time.
@@ -2059,13 +2060,13 @@ struct sctp_association {
struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
- * keys are provided out of band by the user applicaton
+ * keys are provided out of band by the user application
* and can't change during the lifetime of the association
*/
struct list_head endpoint_shared_keys;
/* SCTP AUTH:
- * The current generated assocaition shared key (secret)
+ * The current generated association shared key (secret)
*/
struct sctp_auth_bytes *asoc_shared_key;
struct sctp_shared_key *shkey;
@@ -2121,7 +2122,7 @@ enum {
SCTP_ASSOC_EYECATCHER = 0xa550c123,
};
-/* Recover the outter association structure. */
+/* Recover the outer association structure. */
static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
{
struct sctp_association *asoc;
@@ -2151,8 +2152,6 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
const union sctp_addr *address,
const gfp_t gfp,
const int peer_state);
-void sctp_assoc_del_peer(struct sctp_association *asoc,
- const union sctp_addr *addr);
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer);
void sctp_assoc_control_transport(struct sctp_association *asoc,
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 21e7fa2a1813..cddebafb9f77 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -16,9 +16,5 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport);
u32 secure_tcpv6_ts_off(const struct net *net,
const __be32 *saddr, const __be32 *daddr);
-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport);
#endif /* _NET_SECURE_SEQ */
diff --git a/include/net/seg6.h b/include/net/seg6.h
index af668f17b398..82b3fbbcbb93 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -52,10 +52,17 @@ static inline struct seg6_pernet_data *seg6_pernet(struct net *net)
extern int seg6_init(void);
extern void seg6_exit(void);
+#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
extern int seg6_iptunnel_init(void);
extern void seg6_iptunnel_exit(void);
extern int seg6_local_init(void);
extern void seg6_local_exit(void);
+#else
+static inline int seg6_iptunnel_init(void) { return 0; }
+static inline void seg6_iptunnel_exit(void) {}
+static inline int seg6_local_init(void) { return 0; }
+static inline void seg6_local_exit(void) {}
+#endif
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
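
The #else branch added above is the usual Kconfig fallback idiom: when the feature is compiled out, callers still compile and link because the header supplies no-op static inlines. A minimal standalone sketch of the pattern, with made-up names (CONFIG_DEMO_FEATURE and demo_init() are not real kernel symbols):

#include <stdio.h>

/* #define CONFIG_DEMO_FEATURE 1 */  /* toggle to compile the feature in */

#ifdef CONFIG_DEMO_FEATURE
int demo_init(void);                  /* real version lives in a .c file */
#else
static inline int demo_init(void) { return 0; }   /* compiled-out no-op */
#endif

int main(void)
{
	/* The caller is oblivious to whether the feature exists. */
	if (demo_init())
		fprintf(stderr, "demo_init failed\n");
	return 0;
}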
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 2b5d2ee5613e..24f733b3e3fe 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -49,9 +49,16 @@ extern int seg6_hmac_info_del(struct net *net, u32 key);
extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr,
struct ipv6_sr_hdr *srh);
extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
+#ifdef CONFIG_IPV6_SEG6_HMAC
extern int seg6_hmac_init(void);
extern void seg6_hmac_exit(void);
extern int seg6_hmac_net_init(struct net *net);
extern void seg6_hmac_net_exit(struct net *net);
+#else
+static inline int seg6_hmac_init(void) { return 0; }
+static inline void seg6_hmac_exit(void) {}
+static inline int seg6_hmac_net_init(struct net *net) { return 0; }
+static inline void seg6_hmac_net_exit(struct net *net) {}
+#endif
#endif
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 3fab9dec2ec4..888c1ce6f527 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -19,6 +19,7 @@ extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
struct seg6_bpf_srh_state {
+ local_lock_t bh_lock;
struct ipv6_sr_hdr *srh;
u16 hdrlen;
bool valid;
diff --git a/include/net/smc.h b/include/net/smc.h
index c9dcb30e3fd9..db84e4e35080 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -26,9 +26,6 @@ struct smc_hashinfo {
struct hlist_head ht;
};
-int smc_hash_sk(struct sock *sk);
-void smc_unhash_sk(struct sock *sk);
-
/* SMCD/ISM device driver interface */
struct smcd_dmb {
u64 dmb_tok;
@@ -50,7 +47,6 @@ struct smcd_dmb {
#define ISM_ERROR 0xFFFF
struct smcd_dev;
-struct ism_client;
struct smcd_gid {
u64 gid;
@@ -61,14 +57,8 @@ struct smcd_ops {
int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid,
u32 vid_valid, u32 vid);
int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
- struct ism_client *client);
+ void *client);
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
- int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
- int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
- int (*set_vlan_required)(struct smcd_dev *dev);
- int (*reset_vlan_required)(struct smcd_dev *dev);
- int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
- u32 trigger_irq, u32 event_code, u64 info);
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
@@ -76,11 +66,23 @@ struct smcd_ops {
void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid);
u16 (*get_chid)(struct smcd_dev *dev);
struct device* (*get_dev)(struct smcd_dev *dev);
+
+ /* optional operations */
+ int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*set_vlan_required)(struct smcd_dev *dev);
+ int (*reset_vlan_required)(struct smcd_dev *dev);
+ int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
+ int (*support_dmb_nocopy)(struct smcd_dev *dev);
+ int (*attach_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*detach_dmb)(struct smcd_dev *dev, u64 token);
};
struct smcd_dev {
const struct smcd_ops *ops;
void *priv;
+ void *client;
struct list_head list;
spinlock_t lock;
struct smc_connection **conn;
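
Moving add_vlan_id() and friends into an "optional operations" tail implies that callers must tolerate NULL pointers there. A hedged standalone sketch of that calling convention; smcd_add_vlan() and the *_sketch types are invented for illustration, not in-tree symbols:

#include <errno.h>
#include <stdio.h>

typedef unsigned long long u64;

struct smcd_dev_sketch;

struct smcd_ops_sketch {
	/* mandatory op */
	int (*move_data)(struct smcd_dev_sketch *dev, u64 dmb_tok);
	/* optional op: may be NULL */
	int (*add_vlan_id)(struct smcd_dev_sketch *dev, u64 vlan_id);
};

struct smcd_dev_sketch { const struct smcd_ops_sketch *ops; };

static int smcd_add_vlan(struct smcd_dev_sketch *dev, u64 vlan_id)
{
	if (!dev->ops->add_vlan_id)     /* optional: absent on some devices */
		return -EOPNOTSUPP;
	return dev->ops->add_vlan_id(dev, vlan_id);
}

static const struct smcd_ops_sketch ops_no_vlan = { .move_data = 0 };

int main(void)
{
	struct smcd_dev_sketch dev = { .ops = &ops_no_vlan };

	printf("add_vlan -> %d (expect -%d)\n",
	       smcd_add_vlan(&dev, 7), EOPNOTSUPP);
	return 0;
}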
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 468a67836e2f..4cb4326dfebe 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -159,7 +159,7 @@ struct linux_tls_mib {
#define __SNMP_ADD_STATS64(mib, field, addend) \
do { \
- __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
+ TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[field] += addend; \
u64_stats_update_end(&ptr->syncp); \
@@ -176,8 +176,7 @@ struct linux_tls_mib {
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
- __typeof__(*mib) *ptr; \
- ptr = raw_cpu_ptr((mib)); \
+ TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend; \
diff --git a/include/net/sock.h b/include/net/sock.h
index b4b553df7870..92e7c1aae3cc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -303,6 +303,7 @@ struct sk_filter;
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_bpf_cb_flags: used in bpf_setsockopt()
* @sk_use_task_frag: allow sk_page_frag() to use current->task_frag.
* Sockets that can be used under memory reclaim should
* set this to false.
@@ -336,7 +337,16 @@ struct sk_filter;
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
+ * @sk_scm_recv_flags: all flags used by scm_recv()
+ * @sk_scm_credentials: flagged by SO_PASSCRED to recv SCM_CREDENTIALS
+ * @sk_scm_security: flagged by SO_PASSSEC to recv SCM_SECURITY
+ * @sk_scm_pidfd: flagged by SO_PASSPIDFD to recv SCM_PIDFD
+ * @sk_scm_rights: flagged by SO_PASSRIGHTS to recv SCM_RIGHTS
+ * @sk_scm_unused: unused flags for scm_recv()
* @ns_tracker: tracker for netns reference
+ * @sk_user_frags: xarray of pages the user is holding a reference on.
+ * @sk_owner: reference to the real owner of the socket that calls
+ * sock_lock_init_class_and_name().
*/
struct sock {
/*
@@ -519,11 +529,23 @@ struct sock {
#endif
int sk_disconnects;
- u8 sk_txrehash;
+ union {
+ u8 sk_txrehash;
+ u8 sk_scm_recv_flags;
+ struct {
+ u8 sk_scm_credentials : 1,
+ sk_scm_security : 1,
+ sk_scm_pidfd : 1,
+ sk_scm_rights : 1,
+ sk_scm_unused : 4;
+ };
+ };
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;
+#define SK_BPF_CB_FLAG_TEST(SK, FLAG) ((SK)->sk_bpf_cb_flags & (FLAG))
+ u8 sk_bpf_cb_flags;
void *sk_user_data;
#ifdef CONFIG_SECURITY
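
The union above lets scm_recv() test all SO_PASS* flags in one load via sk_scm_recv_flags while setsockopt() flips individual bits. A standalone sketch of the aliasing trick; the field names mirror the ones above, but the layout is only illustrative since bitfield order is implementation-defined:

#include <stdio.h>
#include <stdint.h>

union scm_flags {
	uint8_t recv_flags;                /* all flags at once */
	struct {
		uint8_t credentials : 1,   /* SO_PASSCRED */
			security    : 1,   /* SO_PASSSEC */
			pidfd       : 1,   /* SO_PASSPIDFD */
			rights      : 1,   /* SO_PASSRIGHTS */
			unused      : 4;
	};
};

int main(void)
{
	union scm_flags f = { .recv_flags = 0 };

	f.credentials = 1;
	f.rights = 1;

	if (f.recv_flags)                  /* single test replaces four */
		printf("some SCM data requested (0x%x)\n", f.recv_flags);
	return 0;
}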
@@ -542,6 +564,16 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
+ struct xarray sk_user_frags;
+
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+ struct module *sk_owner;
+#endif
+};
+
+struct sock_bh_locked {
+ struct sock *sock;
+ local_lock_t bh_lock;
};
enum sk_pacing {
@@ -887,6 +919,8 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_safe(__sk, tmp, list) \
+ hlist_for_each_entry_safe(__sk, tmp, list, sk_bind_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -944,9 +978,17 @@ enum sock_flags {
SOCK_XDP, /* XDP is attached */
SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */
+ SOCK_RCVPRIORITY, /* Receive SO_PRIORITY ancillary data with packet */
+ SOCK_TIMESTAMPING_ANY, /* Copy of sk_tsflags & TSFLAGS_ANY */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+/*
+ * The highest bit of sk_tsflags is reserved for kernel-internal
+ * SOCKCM_FLAG_TS_OPT_ID. A check in core/sock.c ensures that
+ * SOF_TIMESTAMPING* values do not reach this reserved area.
+ */
+#define SOCKCM_FLAG_TS_OPT_ID BIT(31)
static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
{
@@ -1194,6 +1236,13 @@ static inline void sk_prot_clear_nulls(struct sock *sk, int size)
size - offsetof(struct sock, sk_node.pprev));
}
+struct proto_accept_arg {
+ int flags;
+ int err;
+ int is_empty;
+ bool kern;
+};
+
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
*/
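
proto_accept_arg bundles what used to be three scalar parameters of ->accept(), so new outputs (like is_empty) no longer ripple through every protocol's signature. A hedged standalone sketch of the call shape; toy_accept() and the *_sketch types are invented:

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

struct sock_sketch { int backlog; };

struct proto_accept_arg_sketch {
	int flags;
	int err;       /* out: error code */
	int is_empty;  /* out: accept queue was empty */
	bool kern;
};

static struct sock_sketch *toy_accept(struct sock_sketch *sk,
				      struct proto_accept_arg_sketch *arg)
{
	if (!sk->backlog) {
		arg->err = -11;        /* stands in for -EAGAIN */
		arg->is_empty = 1;     /* extra result, no new parameter needed */
		return NULL;
	}
	sk->backlog--;
	return sk;
}

int main(void)
{
	struct sock_sketch lsk = { .backlog = 0 };
	struct proto_accept_arg_sketch arg = { .flags = 0, .kern = false };

	if (!toy_accept(&lsk, &arg))
		printf("accept failed: err=%d is_empty=%d\n",
		       arg.err, arg.is_empty);
	return 0;
}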
@@ -1208,8 +1257,8 @@ struct proto {
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
- struct sock * (*accept)(struct sock *sk, int flags, int *err,
- bool kern);
+ struct sock * (*accept)(struct sock *sk,
+ struct proto_accept_arg *arg);
int (*ioctl)(struct sock *sk, int cmd,
int *karg);
@@ -1261,10 +1310,6 @@ struct proto {
unsigned int inuse_idx;
#endif
-#if IS_ENABLED(CONFIG_MPTCP)
- int (*forward_alloc_get)(const struct sock *sk);
-#endif
-
bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
@@ -1325,15 +1370,6 @@ int sock_load_diag_module(int family, int protocol);
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
-static inline int sk_forward_alloc_get(const struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_MPTCP)
- if (sk->sk_prot->forward_alloc_get)
- return sk->sk_prot->forward_alloc_get(sk);
-#endif
- return READ_ONCE(sk->sk_forward_alloc);
-}
-
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
@@ -1371,75 +1407,6 @@ static inline int sk_under_cgroup_hierarchy(struct sock *sk,
#endif
}
-static inline bool sk_has_memory_pressure(const struct sock *sk)
-{
- return sk->sk_prot->memory_pressure != NULL;
-}
-
-static inline bool sk_under_global_memory_pressure(const struct sock *sk)
-{
- return sk->sk_prot->memory_pressure &&
- !!READ_ONCE(*sk->sk_prot->memory_pressure);
-}
-
-static inline bool sk_under_memory_pressure(const struct sock *sk)
-{
- if (!sk->sk_prot->memory_pressure)
- return false;
-
- if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg))
- return true;
-
- return !!READ_ONCE(*sk->sk_prot->memory_pressure);
-}
-
-static inline long
-proto_memory_allocated(const struct proto *prot)
-{
- return max(0L, atomic_long_read(prot->memory_allocated));
-}
-
-static inline long
-sk_memory_allocated(const struct sock *sk)
-{
- return proto_memory_allocated(sk->sk_prot);
-}
-
-/* 1 MB per cpu, in page units */
-#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
-extern int sysctl_mem_pcpu_rsv;
-
-static inline void proto_memory_pcpu_drain(struct proto *proto)
-{
- int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
-
- if (val)
- atomic_long_add(val, proto->memory_allocated);
-}
-
-static inline void
-sk_memory_allocated_add(const struct sock *sk, int val)
-{
- struct proto *proto = sk->sk_prot;
-
- val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
-
- if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
- proto_memory_pcpu_drain(proto);
-}
-
-static inline void
-sk_memory_allocated_sub(const struct sock *sk, int val)
-{
- struct proto *proto = sk->sk_prot;
-
- val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
-
- if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
- proto_memory_pcpu_drain(proto);
-}
-
#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
static inline void sk_sockets_allocated_dec(struct sock *sk)
@@ -1466,15 +1433,6 @@ proto_sockets_allocated_sum_positive(struct proto *prot)
return percpu_counter_sum_positive(prot->sockets_allocated);
}
-static inline bool
-proto_memory_pressure(struct proto *prot)
-{
- if (!prot->memory_pressure)
- return false;
- return !!READ_ONCE(*prot->memory_pressure);
-}
-
-
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR 64 /* should be enough for the first time */
struct prot_inuse {
@@ -1583,7 +1541,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
}
static inline bool
-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
{
int delta;
@@ -1591,7 +1549,13 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
return true;
delta = size - sk->sk_forward_alloc;
return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
- skb_pfmemalloc(skb);
+ pfmemalloc;
+}
+
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+{
+ return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
}
static inline int sk_unused_reserved_mem(const struct sock *sk)
@@ -1641,6 +1605,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
sk_mem_reclaim(sk);
}
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+ __module_get(owner);
+ sk->sk_owner = owner;
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+ sk->sk_owner = NULL;
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+ module_put(sk->sk_owner);
+}
+#else
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+}
+#endif
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -1650,13 +1643,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
+ sk_owner_set(sk, THIS_MODULE); \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
- sizeof((sk)->sk_lock)); \
+ sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
- (skey), (sname)); \
+ (skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
@@ -1690,7 +1684,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
* lock_sock_fast - fast version of lock_sock
* @sk: socket
*
- * This version should be used for very small section, where process wont block
+ * This version should be used for a very small section, where the process won't block
* return false if fast path is taken:
*
* sk_lock.slock locked, owned = 0, BH disabled
@@ -1800,9 +1794,9 @@ static inline bool sock_allow_reclassification(const struct sock *csk)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
void sk_free(struct sock *sk);
+void sk_net_refcnt_upgrade(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
-void sk_free_unlock_clone(struct sock *sk);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
@@ -1816,6 +1810,15 @@ void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
void sock_pfree(struct sk_buff *skb);
+
+static inline void skb_set_owner_edemux(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ }
+}
#else
#define sock_edemux sock_efree
#endif
@@ -1845,6 +1848,8 @@ static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
}
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void *sock_kmemdup(struct sock *sk, const void *src,
+ int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
@@ -1860,13 +1865,18 @@ struct sockcm_cookie {
u64 transmit_time;
u32 mark;
u32 tsflags;
+ u32 ts_opt_id;
+ u32 priority;
+ u32 dmabuf_id;
};
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
*sockc = (struct sockcm_cookie) {
- .tsflags = READ_ONCE(sk->sk_tsflags)
+ .mark = READ_ONCE(sk->sk_mark),
+ .tsflags = READ_ONCE(sk->sk_tsflags),
+ .priority = READ_ONCE(sk->sk_priority),
};
}
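
sockcm_init() now seeds mark and priority from the socket, so per-message cmsg values only override the socket-wide defaults. A standalone sketch of that default-then-override flow; the *_sketch types are simplified stand-ins and the cmsg-parsing step is elided:

#include <stdint.h>
#include <stdio.h>

struct sock_sketch { uint32_t sk_mark, sk_tsflags, sk_priority; };
struct sockcm_sketch { uint32_t mark, tsflags, priority; };

static void sockcm_init_sketch(struct sockcm_sketch *sockc,
			       const struct sock_sketch *sk)
{
	*sockc = (struct sockcm_sketch) {
		.mark     = sk->sk_mark,      /* socket-wide defaults ... */
		.tsflags  = sk->sk_tsflags,
		.priority = sk->sk_priority,
	};
}

int main(void)
{
	struct sock_sketch sk = { .sk_mark = 42, .sk_priority = 3 };
	struct sockcm_sketch sockc;

	sockcm_init_sketch(&sockc, &sk);
	sockc.priority = 6;                   /* ... overridden by a cmsg */

	printf("mark=%u priority=%u\n", sockc.mark, sockc.priority);
	return 0;
}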
@@ -1882,7 +1892,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
-int sock_no_accept(struct socket *, struct socket *, int, bool);
+int sock_no_accept(struct socket *, struct socket *, struct proto_accept_arg *);
int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
@@ -2134,17 +2144,10 @@ sk_dst_get(const struct sock *sk)
static inline void __dst_negative_advice(struct sock *sk)
{
- struct dst_entry *ndst, *dst = __sk_dst_get(sk);
-
- if (dst && dst->ops->negative_advice) {
- ndst = dst->ops->negative_advice(dst);
+ struct dst_entry *dst = __sk_dst_get(sk);
- if (ndst != dst) {
- rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_tx_queue_clear(sk);
- WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- }
- }
+ if (dst && dst->ops->negative_advice)
+ dst->ops->negative_advice(sk, dst);
}
static inline void dst_negative_advice(struct sock *sk)
@@ -2173,7 +2176,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk_tx_queue_clear(sk);
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst)));
dst_release(old_dst);
}
@@ -2344,7 +2347,7 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
}
/**
- * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * sock_poll_wait - wrapper for the poll_wait call.
* @filp: file
* @sock: socket to wait on
* @p: poll_table
@@ -2354,15 +2357,12 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
poll_table *p)
{
- if (!poll_does_not_wait(p)) {
- poll_wait(filp, &sock->wq.wait, p);
- /* We need to be sure we are in sync with the
- * socket flags modification.
- *
- * This memory barrier is paired in the wq_has_sleeper.
- */
- smp_mb();
- }
+ /* Provides the memory barrier we need to be sure we are in sync
+ * with the socket flags modification.
+ *
+ * This memory barrier is paired with the one in wq_has_sleeper.
+ */
+ poll_wait(filp, &sock->wq.wait, p);
}
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
@@ -2515,6 +2515,12 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band)
}
}
+static inline void sk_wake_async_rcu(const struct sock *sk, int how, int band)
+{
+ if (unlikely(sock_flag(sk, SOCK_FASYNC)))
+ sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
+}
+
/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
* need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
* Note: for send buffers, TCP works better if we can build two skbs at
@@ -2613,10 +2619,10 @@ struct sock_skb_cb {
/* Store sock_skb_cb at the end of skb->cb[] so protocol families
* using skb->cb[] would keep using it directly and utilize its
- * alignement guarantee.
+ * alignment guarantee.
*/
-#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
- sizeof(struct sock_skb_cb)))
+#define SOCK_SKB_CB_OFFSET (sizeof_field(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb))
#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
SOCK_SKB_CB_OFFSET))
@@ -2705,12 +2711,13 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
{
#define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \
(1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_RCVMARK))
+ (1UL << SOCK_RCVMARK) | \
+ (1UL << SOCK_RCVPRIORITY) | \
+ (1UL << SOCK_TIMESTAMPING_ANY))
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_RECV_CMSGS ||
- READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
+ if (READ_ONCE(sk->sk_flags) & FLAGS_RECV_CMSGS)
__sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
@@ -2718,39 +2725,46 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
sock_write_timestamp(sk, 0);
}
-void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
+void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags);
/**
* _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
- * @tsflags: timestamping flags to use
+ * @sockc: pointer to socket cmsg cookie to get timestamping info
* @tx_flags: completed with instructions for time stamping
* @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
*
* Note: callers should take care of initial ``*tx_flags`` value (usually 0)
*/
-static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+static inline void _sock_tx_timestamp(struct sock *sk,
+ const struct sockcm_cookie *sockc,
__u8 *tx_flags, __u32 *tskey)
{
+ __u32 tsflags = sockc->tsflags;
+
if (unlikely(tsflags)) {
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
- tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
+ if (tsflags & SOCKCM_FLAG_TS_OPT_ID)
+ *tskey = sockc->ts_opt_id;
+ else
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ }
}
- if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
- *tx_flags |= SKBTX_WIFI_STATUS;
}
-static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+static inline void sock_tx_timestamp(struct sock *sk,
+ const struct sockcm_cookie *sockc,
__u8 *tx_flags)
{
- _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+ _sock_tx_timestamp(sk, sockc, tx_flags, NULL);
}
-static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb,
+ const struct sockcm_cookie *sockc)
{
- _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ _sock_tx_timestamp(skb->sk, sockc, &skb_shinfo(skb)->tx_flags,
&skb_shinfo(skb)->tskey);
}
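
With SOCKCM_FLAG_TS_OPT_ID set, _sock_tx_timestamp() takes the key from the cookie instead of the per-socket counter, letting userspace pin its own correlation IDs. A standalone sketch of just that branch; the flag value mirrors the BIT(31) definition above, everything else is stubbed:

#include <stdint.h>
#include <stdio.h>

#define SOCKCM_FLAG_TS_OPT_ID (1u << 31)

struct cookie_sketch { uint32_t tsflags, ts_opt_id; };

static uint32_t next_tskey;                /* stands in for sk->sk_tskey */

static uint32_t pick_tskey(const struct cookie_sketch *sockc)
{
	if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID)
		return sockc->ts_opt_id;   /* user-chosen ID */
	return next_tskey++;               /* classic per-socket counter */
}

int main(void)
{
	struct cookie_sketch a = { .tsflags = 0 };
	struct cookie_sketch b = { .tsflags = SOCKCM_FLAG_TS_OPT_ID,
				   .ts_opt_id = 777 };

	printf("auto key: %u, pinned key: %u\n",
	       pick_tskey(&a), pick_tskey(&b));
	return 0;
}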
@@ -2775,9 +2789,26 @@ static inline bool sk_is_udp(const struct sock *sk)
sk->sk_protocol == IPPROTO_UDP;
}
+static inline bool sk_is_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX;
+}
+
static inline bool sk_is_stream_unix(const struct sock *sk)
{
- return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+ return sk_is_unix(sk) && sk->sk_type == SOCK_STREAM;
+}
+
+static inline bool sk_is_vsock(const struct sock *sk)
+{
+ return sk->sk_family == AF_VSOCK;
+}
+
+static inline bool sk_may_scm_recv(const struct sock *sk)
+{
+ return (IS_ENABLED(CONFIG_UNIX) && sk->sk_family == AF_UNIX) ||
+ sk->sk_family == AF_NETLINK ||
+ (IS_ENABLED(CONFIG_BT) && sk->sk_family == AF_BLUETOOTH);
}
/**
@@ -2819,6 +2850,12 @@ sk_is_refcounted(struct sock *sk)
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
+static inline bool
+sk_requests_wifi_status(struct sock *sk)
+{
+ return sk && sk_fullsock(sk) && sock_flag(sk, SOCK_WIFI_STATUS);
+}
+
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
@@ -2831,12 +2868,10 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
skb = sk->sk_validate_xmit_skb(sk, dev, skb);
-#ifdef CONFIG_TLS_DEVICE
- } else if (unlikely(skb->decrypted)) {
+ } else if (unlikely(skb_is_decrypted(skb))) {
pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
kfree_skb(skb);
skb = NULL;
-#endif
}
#endif
@@ -2851,6 +2886,16 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
+/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV or TIME_WAIT socket.
+ * TCP SYNACK messages can be attached to LISTEN or NEW_SYN_RECV (depending on SYNCOOKIE).
+ * TCP RST and ACK can be attached to TIME_WAIT.
+ */
+static inline bool sk_listener_or_tw(const struct sock *sk)
+{
+ return (1 << READ_ONCE(sk->sk_state)) &
+ (TCPF_LISTEN | TCPF_NEW_SYN_RECV | TCPF_TIME_WAIT);
+}
+
void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);
@@ -2875,8 +2920,6 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
-extern int sysctl_tstamp_allow_data;
-
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
@@ -2940,6 +2983,13 @@ int sock_set_timestamping(struct sock *sk, int optname,
struct so_timestamping timestamping);
void sock_enable_timestamps(struct sock *sk);
+#if defined(CONFIG_CGROUP_BPF)
+void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op);
+#else
+static inline void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op)
+{
+}
+#endif
void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
void sock_set_priority(struct sock *sk, u32 priority);
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 6ec140b0a61b..6e4faf3ee76f 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -26,7 +26,7 @@ struct sock_reuseport {
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
- struct sock *socks[]; /* array of sock pointers */
+ struct sock *socks[] __counted_by(max_socks);
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
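
__counted_by(max_socks) tells the compiler (and FORTIFY/bounds sanitizers) which field bounds the flexible array. A standalone sketch of the annotation and the matching allocation; the fallback macro is for compilers without the attribute:

#include <stdio.h>
#include <stdlib.h>

#if defined(__has_attribute) && __has_attribute(__counted_by__)
#define counted_by(m) __attribute__((__counted_by__(m)))
#else
#define counted_by(m)                  /* older compiler: no-op */
#endif

struct sock_array {
	unsigned int max_socks;
	void *socks[] counted_by(max_socks);  /* bounded by max_socks */
};

int main(void)
{
	unsigned int n = 4;
	struct sock_array *a =
		malloc(sizeof(*a) + n * sizeof(a->socks[0]));

	if (!a)
		return 1;
	a->max_socks = n;              /* must be set before indexing */
	a->socks[0] = a;               /* in-bounds access */
	printf("array of %u slots allocated\n", a->max_socks);
	free(a);
	return 0;
}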
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 41e2ce9e9e10..0ed73e364faa 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -43,6 +43,8 @@ struct strparser;
struct strp_callbacks {
int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+ int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
int (*read_sock_done)(struct strparser *strp, int err);
void (*abort_parser)(struct strparser *strp, int err);
void (*lock)(struct strparser *strp);
@@ -112,8 +114,6 @@ static inline void strp_pause(struct strparser *strp)
/* May be called without holding lock for attached socket */
void strp_unpause(struct strparser *strp);
-/* Must be called with process lock held (lock_sock) */
-void __strp_unpause(struct strparser *strp);
static inline void save_strp_stats(struct strparser *strp,
struct strp_aggr_stats *agg_stats)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6ae35199d3b3..5078ad868fee 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -26,6 +26,7 @@
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
+#include <linux/bits.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -41,6 +42,7 @@
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
+#include <net/xfrm.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@@ -52,6 +54,8 @@ extern struct inet_hashinfo tcp_hashinfo;
DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);
+DECLARE_PER_CPU(u32, tcp_tw_isn);
+
void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
@@ -141,8 +145,9 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
-#define TCP_RTO_MAX ((unsigned)(120*HZ))
-#define TCP_RTO_MIN ((unsigned)(HZ/5))
+#define TCP_RTO_MAX_SEC 120
+#define TCP_RTO_MAX ((unsigned)(TCP_RTO_MAX_SEC * HZ))
+#define TCP_RTO_MIN ((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
@@ -294,14 +299,6 @@ static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
return seq3 - seq2 >= seq1 - seq2;
}
-static inline bool tcp_out_of_memory(struct sock *sk)
-{
- if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
- return true;
- return false;
-}
-
static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
@@ -314,7 +311,7 @@ static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
void sk_forced_mem_schedule(struct sock *sk, int size);
-bool tcp_check_oom(struct sock *sk, int shift);
+bool tcp_check_oom(const struct sock *sk, int shift);
extern struct proto tcp_prot;
@@ -353,7 +350,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
-void tcp_twsk_purge(struct list_head *net_exit_list, int family);
+void tcp_twsk_purge(struct list_head *net_exit_list);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
@@ -377,25 +374,64 @@ static inline void tcp_dec_quickack_mode(struct sock *sk)
}
}
-#define TCP_ECN_OK 1
-#define TCP_ECN_QUEUE_CWR 2
-#define TCP_ECN_DEMAND_CWR 4
-#define TCP_ECN_SEEN 8
+#define TCP_ECN_MODE_RFC3168 BIT(0)
+#define TCP_ECN_QUEUE_CWR BIT(1)
+#define TCP_ECN_DEMAND_CWR BIT(2)
+#define TCP_ECN_SEEN BIT(3)
+#define TCP_ECN_MODE_ACCECN BIT(4)
+
+#define TCP_ECN_DISABLED 0
+#define TCP_ECN_MODE_PENDING (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
+#define TCP_ECN_MODE_ANY (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
+
+static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
+{
+ return tp->ecn_flags & TCP_ECN_MODE_ANY;
+}
+
+static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
+{
+ return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
+}
+
+static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
+{
+ return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
+}
+
+static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
+{
+ return !tcp_ecn_mode_any(tp);
+}
+
+static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
+{
+ return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
+}
+
+static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
+{
+ tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
+ tp->ecn_flags |= mode;
+}
enum tcp_tw_status {
TCP_TW_SUCCESS = 0,
TCP_TW_RST = 1,
TCP_TW_ACK = 2,
- TCP_TW_SYN = 3
+ TCP_TW_SYN = 3,
+ TCP_TW_ACK_OOW = 4
};
enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
- const struct tcphdr *th);
+ const struct tcphdr *th,
+ u32 *tw_isn,
+ enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
- bool *lost_race);
+ bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
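
The RFC3168/AccECN mode bits above are deliberately encoded so that "both set" means negotiation is still pending, and tcp_ecn_mode_set() collapses that to a single mode. A standalone sketch replicating the encoding (same bit positions, stubbed tcp_sock) to show the transition:

#include <stdio.h>
#include <stdint.h>

#define MODE_RFC3168 (1u << 0)
#define MODE_ACCECN  (1u << 4)
#define MODE_ANY     (MODE_RFC3168 | MODE_ACCECN)
#define MODE_PENDING (MODE_RFC3168 | MODE_ACCECN)   /* both = undecided */

struct tp_sketch { uint8_t ecn_flags; };

static int mode_pending(const struct tp_sketch *tp)
{
	return (tp->ecn_flags & MODE_PENDING) == MODE_PENDING;
}

static void mode_set(struct tp_sketch *tp, uint8_t mode)
{
	tp->ecn_flags &= ~MODE_ANY;    /* clear both bits ... */
	tp->ecn_flags |= mode;         /* ... then pick exactly one (or none) */
}

int main(void)
{
	struct tp_sketch tp = { .ecn_flags = MODE_PENDING };

	printf("pending: %d\n", mode_pending(&tp));   /* 1 */
	mode_set(&tp, MODE_ACCECN);                   /* peer agreed to AccECN */
	printf("pending: %d, accecn: %d\n", mode_pending(&tp),
	       (tp.ecn_flags & MODE_ANY) == MODE_ACCECN);
	return 0;
}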
@@ -420,6 +456,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -667,10 +704,11 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
-void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority,
+ enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
-void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -681,11 +719,25 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
+void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);
+static inline void tcp_cleanup_skb(struct sk_buff *skb)
+{
+ skb_dst_drop(skb);
+ secpath_reset(skb);
+}
+
+static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
+{
+ DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
+ DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+}
+
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
@@ -732,6 +784,9 @@ void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor, bool noack,
+ u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);
@@ -742,10 +797,14 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
-static inline void tcp_bound_rto(const struct sock *sk)
+static inline unsigned int tcp_rto_max(const struct sock *sk)
{
- if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
- inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+ return READ_ONCE(inet_csk(sk)->icsk_rto_max);
+}
+
+static inline void tcp_bound_rto(struct sock *sk)
+{
+ inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
@@ -786,7 +845,7 @@ u32 tcp_delack_max(const struct sock *sk);
static inline u32 tcp_rto_min(const struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = inet_csk(sk)->icsk_rto_min;
+ u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@ -914,17 +973,37 @@ static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
-#define TCPHDR_FIN 0x01
-#define TCPHDR_SYN 0x02
-#define TCPHDR_RST 0x04
-#define TCPHDR_PSH 0x08
-#define TCPHDR_ACK 0x10
-#define TCPHDR_URG 0x20
-#define TCPHDR_ECE 0x40
-#define TCPHDR_CWR 0x80
-
+#define TCPHDR_FIN BIT(0)
+#define TCPHDR_SYN BIT(1)
+#define TCPHDR_RST BIT(2)
+#define TCPHDR_PSH BIT(3)
+#define TCPHDR_ACK BIT(4)
+#define TCPHDR_URG BIT(5)
+#define TCPHDR_ECE BIT(6)
+#define TCPHDR_CWR BIT(7)
+#define TCPHDR_AE BIT(8)
+#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
+ TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
+ TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
+#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
+ TCPHDR_FLAGS_MASK)
+
+#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+/* State flags for sacked in struct tcp_skb_cb */
+enum tcp_skb_cb_sacked_flags {
+ TCPCB_SACKED_ACKED = (1 << 0), /* SKB ACK'd by a SACK block */
+ TCPCB_SACKED_RETRANS = (1 << 1), /* SKB retransmitted */
+ TCPCB_LOST = (1 << 2), /* SKB is lost */
+ TCPCB_TAGBITS = (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
+ TCPCB_LOST), /* All tag bits */
+ TCPCB_REPAIRED = (1 << 4), /* SKB repaired (no skb_mstamp_ns) */
+ TCPCB_EVER_RETRANS = (1 << 7), /* Ever retransmitted frame */
+ TCPCB_RETRANS = (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
+ TCPCB_REPAIRED),
+};
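
Widening tcp_flags to 16 bits makes room for the AccECN AE bit, which lives in the low nibble of header byte 12 (next to the data offset), while byte 13 carries the classic eight flags. A standalone sketch of the tcp_flags_ntohs() extraction on a raw header; the F_* names are local stand-ins for the TCPHDR_* values above:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define F_FIN  (1u << 0)
#define F_SYN  (1u << 1)
#define F_ACK  (1u << 4)
#define F_AE   (1u << 8)               /* AccECN bit, header byte 12 */
#define F_MASK 0x1ffu                  /* AE + the eight classic flags */

int main(void)
{
	/* Minimal 20-byte TCP header: doff=5, AE set, SYN|ACK set. */
	uint8_t th[20] = { 0 };
	uint16_t raw, flags;

	th[12] = (5 << 4) | 0x1;       /* data offset 5, reserved=0, AE=1 */
	th[13] = 0x12;                 /* ACK | SYN */

	memcpy(&raw, &th[12], sizeof(raw));
	flags = ntohs(raw) & F_MASK;   /* same idea as tcp_flags_ntohs() */

	printf("AE=%d SYN=%d ACK=%d FIN=%d\n",
	       !!(flags & F_AE), !!(flags & F_SYN),
	       !!(flags & F_ACK), !!(flags & F_FIN));
	return 0;
}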
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission code.
* We also store the host-order sequence numbers in here too.
@@ -935,35 +1014,25 @@ struct tcp_skb_cb {
__u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
union {
- /* Note : tcp_tw_isn is used in input path only
- * (isn chosen by tcp_timewait_state_process())
- *
+ /* Note :
* tcp_gso_segs/size are used in write queue only,
* cf tcp_skb_pcount()/tcp_skb_mss()
*/
- __u32 tcp_tw_isn;
struct {
u16 tcp_gso_segs;
u16 tcp_gso_size;
};
};
- __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
+ __u16 tcp_flags; /* TCP header flags (tcp[12-13])*/
__u8 sacked; /* State flags for SACK. */
-#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
-#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
-#define TCPCB_LOST 0x04 /* SKB is lost */
-#define TCPCB_TAGBITS 0x07 /* All tag bits */
-#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
-#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
-#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
- TCPCB_REPAIRED)
-
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
- __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
+#define TSTAMP_ACK_SK 0x1
+#define TSTAMP_ACK_BPF 0x2
+ __u8 txstamp_ack:2, /* Record TX timestamp for ack? */
eor:1, /* Is skb MSG_EOR marked? */
has_rxtstamp:1, /* SKB has a RX timestamp */
- unused:5;
+ unused:4;
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
@@ -1068,9 +1137,18 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
+ /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
return likely(tcp_skb_can_collapse_to(to) &&
mptcp_skb_can_collapse(to, from) &&
- skb_pure_zcopy_same(to, from));
+ skb_pure_zcopy_same(to, from) &&
+ skb_frags_readable(to) == skb_frags_readable(from));
+}
+
+static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return likely(mptcp_skb_can_collapse(to, from) &&
+ !skb_cmp_decrypted(to, from));
}
/* Events passed to congestion control interface */
@@ -1100,9 +1178,9 @@ enum tcp_ca_ack_event_flags {
#define TCP_CA_UNSPEC 0
/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
-#define TCP_CONG_NON_RESTRICTED 0x1
+#define TCP_CONG_NON_RESTRICTED BIT(0)
/* Requires ECN/ECT set on all packets */
-#define TCP_CONG_NEEDS_ECN 0x2
+#define TCP_CONG_NEEDS_ECN BIT(1)
#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
union tcp_cc_info;
@@ -1167,7 +1245,7 @@ struct tcp_congestion_ops {
/* call when packets are delivered to update cwnd and pacing rate,
* after all the ca_state processing. (optional)
*/
- void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
+ void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
/* new value of cwnd after loss (required) */
@@ -1218,7 +1296,7 @@ extern struct tcp_congestion_ops tcp_reno;
struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
@@ -1416,10 +1494,12 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
static inline void tcp_reset_xmit_timer(struct sock *sk,
const int what,
unsigned long when,
- const unsigned long max_when)
+ bool pace_delay)
{
- inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
- max_when);
+ if (pace_delay)
+ when += tcp_pacing_delay(sk);
+ inet_csk_reset_xmit_timer(sk, what, when,
+ tcp_rto_max(sk));
}
/* Something is really bad, we could not queue an additional packet,
@@ -1448,7 +1528,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_probe0_base(sk), true);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1539,11 +1619,10 @@ static inline int tcp_space_from_win(const struct sock *sk, int win)
return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}
-/* Assume a conservative default of 1200 bytes of payload per 4K page.
+/* Assume a 50% default for skb->len/skb->truesize ratio.
* This may be adjusted later in tcp_measure_rcv_mss().
*/
-#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
- SKB_TRUESIZE(4096))
+#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
static inline void tcp_scaling_ratio_init(struct sock *sk)
{
@@ -1811,7 +1890,7 @@ int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
* @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
* @c: returned tcp_sigpool for usage (uninitialized on failure)
*
- * Returns 0 on success, error otherwise.
+ * Returns: 0 on success, error otherwise.
*/
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
/**
@@ -1858,12 +1937,6 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}
-enum skb_drop_reason
-tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int l3index, const __u8 *hash_location);
-
-
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
@@ -1880,13 +1953,6 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
return NULL;
}
-static inline enum skb_drop_reason
-tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int l3index, const __u8 *hash_location)
-{
- return SKB_NOT_DROPPED_YET;
-}
#define tcp_twsk_md5_key(twsk) NULL
#endif
@@ -2098,6 +2164,14 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
tcp_wmem_free_skb(sk, skb);
}
+static inline void tcp_write_collapse_fence(struct sock *sk)
+{
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
+
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+}
+
static inline void tcp_push_pending_frames(struct sock *sk)
{
if (tcp_send_head(sk)) {
@@ -2195,7 +2269,10 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
@@ -2284,7 +2361,8 @@ struct tcp_request_sock_ops {
struct dst_entry *(*route_req)(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
- struct request_sock *req);
+ struct request_sock *req,
+ u32 tw_isn);
u32 (*init_seq)(const struct sk_buff *skb);
u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
@@ -2369,21 +2447,15 @@ static inline void tcp_get_current_key(const struct sock *sk,
static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
-#ifdef CONFIG_TCP_MD5SIG
- if (static_branch_unlikely(&tcp_md5_needed.key) &&
- key->type == TCP_KEY_MD5)
- return true;
-#endif
+ if (static_branch_tcp_md5())
+ return key->type == TCP_KEY_MD5;
return false;
}
static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
-#ifdef CONFIG_TCP_AO
- if (static_branch_unlikely(&tcp_ao_needed.key) &&
- key->type == TCP_KEY_AO)
- return true;
-#endif
+ if (static_branch_tcp_ao())
+ return key->type == TCP_KEY_AO;
return false;
}
@@ -2431,14 +2503,35 @@ void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
+static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
+{
+ WARN_ONCE(cond,
+ "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
+ str,
+ tcp_snd_cwnd(tcp_sk(sk)),
+ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
+ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
+ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
+ inet_csk(sk)->icsk_ca_state,
+ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
+ inet_csk(sk)->icsk_pmtu_cookie);
+}
+
/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ if (likely(skb)) {
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ } else {
+ tcp_warn_once(sk, 1, "rtx queue empty: ");
+ return jiffies_to_usecs(rto);
+ }
}
/*
@@ -2551,8 +2644,8 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
- int (*get_info)(struct sock *sk, struct sk_buff *skb);
- size_t (*get_info_size)(const struct sock *sk);
+ int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
+ size_t (*get_info_size)(const struct sock *sk, bool net_admin);
/* clone ulp */
void (*clone)(const struct request_sock *req, struct sock *newsk,
const gfp_t priority);
@@ -2579,6 +2672,11 @@ struct sk_psock;
#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#ifdef CONFIG_BPF_STREAM_PARSER
+struct strparser;
+int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
+#endif /* CONFIG_BPF_STREAM_PARSER */
#endif /* CONFIG_BPF_SYSCALL */
#ifdef CONFIG_INET
@@ -2629,6 +2727,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
if (sk_fullsock(sk)) {
sock_ops.is_fullsock = 1;
+ sock_ops.is_locked_tcp_sock = 1;
sock_owned_by_me(sk);
}
@@ -2706,10 +2805,10 @@ static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}
-static inline void tcp_bpf_rtt(struct sock *sk)
+static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
{
if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
- tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
+ tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
}
#if IS_ENABLED(CONFIG_SMC)
@@ -2717,9 +2816,9 @@ extern struct static_key_false tcp_have_smc;
#endif
#if IS_ENABLED(CONFIG_TLS_DEVICE)
-void clean_acked_data_enable(struct inet_connection_sock *icsk,
+void clean_acked_data_enable(struct tcp_sock *tp,
void (*cad)(struct sock *sk, u32 ack_seq));
-void clean_acked_data_disable(struct inet_connection_sock *icsk);
+void clean_acked_data_disable(struct tcp_sock *tp);
void clean_acked_data_flush(void);
#endif
@@ -2795,66 +2894,9 @@ static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
return false;
}
-/* Called with rcu_read_lock() */
-static inline enum skb_drop_reason
-tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
- const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int dif, int sdif)
-{
- const struct tcphdr *th = tcp_hdr(skb);
- const struct tcp_ao_hdr *aoh;
- const __u8 *md5_location;
- int l3index;
-
- /* Invalid option or two times meet any of auth options */
- if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
- tcp_hash_fail("TCP segment has incorrect auth options set",
- family, skb, "");
- return SKB_DROP_REASON_TCP_AUTH_HDR;
- }
-
- if (req) {
- if (tcp_rsk_used_ao(req) != !!aoh) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
- tcp_hash_fail("TCP connection can't start/end using TCP-AO",
- family, skb, "%s",
- !aoh ? "missing AO" : "AO signed");
- return SKB_DROP_REASON_TCP_AOFAILURE;
- }
- }
-
- /* sdif set, means packet ingressed via a device
- * in an L3 domain and dif is set to the l3mdev
- */
- l3index = sdif ? dif : 0;
-
- /* Fast path: unsigned segments */
- if (likely(!md5_location && !aoh)) {
- /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid
- * for the remote peer. On TCP-AO established connection
- * the last key is impossible to remove, so there's
- * always at least one current_key.
- */
- if (tcp_ao_required(sk, saddr, family, l3index, true)) {
- tcp_hash_fail("AO hash is required, but not found",
- family, skb, "L3 index %d", l3index);
- return SKB_DROP_REASON_TCP_AONOTFOUND;
- }
- if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- tcp_hash_fail("MD5 Hash not found",
- family, skb, "L3 index %d", l3index);
- return SKB_DROP_REASON_TCP_MD5NOTFOUND;
- }
- return SKB_NOT_DROPPED_YET;
- }
-
- if (aoh)
- return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
-
- return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
- l3index, md5_location);
-}
+enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
+ const struct request_sock *req, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif);
#endif /* _TCP_H */
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 471e177362b4..df655ce6987d 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -19,6 +19,11 @@ struct tcp_ao_hdr {
u8 rnext_keyid;
};
+static inline u8 tcp_ao_hdr_maclen(const struct tcp_ao_hdr *aoh)
+{
+ return aoh->length - sizeof(struct tcp_ao_hdr);
+}
+
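
tcp_ao_hdr_maclen() above derives the MAC length from the option's length byte, which covers the whole option including the four fixed bytes. A self-contained check of the arithmetic, with a local stand-in struct mirroring the header layout:

#include <stdio.h>
#include <stdint.h>

struct tcp_ao_hdr_sketch {
	uint8_t kind;
	uint8_t length;        /* whole option, fixed header included */
	uint8_t keyid;
	uint8_t rnext_keyid;
};

static uint8_t maclen(const struct tcp_ao_hdr_sketch *aoh)
{
	return aoh->length - sizeof(*aoh);
}

int main(void)
{
	/* kind=29 (TCP-AO), 4 header bytes + 12 bytes of MAC => length 16 */
	struct tcp_ao_hdr_sketch aoh = { .kind = 29, .length = 16 };

	printf("maclen = %u\n", maclen(&aoh));   /* 12 */
	return 0;
}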
struct tcp_ao_counters {
atomic64_t pkt_good;
atomic64_t pkt_bad;
@@ -86,7 +91,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
struct tcp_ao_info {
/* List of tcp_ao_key's */
struct hlist_head head;
- /* current_key and rnext_key aren't maintained on listen sockets.
+ /* current_key and rnext_key are maintained on sockets
+ * in TCP_AO_ESTABLISHED states.
* Their purpose is to cache keys on established connections,
* saving needless lookups. Never dereference any of them from
* listen sockets.
@@ -143,43 +149,6 @@ extern struct static_key_false_deferred tcp_ao_needed;
#define static_branch_tcp_ao() false
#endif
-static inline bool tcp_hash_should_produce_warnings(void)
-{
- return static_branch_tcp_md5() || static_branch_tcp_ao();
-}
-
-#define tcp_hash_fail(msg, family, skb, fmt, ...) \
-do { \
- const struct tcphdr *th = tcp_hdr(skb); \
- char hdr_flags[6]; \
- char *f = hdr_flags; \
- \
- if (!tcp_hash_should_produce_warnings()) \
- break; \
- if (th->fin) \
- *f++ = 'F'; \
- if (th->syn) \
- *f++ = 'S'; \
- if (th->rst) \
- *f++ = 'R'; \
- if (th->psh) \
- *f++ = 'P'; \
- if (th->ack) \
- *f++ = '.'; \
- *f = 0; \
- if ((family) == AF_INET) { \
- net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
- msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
- &ip_hdr(skb)->daddr, ntohs(th->dest), \
- hdr_flags, ##__VA_ARGS__); \
- } else { \
- net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
- msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
- &ipv6_hdr(skb)->daddr, ntohs(th->dest), \
- hdr_flags, ##__VA_ARGS__); \
- } \
-} while (0)
-
#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
struct tcp4_ao_context {
@@ -201,9 +170,9 @@ struct tcp6_ao_context {
};
struct tcp_sigpool;
+/* Established states are fast-path and there is always a current_key/rnext_key */
#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
- TCPF_CLOSE | TCPF_CLOSE_WAIT | \
- TCPF_LAST_ACK | TCPF_CLOSING)
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_ao_key *key, struct tcphdr *th,
@@ -214,7 +183,8 @@ int tcp_ao_hash_skb(unsigned short int family,
const u8 *tkey, int hash_offset, u32 sne);
int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
sockptr_t optval, int optlen);
-struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
+ struct tcp_ao_info *ao,
int sndid, int rcvid);
int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
struct request_sock *req, struct sk_buff *skb,
diff --git a/include/net/tcx.h b/include/net/tcx.h
index 04be9377785d..5ce0ce9e0c02 100644
--- a/include/net/tcx.h
+++ b/include/net/tcx.h
@@ -13,7 +13,7 @@ struct mini_Qdisc;
struct tcx_entry {
struct mini_Qdisc __rcu *miniq;
struct bpf_mprog_bundle bundle;
- bool miniq_active;
+ u32 miniq_active;
struct rcu_head rcu;
};
@@ -75,9 +75,9 @@ tcx_entry_fetch(struct net_device *dev, bool ingress)
return rcu_dereference_rtnl(dev->tcx_egress);
}
-static inline struct bpf_mprog_entry *tcx_entry_create(void)
+static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void)
{
- struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL);
+ struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL);
if (tcx) {
bpf_mprog_bundle_init(&tcx->bundle);
@@ -85,6 +85,7 @@ static inline struct bpf_mprog_entry *tcx_entry_create(void)
}
return NULL;
}
+#define tcx_entry_create(...) alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__))
static inline void tcx_entry_free(struct bpf_mprog_entry *entry)
{
@@ -124,11 +125,16 @@ static inline void tcx_skeys_dec(bool ingress)
tcx_dec();
}
-static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
- const bool active)
+static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry)
{
ASSERT_RTNL();
- tcx_entry(entry)->miniq_active = active;
+ tcx_entry(entry)->miniq_active++;
+}
+
+static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry)
+{
+ ASSERT_RTNL();
+ tcx_entry(entry)->miniq_active--;
}
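Turning miniq_active from a bool into a counter lets several miniq users share one tcx entry without the last writer clobbering the others. A hedged sketch of the intended pairing, assuming attach/detach paths that already hold RTNL and fetched the entry via tcx_entry_fetch():

/* Sketch: each miniq user takes one reference at attach... */
static void example_miniq_attach(struct bpf_mprog_entry *entry)
{
	ASSERT_RTNL();
	tcx_miniq_inc(entry);
}

/* ...and drops exactly one at detach, keeping the count balanced. */
static void example_miniq_detach(struct bpf_mprog_entry *entry)
{
	ASSERT_RTNL();
	tcx_miniq_dec(entry);
}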
static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 74d2b463cc95..62b3e9f2aed4 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,18 +15,9 @@ struct timewait_sock_ops {
struct kmem_cache *twsk_slab;
char *twsk_slab_name;
unsigned int twsk_obj_size;
- int (*twsk_unique)(struct sock *sk,
- struct sock *sktw, void *twp);
void (*twsk_destructor)(struct sock *sk);
};
-static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
-{
- if (sk->sk_prot->twsk_prot->twsk_unique != NULL)
- return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp);
- return 0;
-}
-
static inline void twsk_destructor(struct sock *sk)
{
if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
diff --git a/include/net/tls.h b/include/net/tls.h
index 33f657d3c051..857340338b69 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -59,6 +59,8 @@ struct tls_rec;
#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
+#define TLS_HANDSHAKE_KEYUPDATE 24 /* rfc8446 B.3: Key update */
+
#define TLS_AAD_SPACE_SIZE 13
#define TLS_MAX_IV_SIZE 16
@@ -130,6 +132,7 @@ struct tls_sw_context_rx {
u8 async_capable:1;
u8 zc_capable:1;
u8 reader_contended:1;
+ bool key_update_pending;
struct tls_strparser strp;
@@ -362,7 +365,7 @@ static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
/* Use RCU on icsk_ulp_data only for sock diag code,
* TLS data path doesn't need rcu_dereference().
@@ -390,8 +393,12 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
- struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_context *ctx;
+
+ if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
+ return false;
+ ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_tx(ctx);
@@ -399,8 +406,12 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
- struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_context *ctx;
+
+ if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
+ return false;
+ ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_rx(ctx);
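With the sk_is_inet()/IS_ICSK guard, both helpers can now be probed on arbitrary sockets, since tls_get_ctx() reinterprets icsk_ulp_data and is only meaningful for inet connection sockets. A minimal sketch of a caller that is safe on any socket type (the function name is hypothetical):

/* Sketch: returns false for UDP/UNIX/etc. sockets before ever
 * touching inet_csk(sk)->icsk_ulp_data.
 */
static bool example_wants_tls_sw_tx(const struct sock *sk)
{
	return tls_sw_has_ctx_tx(sk);
}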
diff --git a/include/net/udp.h b/include/net/udp.h
index 488a6d2babcc..a772510b2aa5 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -50,39 +50,68 @@ struct udp_skb_cb {
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
/**
- * struct udp_hslot - UDP hash slot
+ * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
*
* @head: head of list of sockets
+ * @nulls_head: head of list of sockets, only used by hash4
* @count: number of sockets in 'head' list
* @lock: spinlock protecting changes to head/count
*/
struct udp_hslot {
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ /* hash4 uses hlist_nulls so that a lookup racing with rehash() can
+ * detect when it has wrongly moved onto another hlist.
+ */
+ struct hlist_nulls_head nulls_head;
+ };
int count;
spinlock_t lock;
-} __attribute__((aligned(2 * sizeof(long))));
+} __aligned(2 * sizeof(long));
+
+/**
+ * struct udp_hslot_main - UDP hash slot used by udp_table.hash2
+ *
+ * @hslot: basic hash slot
+ * @hash4_cnt: number of sockets in hslot4 of the same
+ * (local port, local address)
+ */
+struct udp_hslot_main {
+ struct udp_hslot hslot; /* must be the first member */
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ u32 hash4_cnt;
+#endif
+} __aligned(2 * sizeof(long));
+#define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot))
/**
* struct udp_table - UDP table
*
* @hash: hash table, sockets are hashed on (local port)
* @hash2: hash table, sockets are hashed on (local port, local address)
+ * @hash4: hash table, connected sockets are hashed on
+ * (local port, local address, remote port, remote address)
* @mask: number of slots in hash tables, minus 1
* @log: log2(number of slots in hash table)
*/
struct udp_table {
struct udp_hslot *hash;
- struct udp_hslot *hash2;
+ struct udp_hslot_main *hash2;
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ struct udp_hslot *hash4;
+#endif
unsigned int mask;
unsigned int log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
- struct net *net, unsigned int num)
+ const struct net *net,
+ unsigned int num)
{
return &table->hash[udp_hashfn(net, num, table->mask)];
}
+
/*
* For secondary hash, net_hash_mix() is performed before calling
* udp_hashslot2(), this explains difference with udp_hashslot()
@@ -90,9 +119,90 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
unsigned int hash)
{
- return &table->hash2[hash & table->mask];
+ return &table->hash2[hash & table->mask].hslot;
+}
+
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static inline void udp_table_hash4_init(struct udp_table *table)
+{
+}
+
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ BUILD_BUG();
+ return NULL;
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return false;
+}
+
+static inline unsigned int udp_hash4_slot_size(void)
+{
+ return 0;
+}
+
+static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
+{
+ return false;
+}
+
+static inline void udp_hash4_inc(struct udp_hslot *hslot2)
+{
}
+static inline void udp_hash4_dec(struct udp_hslot *hslot2)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+
+/* Must be called with table->hash2 initialized */
+static inline void udp_table_hash4_init(struct udp_table *table)
+{
+ table->hash4 = (void *)(table->hash2 + (table->mask + 1));
+ for (int i = 0; i <= table->mask; i++) {
+ table->hash2[i].hash4_cnt = 0;
+
+ INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
+ table->hash4[i].count = 0;
+ spin_lock_init(&table->hash4[i].lock);
+ }
+}
+
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ return &table->hash4[hash & table->mask];
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);
+}
+
+static inline unsigned int udp_hash4_slot_size(void)
+{
+ return sizeof(struct udp_hslot);
+}
+
+static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
+{
+ return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
+}
+
+static inline void udp_hash4_inc(struct udp_hslot *hslot2)
+{
+ UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;
+}
+
+static inline void udp_hash4_dec(struct udp_hslot *hslot2)
+{
+ UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
+}
+#endif /* CONFIG_BASE_SMALL */
+
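Keeping hash4_cnt inside the hash2 slot means the connected-socket table costs only a counter check on slots that never saw a connect(). A hedged sketch of how a lookup might use these helpers, assuming hash2/hash4 values computed by the caller (the function itself is illustrative, not from the patch):

/* Sketch: prefer the 4-tuple table only when this (port, address)
 * slot actually has connected sockets hashed there.
 */
static struct udp_hslot *example_pick_slot(struct udp_table *table,
					   unsigned int hash2,
					   unsigned int hash4)
{
	struct udp_hslot *hslot2 = udp_hashslot2(table, hash2);

	if (udp_has_hash4(hslot2))
		return udp_hashslot4(table, hash4);

	return hslot2;	/* fall back to the 2-tuple slot */
}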
extern struct proto udp_prot;
extern atomic_long_t udp_memory_allocated;
@@ -180,6 +290,7 @@ static inline void udp_lib_init_sock(struct sock *sk)
struct udp_sock *up = udp_sk(sk);
skb_queue_head_init(&up->reader_queue);
+ INIT_HLIST_NODE(&up->tunnel_list);
up->forward_threshold = sk->sk_rcvbuf >> 2;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
}
@@ -192,13 +303,29 @@ static inline int udp_lib_hash(struct sock *sk)
}
void udp_lib_unhash(struct sock *sk);
-void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4);
+u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
+ const __be32 faddr, const __be16 fport);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
+/* hash4 routines shared between UDPv4/6 */
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static inline void udp_lib_hash4(struct sock *sk, u16 hash)
+{
+}
+
+static inline void udp4_hash4(struct sock *sk)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+void udp_lib_hash4(struct sock *sk, u16 hash);
+void udp4_hash4(struct sock *sk);
+#endif /* CONFIG_BASE_SMALL */
+
int udp_lib_get_port(struct sock *sk, unsigned short snum,
unsigned int hash2_nulladdr);
@@ -231,7 +358,7 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
/* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
+ * to minimize possibility that any useful information to an
* attacker is leaked. Only upper 16 bits are relevant in the
* computation for 16 bit port value.
*/
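A plain re-statement of the fold this comment describes, as an illustration only, under the assumption that the folded hash is then scaled into the [min, max) port range:

/* Sketch: mix the low 16 bits into the high 16 bits so the port,
 * which only depends on the upper half, reflects the whole hash.
 */
static inline u16 example_obfuscated_port(u32 hash, u16 min, u16 max)
{
	hash ^= hash << 16;
	return (u16)(((u64)hash * (max - min)) >> 32) + min;
}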
@@ -245,7 +372,7 @@ static inline int udp_rqueue_get(struct sock *sk)
return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
-static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+static inline bool udp_sk_bound_dev_eq(const struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
@@ -296,18 +423,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *));
-struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
-struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
+ __be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif,
struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
-struct sock *udp6_lib_lookup(struct net *net,
+struct sock *udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif);
-struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
@@ -379,14 +507,7 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb)
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
struct iov_iter *to)
{
- int n;
-
- n = copy_to_iter(skb->data + off, len, to);
- if (n == len)
- return 0;
-
- iov_iter_revert(to, n);
- return -EFAULT;
+ return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT;
}
/*
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index d716214fe03d..2df3b8344eb5 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -179,8 +179,8 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
struct dst_cache *dst_cache);
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
- __be16 flags, __be64 tunnel_id,
- int md_size);
+ const unsigned long *flags,
+ __be64 tunnel_id, int md_size);
#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
@@ -191,6 +191,21 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
}
#endif
+#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
+void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
+#else
+static inline void udp_tunnel_update_gro_lookup(struct net *net,
+ struct sock *sk, bool add) {}
+static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
+#endif
+
+static inline void udp_tunnel_cleanup_gro(struct sock *sk)
+{
+ udp_tunnel_update_gro_rcv(sk, false);
+ udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
+}
+
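udp_tunnel_cleanup_gro() bundles both GRO de-registrations for socket teardown. A trivial sketch of where a tunnel driver would call it (the release function is hypothetical):

/* Sketch: one call undoes both GRO registrations at socket release. */
static void example_tunnel_sock_release(struct sock *sk)
{
	udp_tunnel_cleanup_gro(sk);
}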
static inline void udp_tunnel_encap_enable(struct sock *sk)
{
if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 33ba6fc151cf..e2f7ca045d3e 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -227,6 +227,7 @@ struct vxlan_config {
unsigned int addrmax;
bool no_share;
enum ifla_vxlan_df df;
+ struct vxlanhdr reserved_bits;
};
enum {
@@ -295,7 +296,7 @@ struct vxlan_dev {
struct vxlan_rdst default_dst; /* default destination */
struct timer_list age_timer;
- spinlock_t hash_lock[FDB_HASH_SIZE];
+ spinlock_t hash_lock;
unsigned int addrcnt;
struct gro_cells gro_cells;
@@ -303,9 +304,10 @@ struct vxlan_dev {
struct vxlan_vni_group __rcu *vnigrp;
- struct hlist_head fdb_head[FDB_HASH_SIZE];
+ struct rhashtable fdb_hash_tbl;
struct rhashtable mdb_tbl;
+ struct hlist_head fdb_list;
struct hlist_head mdb_list;
unsigned int mdb_seq;
};
diff --git a/include/net/x25.h b/include/net/x25.h
index 597eb53c471e..5e833cfc864e 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -81,7 +81,7 @@ enum {
#define X25_DEFAULT_WINDOW_SIZE 2 /* Default Window Size */
#define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */
-#define X25_DEFAULT_THROUGHPUT 0x0A /* Deafult Throughput */
+#define X25_DEFAULT_THROUGHPUT 0x0A /* Default Throughput */
#define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */
#define X25_SMODULUS 8
diff --git a/include/net/xdp.h b/include/net/xdp.h
index e6770dd40c91..b40f1f96cb11 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* skb_shared_info */
+#include <net/page_pool/types.h>
+
/**
* DOC: XDP RX-queue information
*
@@ -62,7 +64,6 @@ struct xdp_rxq_info {
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
- unsigned int napi_id;
u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
@@ -88,7 +89,7 @@ struct xdp_buff {
u32 flags; /* supported values defined in xdp_buff_flags */
};
-static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
+static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp)
{
return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}
@@ -103,7 +104,8 @@ static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}
-static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
+static __always_inline bool
+xdp_buff_is_frag_pfmemalloc(const struct xdp_buff *xdp)
{
return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}
@@ -144,15 +146,16 @@ xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
static inline struct skb_shared_info *
-xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
+xdp_get_shared_info_from_buff(const struct xdp_buff *xdp)
{
return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
-static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
+static __always_inline unsigned int
+xdp_get_buff_len(const struct xdp_buff *xdp)
{
unsigned int len = xdp->data_end - xdp->data;
- struct skb_shared_info *sinfo;
+ const struct skb_shared_info *sinfo;
if (likely(!xdp_buff_has_frags(xdp)))
goto out;
@@ -163,26 +166,114 @@ out:
return len;
}
+void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp);
+
+/**
+ * __xdp_buff_add_frag - attach frag to &xdp_buff
+ * @xdp: XDP buffer to attach the frag to
+ * @netmem: network memory containing the frag
+ * @offset: offset at which the frag starts
+ * @size: size of the frag
+ * @truesize: total memory size occupied by the frag
+ * @try_coalesce: whether to try coalescing the frags (not valid for XSk)
+ *
+ * Attach frag to the XDP buffer. If it currently has no frags attached,
+ * initialize the related fields, otherwise check that the frag number
+ * didn't reach the limit of ``MAX_SKB_FRAGS``. If possible, try coalescing
+ * the frag with the previous one.
+ * The function doesn't check/update the pfmemalloc bit. Please use the
+ * non-underscored wrapper in drivers.
+ *
+ * Return: true on success, false if there's no space for the frag in
+ * the shared info struct.
+ */
+static inline bool __xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
+ u32 offset, u32 size, u32 truesize,
+ bool try_coalesce)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *prev;
+ u32 nr_frags;
+
+ if (!xdp_buff_has_frags(xdp)) {
+ xdp_buff_set_frags_flag(xdp);
+
+ nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ sinfo->xdp_frags_truesize = 0;
+
+ goto fill;
+ }
+
+ nr_frags = sinfo->nr_frags;
+ prev = &sinfo->frags[nr_frags - 1];
+
+ if (try_coalesce && netmem == skb_frag_netmem(prev) &&
+ offset == skb_frag_off(prev) + skb_frag_size(prev)) {
+ skb_frag_size_add(prev, size);
+ /* Guaranteed to only decrement the refcount */
+ xdp_return_frag(netmem, xdp);
+ } else if (unlikely(nr_frags == MAX_SKB_FRAGS)) {
+ return false;
+ } else {
+fill:
+ __skb_fill_netmem_desc_noacc(sinfo, nr_frags++, netmem,
+ offset, size);
+ }
+
+ sinfo->nr_frags = nr_frags;
+ sinfo->xdp_frags_size += size;
+ sinfo->xdp_frags_truesize += truesize;
+
+ return true;
+}
+
+/**
+ * xdp_buff_add_frag - attach frag to &xdp_buff
+ * @xdp: XDP buffer to attach the frag to
+ * @netmem: network memory containing the frag
+ * @offset: offset at which the frag starts
+ * @size: size of the frag
+ * @truesize: total memory size occupied by the frag
+ *
+ * Version of __xdp_buff_add_frag() which takes care of the pfmemalloc bit.
+ *
+ * Return: true on success, false if there's no space for the frag in
+ * the shared info struct.
+ */
+static inline bool xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
+ u32 offset, u32 size, u32 truesize)
+{
+ if (!__xdp_buff_add_frag(xdp, netmem, offset, size, truesize, true))
+ return false;
+
+ if (unlikely(netmem_is_pfmemalloc(netmem)))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ return true;
+}
+
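A hedged sketch of a driver multi-buffer RX path built on the wrapper above (the rx_netmem/rx_off/rx_len names are hypothetical driver state):

/* Sketch: append one RX fragment; on overflow drop the whole buffer. */
static bool example_rx_add_frag(struct xdp_buff *xdp, netmem_ref rx_netmem,
				u32 rx_off, u32 rx_len, u32 truesize)
{
	if (!xdp_buff_add_frag(xdp, rx_netmem, rx_off, rx_len, truesize)) {
		/* shared info already holds MAX_SKB_FRAGS fragments */
		xdp_return_buff(xdp);
		return false;
	}

	return true;
}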
struct xdp_frame {
void *data;
- u16 len;
- u16 headroom;
+ u32 len;
+ u32 headroom;
u32 metasize; /* uses lower 8-bits */
/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
- * while mem info is valid on remote CPU.
+ * while mem_type is valid on remote CPU.
*/
- struct xdp_mem_info mem;
+ enum xdp_mem_type mem_type:32;
struct net_device *dev_rx; /* used by cpumap */
u32 frame_sz;
u32 flags; /* supported values defined in xdp_buff_flags */
};
-static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
+static __always_inline bool xdp_frame_has_frags(const struct xdp_frame *frame)
{
return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}
-static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
+static __always_inline bool
+xdp_frame_is_frag_pfmemalloc(const struct xdp_frame *frame)
{
return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}
@@ -190,18 +281,16 @@ static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame
#define XDP_BULK_QUEUE_SIZE 16
struct xdp_frame_bulk {
int count;
- void *xa;
- void *q[XDP_BULK_QUEUE_SIZE];
+ netmem_ref q[XDP_BULK_QUEUE_SIZE];
};
static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
- /* bq->count will be zero'ed when bq->xa gets updated */
- bq->xa = NULL;
+ bq->count = 0;
}
static inline struct skb_shared_info *
-xdp_get_shared_info_from_frame(struct xdp_frame *frame)
+xdp_get_shared_info_from_frame(const struct xdp_frame *frame)
{
void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
@@ -227,7 +316,14 @@ xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
unsigned int size, unsigned int truesize,
bool pfmemalloc)
{
- skb_shinfo(skb)->nr_frags = nr_frags;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ sinfo->nr_frags = nr_frags;
+ /*
+ * ``destructor_arg`` shares a union with ``xdp_frags_{,true}size``;
+ * reset it here, since those fields are no longer used.
+ */
+ sinfo->destructor_arg = NULL;
skb->len += size;
skb->data_len += size;
@@ -239,17 +335,19 @@ xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
+struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp);
+struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp);
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *skb,
struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev);
-int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);
static inline
-void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
+void xdp_convert_frame_to_buff(const struct xdp_frame *frame,
+ struct xdp_buff *xdp)
{
xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
xdp->data = frame->data;
@@ -260,7 +358,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
}
static inline
-int xdp_update_frame_from_buff(struct xdp_buff *xdp,
+int xdp_update_frame_from_buff(const struct xdp_buff *xdp,
struct xdp_frame *xdp_frame)
{
int metasize, headroom;
@@ -302,24 +400,33 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
return NULL;
- /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
- xdp_frame->mem = xdp->rxq->mem;
+ /* rxq only valid until napi_schedule ends, convert to xdp_mem_type */
+ xdp_frame->mem_type = xdp->rxq->mem.type;
return xdp_frame;
}
-void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
- struct xdp_buff *xdp);
+void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
+ bool napi_direct, struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
-void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
struct xdp_frame_bulk *bq);
-static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
+static inline void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
+{
+ if (unlikely(!bq->count))
+ return;
+
+ page_pool_put_netmem_bulk(bq->q, bq->count);
+ bq->count = 0;
+}
+
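With the allocator pointer (xa) gone, frames from different page pools can share one bulk queue and be flushed together. A sketch of a Tx-completion loop (locking/NAPI context elided; frames/n are hypothetical driver state):

/* Sketch: queue finished frames, then release them in one bulk put. */
static void example_complete_tx(struct xdp_frame **frames, int n)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	for (i = 0; i < n; i++)
		xdp_return_frame_bulk(frames[i], &bq);

	xdp_flush_frame_bulk(&bq);	/* flush whatever is still queued */
}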
+static __always_inline unsigned int
+xdp_get_frame_len(const struct xdp_frame *xdpf)
{
- struct skb_shared_info *sinfo;
+ const struct skb_shared_info *sinfo;
unsigned int len = xdpf->len;
if (likely(!xdp_frame_has_frags(xdpf)))
@@ -351,6 +458,38 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
+int xdp_reg_page_pool(struct page_pool *pool);
+void xdp_unreg_page_pool(const struct page_pool *pool);
+void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
+ const struct page_pool *pool);
+
+/**
+ * xdp_rxq_info_attach_mem_model - attach registered mem info to RxQ info
+ * @xdp_rxq: XDP RxQ info to attach the memory info to
+ * @mem: already registered memory info
+ *
+ * If the driver registers its memory providers manually, it must use this
+ * function instead of xdp_rxq_info_reg_mem_model().
+ */
+static inline void
+xdp_rxq_info_attach_mem_model(struct xdp_rxq_info *xdp_rxq,
+ const struct xdp_mem_info *mem)
+{
+ xdp_rxq->mem = *mem;
+}
+
+/**
+ * xdp_rxq_info_detach_mem_model - detach registered mem info from RxQ info
+ * @xdp_rxq: XDP RxQ info to detach the memory info from
+ *
+ * If the driver registers its memory provider manually and then attaches it
+ * via xdp_rxq_info_attach_mem_model(), it must call this function before
+ * xdp_rxq_info_unreg().
+ */
+static inline void xdp_rxq_info_detach_mem_model(struct xdp_rxq_info *xdp_rxq)
+{
+ xdp_rxq->mem = (struct xdp_mem_info){ };
+}
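A hedged sketch of the manual flow the two kdocs describe, assuming the driver owns the xdp_mem_info and a page pool (function names are illustrative):

/* Sketch: register the memory model once, then attach it to the RxQ. */
static int example_mem_model_setup(struct xdp_rxq_info *rxq,
				   struct xdp_mem_info *mem,
				   struct page_pool *pool)
{
	int err;

	err = xdp_reg_mem_model(mem, MEM_TYPE_PAGE_POOL, pool);
	if (err)
		return err;

	xdp_rxq_info_attach_mem_model(rxq, mem);
	return 0;
}

/* Sketch: detach before xdp_rxq_info_unreg(), then unregister. */
static void example_mem_model_teardown(struct xdp_rxq_info *rxq,
				       struct xdp_mem_info *mem)
{
	xdp_rxq_info_detach_mem_model(rxq);
	xdp_unreg_mem_model(mem);
}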
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
@@ -477,8 +616,12 @@ struct xdp_metadata_ops {
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
+void xdp_set_features_flag_locked(struct net_device *dev, xdp_features_t val);
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
+void xdp_features_set_redirect_target_locked(struct net_device *dev,
+ bool support_sg);
void xdp_features_clear_redirect_target(struct net_device *dev);
+void xdp_features_clear_redirect_target_locked(struct net_device *dev);
#else
static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 3d54de168a6d..e8bd6ddb7b12 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -71,9 +71,6 @@ struct xdp_sock {
*/
u32 tx_budget_spent;
- /* Protects generic receive. */
- spinlock_t rx_lock;
-
/* Statistics */
u64 rx_dropped;
u64 rx_queue_full;
@@ -110,18 +107,23 @@ struct xdp_sock {
* indicates position where checksumming should start.
* csum_offset indicates position where checksum should be stored.
*
+ * void (*tmo_request_launch_time)(u64 launch_time, void *priv)
+ * Called when an AF_XDP frame requests launch time HW offload support.
+ * launch_time indicates the PTP time at which the device can schedule the
+ * packet for transmission.
*/
struct xsk_tx_metadata_ops {
void (*tmo_request_timestamp)(void *priv);
u64 (*tmo_fill_timestamp)(void *priv);
void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
+ void (*tmo_request_launch_time)(u64 launch_time, void *priv);
};
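A hedged sketch of the driver side of the new callback, with a hypothetical per-queue private struct; a real driver would point its net_device's xsk_tx_metadata_ops at this table and apply the stored time when posting the descriptor:

struct example_txq {
	u64 launch_time;	/* PTP nanoseconds, consumed at doorbell */
};

static void example_tmo_request_launch_time(u64 launch_time, void *priv)
{
	struct example_txq *txq = priv;

	txq->launch_time = launch_time;
}

static const struct xsk_tx_metadata_ops example_xsk_tmo = {
	.tmo_request_launch_time = example_tmo_request_launch_time,
};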
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
-void __xsk_map_flush(void);
+void __xsk_map_flush(struct list_head *flush_list);
/**
* xsk_tx_metadata_to_compl - Save enough relevant metadata information
@@ -162,6 +164,11 @@ static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
if (!meta)
return;
+ if (ops->tmo_request_launch_time)
+ if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
+ ops->tmo_request_launch_time(meta->request.launch_time,
+ priv);
+
if (ops->tmo_request_timestamp)
if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
ops->tmo_request_timestamp(priv);
@@ -206,7 +213,7 @@ static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
return -EOPNOTSUPP;
}
-static inline void __xsk_map_flush(void)
+static inline void __xsk_map_flush(struct list_head *flush_list)
{
}
@@ -228,14 +235,4 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
}
#endif /* CONFIG_XDP_SOCKETS */
-
-#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
-bool xsk_map_check_flush(void);
-#else
-static inline bool xsk_map_check_flush(void)
-{
- return false;
-}
-#endif
-
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index c9aec9ab6191..513c8e9704f6 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
xp_fill_cb(pool, desc);
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- return pool->heads[0].xdp.rxq->napi_id;
-#else
- return 0;
-#endif
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
@@ -101,7 +92,7 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return xp_alloc(pool);
}
-static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
return !xp_mb_desc(desc);
}
@@ -126,8 +117,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
if (likely(!xdp_buff_has_frags(xdp)))
goto out;
- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
- list_del(&pos->xskb_list_node);
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ list_del(&pos->list_node);
xp_free(pos);
}
@@ -136,23 +127,33 @@ out:
xp_free(xskb);
}
-static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+static inline bool xsk_buff_add_frag(struct xdp_buff *head,
+ struct xdp_buff *xdp)
{
- struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+ const void *data = xdp->data;
+ struct xdp_buff_xsk *frag;
+
+ if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
+ offset_in_page(data), xdp->data_end - data,
+ xdp->frame_sz, false))
+ return false;
+
+ frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+ list_add_tail(&frag->list_node, &frag->pool->xskb_list);
- list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
+ return true;
}
-static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
struct xdp_buff *ret = NULL;
struct xdp_buff_xsk *frag;
frag = list_first_entry_or_null(&xskb->pool->xskb_list,
- struct xdp_buff_xsk, xskb_list_node);
+ struct xdp_buff_xsk, list_node);
if (frag) {
- list_del(&frag->xskb_list_node);
+ list_del(&frag->list_node);
ret = &frag->xdp;
}
@@ -163,7 +164,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
- list_del(&xskb->xskb_list_node);
+ list_del(&xskb->list_node);
}
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
@@ -172,7 +173,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
struct xdp_buff_xsk *frag;
frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
- xskb_list_node);
+ list_node);
return &frag->xdp;
}
@@ -195,36 +196,59 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return xp_raw_get_data(pool, addr);
}
+/**
+ * xsk_buff_raw_get_ctx - get &xdp_desc context
+ * @pool: XSk buff pool the desc address belongs to
+ * @addr: desc address (from userspace)
+ *
+ * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for
+ * details.
+ *
+ * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
+ * pointer, if it is present and valid (initialized to %NULL otherwise).
+ */
+static inline struct xdp_desc_ctx
+xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_raw_get_ctx(pool, addr);
+}
+
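A hedged sketch of a zero-copy Tx path using the context helper, assuming a descriptor already dequeued from the Tx ring (the hardware-posting step is driver-specific and omitted):

/* Sketch: one call yields both the DMA address and, when the pool
 * carries Tx metadata, a validated metadata pointer.
 */
static void example_xsk_tx_one(struct xsk_buff_pool *pool,
			       const struct xdp_desc *desc)
{
	struct xdp_desc_ctx ctx = xsk_buff_raw_get_ctx(pool, desc->addr);

	if (ctx.meta) {
		/* honor timestamp/checksum/launch-time requests here */
	}

	/* post ctx.dma and desc->len to the hardware ring (omitted) */
}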
#define XDP_TXMD_FLAGS_VALID ( \
XDP_TXMD_FLAGS_TIMESTAMP | \
XDP_TXMD_FLAGS_CHECKSUM | \
+ XDP_TXMD_FLAGS_LAUNCH_TIME | \
0)
-static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+static inline bool
+xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}
-static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+static inline struct xsk_tx_metadata *
+__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
struct xsk_tx_metadata *meta;
if (!pool->tx_metadata_len)
return NULL;
- meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
+ meta = data - pool->tx_metadata_len;
if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
return NULL; /* no way to signal the error to the user */
return meta;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+static inline struct xsk_tx_metadata *
+xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
- struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
+}
- if (!pool->dma_need_sync)
- return;
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
xp_dma_sync_for_cpu(xskb);
}
@@ -309,11 +333,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
{
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
- return 0;
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
@@ -340,7 +359,7 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return NULL;
}
-static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
return false;
}
@@ -359,11 +378,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
-static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+static inline bool xsk_buff_add_frag(struct xdp_buff *head,
+ struct xdp_buff *xdp)
{
+ return false;
}
-static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
return NULL;
}
@@ -392,17 +413,30 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return NULL;
}
+static inline struct xdp_desc_ctx
+xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
+{
+ return (struct xdp_desc_ctx){ };
+}
+
static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
return false;
}
-static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+static inline struct xsk_tx_metadata *
+__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
+{
+ return NULL;
+}
+
+static inline struct xsk_tx_metadata *
+xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
return NULL;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 57c743b7e4fe..a21e276dbe44 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -19,6 +19,7 @@
#include <net/sock.h>
#include <net/dst.h>
+#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ipv6.h>
@@ -37,6 +38,7 @@
#define XFRM_PROTO_COMP 108
#define XFRM_PROTO_IPIP 4
#define XFRM_PROTO_IPV6 41
+#define XFRM_PROTO_IPTFS IPPROTO_AGGFRAG
#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
@@ -67,27 +69,27 @@
- instance of a transformer, struct xfrm_state (=SA)
- template to clone xfrm_state, struct xfrm_tmpl
- SPD is plain linear list of xfrm_policy rules, ordered by priority.
+ SPD is organized as a hash table (for policies that meet the minimum address
+ prefix length setting, net->xfrm.policy_hthresh). Other policies are stored
+ in lists, sorted into an rbtree ordered by destination and source address
+ networks.
+ See net/xfrm/xfrm_policy.c for details.
+
(To be compatible with existing pfkeyv2 implementations,
many rules with priority of 0x7fffffff are allowed to exist and
such rules are ordered in an unpredictable way, thanks to bsd folks.)
- Lookup is plain linear search until the first match with selector.
-
If "action" is "block", then we prohibit the flow, otherwise:
if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
policy entry has list of up to XFRM_MAX_DEPTH transformations,
described by templates xfrm_tmpl. Each template is resolved
to a complete xfrm_state (see below) and we pack bundle of transformations
- to a dst_entry returned to requestor.
+ to a dst_entry returned to requester.
dst -. xfrm .-> xfrm_state #1
|---. child .-> dst -. xfrm .-> xfrm_state #2
|---. child .-> dst -. xfrm .-> xfrm_state #3
|---. child .-> NULL
- Bundles are cached at xrfm_policy struct (field ->bundles).
-
Resolution of xrfm_tmpl
-----------------------
@@ -145,8 +147,19 @@ enum {
};
struct xfrm_dev_offload {
+ /* The device for this offload.
+ * Device drivers should not use this directly, as that will prevent
+ * them from working with bonding device. Instead, the device passed
+ * to the add/delete callbacks should be used.
+ */
struct net_device *dev;
netdevice_tracker dev_tracker;
+ /* This is a private pointer used by the bonding driver (and eventually
+ * should be moved there). Device drivers should not use it.
+ * Protected by xfrm_state.lock AND bond.ipsec_lock in most cases,
+ * except in the .xdo_dev_state_del() flow, where only xfrm_state.lock
+ * is held.
+ */
struct net_device *real_dev;
unsigned long offload_handle;
u8 dir : 2;
@@ -178,13 +191,19 @@ struct xfrm_state {
struct hlist_node gclist;
struct hlist_node bydst;
};
- struct hlist_node bysrc;
+ union {
+ struct hlist_node dev_gclist;
+ struct hlist_node bysrc;
+ };
struct hlist_node byspi;
struct hlist_node byseq;
+ struct hlist_node state_cache;
+ struct hlist_node state_cache_input;
refcount_t refcnt;
spinlock_t lock;
+ u32 pcpu_num;
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
@@ -206,6 +225,7 @@ struct xfrm_state {
u16 family;
xfrm_address_t saddr;
int header_len;
+ int enc_hdr_len;
int trailer_len;
u32 extra_flags;
struct xfrm_mark smark;
@@ -227,7 +247,10 @@ struct xfrm_state {
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
- struct sock __rcu *encap_sk;
+
+ /* NAT keepalive */
+ u32 nat_keepalive_interval; /* seconds */
+ time64_t nat_keepalive_expiration;
/* Data for care-of address */
xfrm_address_t *coaddr;
@@ -291,6 +314,10 @@ struct xfrm_state {
/* Private data of this transformer, format is opaque,
* interpreted by xfrm_type methods. */
void *data;
+ u8 dir;
+
+ const struct xfrm_mode_cbs *mode_cbs;
+ void *mode_data;
};
static inline struct net *xs_net(struct xfrm_state *x)
@@ -341,20 +368,25 @@ struct xfrm_if_cb {
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);
+struct xfrm_dst_lookup_params {
+ struct net *net;
+ dscp_t dscp;
+ int oif;
+ xfrm_address_t *saddr;
+ xfrm_address_t *daddr;
+ u32 mark;
+ __u8 ipproto;
+ union flowi_uli uli;
+};
+
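Callers now fill one parameter block instead of threading a growing argument list through every afinfo hook. A hedged sketch against the __xfrm_dst_lookup() prototype further down (values are placeholders):

/* Sketch: an IPv4 route lookup through the consolidated parameters. */
static struct dst_entry *example_dst_lookup(struct net *net, dscp_t dscp,
					    xfrm_address_t *saddr,
					    xfrm_address_t *daddr)
{
	struct xfrm_dst_lookup_params params = {
		.net	= net,
		.dscp	= dscp,
		.oif	= 0,
		.saddr	= saddr,
		.daddr	= daddr,
	};

	return __xfrm_dst_lookup(AF_INET, &params);
}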
struct net_device;
struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
struct dst_ops *dst_ops;
- struct dst_entry *(*dst_lookup)(struct net *net,
- int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- u32 mark);
- int (*get_saddr)(struct net *net, int oif,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr,
- u32 mark);
+ struct dst_entry *(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
+ int (*get_saddr)(xfrm_address_t *saddr,
+ const struct xfrm_dst_lookup_params *params);
int (*fill_dst)(struct xfrm_dst *xdst,
struct net_device *dev,
const struct flowi *fl);
@@ -442,6 +474,54 @@ struct xfrm_type_offload {
int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+void xfrm_set_type_offload(struct xfrm_state *x);
+static inline void xfrm_unset_type_offload(struct xfrm_state *x)
+{
+ if (!x->type_offload)
+ return;
+
+ module_put(x->type_offload->owner);
+ x->type_offload = NULL;
+}
+
+/**
+ * struct xfrm_mode_cbs - XFRM mode callbacks
+ * @owner: module owner or NULL
+ * @init_state: Add/init mode specific state in `xfrm_state *x`
+ * @clone_state: Copy mode specific values from `orig` to new state `x`
+ * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
+ * @user_init: Process mode specific netlink attributes from user
+ * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
+ * @sa_len: Return space required to store mode specific netlink attributes
+ * @get_inner_mtu: Return available payload space after removing encap overhead
+ * @input: Process received packet from SA using mode
+ * @output: Output given packet using mode
+ * @prepare_output: Add mode specific encapsulation to packet in skb. On return
+ * `transport_header` should point at the ESP header, `network_header`
+ * should point at the outer IP header, and `mac_header` should point at
+ * the protocol/nexthdr field of the outer IP header.
+ *
+ * One should examine and understand the specific uses of these callbacks in
+ * xfrm for further detail on how and when these functions are called. RTSL.
+ */
+struct xfrm_mode_cbs {
+ struct module *owner;
+ int (*init_state)(struct xfrm_state *x);
+ int (*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
+ void (*destroy_state)(struct xfrm_state *x);
+ int (*user_init)(struct net *net, struct xfrm_state *x,
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack);
+ int (*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
+ unsigned int (*sa_len)(const struct xfrm_state *x);
+ u32 (*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
+ int (*input)(struct xfrm_state *x, struct sk_buff *skb);
+ int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ int (*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
+};
+
+int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
+void xfrm_unregister_mode_cbs(u8 mode);
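A hedged sketch of a mode module wiring up the callback table; XFRM_MODE_IPTFS is an assumption tied to the IPTFS/AGGFRAG additions earlier in this file, and the stub bodies stand in for real per-mode state handling:

static int example_init_state(struct xfrm_state *x)
{
	/* a real mode would allocate x->mode_data here */
	return 0;
}

static void example_destroy_state(struct xfrm_state *x)
{
	/* ...and free x->mode_data here */
}

static const struct xfrm_mode_cbs example_mode_cbs = {
	.owner		= THIS_MODULE,
	.init_state	= example_init_state,
	.destroy_state	= example_destroy_state,
};

static int __init example_mode_init(void)
{
	return xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &example_mode_cbs);
}

static void __exit example_mode_exit(void)
{
	xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
}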
static inline int xfrm_af2proto(unsigned int family)
{
@@ -518,11 +598,44 @@ struct xfrm_policy_queue {
unsigned long timeout;
};
+/**
+ * struct xfrm_policy - xfrm policy
+ * @xp_net: network namespace the policy lives in
+ * @bydst: hlist node for SPD hash table or rbtree list
+ * @byidx: hlist node for index hash table
+ * @state_cache_list: hlist head for policy cached xfrm states
+ * @lock: serialize changes to policy structure members
+ * @refcnt: reference count, freed once it reaches 0
+ * @pos: kernel internal tie-breaker to determine age of policy
+ * @timer: timer
+ * @genid: generation, used to invalidate old policies
+ * @priority: priority, set by userspace
+ * @index: policy index (autogenerated)
+ * @if_id: virtual xfrm interface id
+ * @mark: packet mark
+ * @selector: selector
+ * @lft: lifetime configuration data
+ * @curlft: lifetime state
+ * @walk: list head on pernet policy list
+ * @polq: queue to hold packets while acquire operation is in progress
+ * @bydst_reinsert: policy tree node needs to be merged
+ * @type: XFRM_POLICY_TYPE_MAIN or _SUB
+ * @action: XFRM_POLICY_ALLOW or _BLOCK
+ * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
+ * @xfrm_nr: number of used templates in @xfrm_vec
+ * @family: protocol family
+ * @security: SELinux security label
+ * @xfrm_vec: array of templates to resolve state
+ * @rcu: rcu head, used to defer memory release
+ * @xdo: hardware offload state
+ */
struct xfrm_policy {
possible_net_t xp_net;
struct hlist_node bydst;
struct hlist_node byidx;
+ struct hlist_head state_cache_list;
+
/* This lock only affects elements except for entry. */
rwlock_t lock;
refcount_t refcnt;
@@ -547,7 +660,6 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
- struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
struct xfrm_dev_offload xdo;
@@ -1008,7 +1120,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
- u32 if_id; /* interface identifyer */
+ u32 if_id; /* interface identifier */
bool collect_md;
};
@@ -1049,6 +1161,9 @@ struct xfrm_offload {
#define CRYPTO_INVALID_PACKET_SYNTAX 64
#define CRYPTO_INVALID_PROTOCOL 128
+ /* Used to keep whole l2 header for transport mode GRO */
+ __u32 orig_mac_len;
+
__u8 proto;
__u8 inner_ipproto;
};
@@ -1172,9 +1287,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (xo) {
x = xfrm_input_state(skb);
- if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
- return (xo->flags & CRYPTO_DONE) &&
- (xo->status & CRYPTO_SUCCESS);
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ bool check = (xo->flags & CRYPTO_DONE) &&
+ (xo->status & CRYPTO_SUCCESS);
+
+ /* The packets here are plain ones and secpath was
+ * needed to indicate that hardware already handled
+ * them and there is no need to do anything in addition.
+ *
+ * Consume secpath which was set by drivers.
+ */
+ secpath_reset(skb);
+ return check;
+ }
}
return __xfrm_check_nopolicy(net, skb, dir) ||
@@ -1584,7 +1709,7 @@ void xfrm_state_update_stats(struct net *net);
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
- struct net_device *dev = xdo->dev;
+ struct net_device *dev = READ_ONCE(xdo->dev);
if (dev && dev->xfrmdev_ops &&
dev->xfrmdev_ops->xdo_dev_state_update_stats)
@@ -1600,6 +1725,10 @@ int xfrm_state_update(struct xfrm_state *x);
struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family);
+struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ __be32 spi, u8 proto,
+ unsigned short family);
struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
@@ -1639,7 +1768,7 @@ struct xfrmk_spdinfo {
u32 spdhmcnt;
};
-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
@@ -1650,8 +1779,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
- struct netlink_ext_ack *extack);
+int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
@@ -1663,6 +1791,15 @@ int xfrm_trans_queue(struct sk_buff *skb,
struct sk_buff *));
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
+int xfrm4_tunnel_check_size(struct sk_buff *skb);
+#if IS_ENABLED(CONFIG_IPV6)
+int xfrm6_tunnel_check_size(struct sk_buff *skb);
+#else
+static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
+{
+ return -EMSGSIZE;
+}
+#endif
#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
@@ -1724,10 +1861,7 @@ static inline int xfrm_user_policy(struct sock *sk, int optname,
}
#endif
-struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- int family, u32 mark);
+struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
@@ -1754,7 +1888,7 @@ int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
struct netlink_ext_ack *extack);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
- u8 mode, u32 reqid, u32 if_id, u8 proto,
+ u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
@@ -1769,12 +1903,16 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m,
- struct xfrm_encap_tmpl *encap);
+ struct xfrm_encap_tmpl *encap,
+ struct net *net,
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
struct xfrm_kmaddress *k, struct net *net,
struct xfrm_encap_tmpl *encap, u32 if_id,
- struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack,
+ struct xfrm_user_offload *xuo);
#endif
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
@@ -1942,13 +2080,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+void xfrm_dev_state_delete(struct xfrm_state *x);
+void xfrm_dev_state_free(struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
- if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
- xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+ if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+ dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
}
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
@@ -1969,28 +2110,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
return false;
}
-static inline void xfrm_dev_state_delete(struct xfrm_state *x)
-{
- struct xfrm_dev_offload *xso = &x->xso;
-
- if (xso->dev)
- xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
-}
-
-static inline void xfrm_dev_state_free(struct xfrm_state *x)
-{
- struct xfrm_dev_offload *xso = &x->xso;
- struct net_device *dev = xso->dev;
-
- if (dev && dev->xfrmdev_ops) {
- if (dev->xfrmdev_ops->xdo_dev_state_free)
- dev->xfrmdev_ops->xdo_dev_state_free(x);
- xso->dev = NULL;
- xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
- netdev_put(dev, &xso->dev_tracker);
- }
-}
-
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
struct xfrm_dev_offload *xdo = &x->xdo;
@@ -2199,4 +2318,10 @@ static inline int register_xfrm_state_bpf(void)
}
#endif
+int xfrm_nat_keepalive_init(unsigned short family);
+void xfrm_nat_keepalive_fini(unsigned short family);
+int xfrm_nat_keepalive_net_init(struct net *net);
+int xfrm_nat_keepalive_net_fini(struct net *net);
+void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
+
#endif /* _NET_XFRM_H */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 99dd7376df6a..cac56e6b0869 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -28,10 +28,8 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- u64 orig_addr;
- struct list_head free_list_node;
- struct list_head xskb_list_node;
-};
+ struct list_head list_node;
+} __aligned_largest;
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
@@ -43,7 +41,6 @@ struct xsk_dma_map {
refcount_t users;
struct list_head list; /* Protected by the RTNL_LOCK */
u32 dma_pages_cnt;
- bool dma_need_sync;
};
struct xsk_buff_pool {
@@ -56,6 +53,8 @@ struct xsk_buff_pool {
refcount_t users;
struct xdp_umem *umem;
struct work_struct work;
+ /* Protects generic receive in shared and non-shared umem mode. */
+ spinlock_t rx_lock;
struct list_head free_list;
struct list_head xskb_list;
u32 heads_cnt;
@@ -79,10 +78,10 @@ struct xsk_buff_pool {
u32 chunk_size;
u32 chunk_shift;
u32 frame_len;
+ u32 xdp_zc_max_segs;
u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
- bool dma_need_sync;
bool unaligned;
bool tx_sw_csum;
void *addrs;
@@ -122,7 +121,6 @@ void xp_free(struct xdp_buff_xsk *xskb);
static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
u64 addr)
{
- xskb->orig_addr = addr;
xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}
@@ -145,6 +143,14 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
+
+struct xdp_desc_ctx {
+ dma_addr_t dma;
+ struct xsk_tx_metadata *meta;
+};
+
+struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr);
+
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
return xskb->dma;
@@ -155,21 +161,17 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
return xskb->frame_dma;
}
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- xp_dma_sync_for_cpu_slow(xskb);
+ dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
+ xskb->pool->frame_len,
+ DMA_BIDIRECTIONAL);
}
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
- size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, size_t size)
{
- if (!pool->dma_need_sync)
- return;
-
- xp_dma_sync_for_device_slow(pool, dma, size);
+ dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}
/* Masks for xdp_umem_page flags.
@@ -191,7 +193,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
!(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
-static inline bool xp_mb_desc(struct xdp_desc *desc)
+static inline bool xp_mb_desc(const struct xdp_desc *desc)
{
return desc->options & XDP_PKT_CONTD;
}
@@ -228,14 +230,19 @@ static inline void xp_release(struct xdp_buff_xsk *xskb)
xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
-static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
+ struct xsk_buff_pool *pool)
{
- u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ u64 orig_addr = xskb->xdp.data - pool->addrs;
+ u64 offset;
+
+ if (!pool->unaligned)
+ return orig_addr;
- offset += xskb->pool->headroom;
- if (!xskb->pool->unaligned)
- return xskb->orig_addr + offset;
- return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+ offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ offset += pool->headroom;
+ orig_addr -= offset;
+ return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
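In unaligned mode the returned handle packs the buffer's base address in the low 48 bits and the data offset above XSK_UNALIGNED_BUF_OFFSET_SHIFT (48 in the AF_XDP uapi). A worked sketch of the inverse, restated here purely as an illustration:

/* Sketch: recover the address of xdp->data within pool->addrs. */
static inline u64 example_handle_to_addr(u64 handle)
{
	u64 base = handle & ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1);
	u64 off = handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;

	return base + off;
}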
static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)