author     Linus Torvalds <torvalds@linux-foundation.org>    2021-08-31 16:43:06 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2021-08-31 16:43:06 -0700
commit     9e9fb7655ed585da8f468e29221f0ba194a5f613 (patch)
tree       d2c51887389b8297635a5b90d5766897f00fe928 /include/linux
parent     86ac54e79fe09b34c52691a780a6e31d12fa57f4 (diff)
parent     29ce8f9701072fc221d9c38ad952de1a9578f95c (diff)
Merge tag 'net-next-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:
   - Enable memcg accounting for various networking objects.

  BPF:
   - Introduce bpf timers.
   - Add perf link and opaque bpf_cookie which the program can read out
     again, to be used in libbpf-based USDT library.
   - Add bpf_task_pt_regs() helper to access user space pt_regs in
     kprobes, to help user space stack unwinding.
   - Add support for UNIX sockets for BPF sockmap.
   - Extend BPF iterator support for UNIX domain sockets.
   - Allow BPF TCP congestion control progs and bpf iterators to call
     bpf_setsockopt(), e.g. to switch to another congestion control
     algorithm.

  Protocols:
   - Support IOAM Pre-allocated Trace with IPv6.
   - Support Management Component Transport Protocol.
   - bridge: multicast: add vlan support.
   - netfilter: add hooks for the SRv6 lightweight tunnel driver.
   - tcp:
       - enable mid-stream window clamping (by user space or BPF)
       - allow data-less, empty-cookie SYN with TFO_SERVER_COOKIE_NOT_REQD
       - more accurate DSACK processing for RACK-TLP
   - mptcp:
       - add full mesh path manager option
       - add partial support for MP_FAIL
       - improve use of backup subflows
       - optimize option processing
   - af_unix: add OOB notification support.
   - ipv6: add IFLA_INET6_RA_MTU to expose MTU value advertised by the
     router.
   - mac80211: Target Wake Time support in AP mode.
   - can: j1939: extend UAPI to notify about RX status.

  Driver APIs:
   - Add page frag support in page pool API.
   - Many improvements to the DSA (distributed switch) APIs.
   - ethtool: extend IRQ coalesce uAPI with timer reset modes.
   - devlink: control which auxiliary devices are created.
   - Support CAN PHYs via the generic PHY subsystem.
   - Proper cross-chip support for tag_8021q.
   - Allow TX forwarding for the software bridge data path to be
     offloaded to capable devices.

  Drivers:
   - veth: more flexible channels number configuration.
   - openvswitch: introduce per-cpu upcall dispatch.
   - Add internet mix (IMIX) mode to pktgen.
   - Transparently handle XDP operations in the bonding driver.
   - Add LiteETH network driver.
   - Renesas (ravb):
       - support Gigabit Ethernet IP
   - NXP Ethernet switch (sja1105):
       - fast aging support
       - support for "H" switch topologies
       - traffic termination for ports under VLAN-aware bridge
   - Intel 1G Ethernet:
       - support getcrosststamp() with PCIe PTM (Precision Time
         Measurement) for better time sync
       - support Credit-Based Shaper (CBS) offload, enabling HW traffic
         prioritization and bandwidth reservation
   - Broadcom Ethernet (bnxt):
       - support pulse-per-second output
       - support larger Rx rings
   - Mellanox Ethernet (mlx5):
       - support ethtool RSS contexts and MQPRIO channel mode
       - support LAG offload with bridging
       - support devlink rate limit API
       - support packet sampling on tunnels
   - Huawei Ethernet (hns3):
       - basic devlink support
       - add extended IRQ coalescing support
       - report extended link state
   - Netronome Ethernet (nfp):
       - add conntrack offload support
   - Broadcom WiFi (brcmfmac):
       - add WPA3 Personal with FT to supported cipher suites
       - support 43752 SDIO device
   - Intel WiFi (iwlwifi):
       - support scanning hidden 6GHz networks
       - support for a new hardware family (Bz)
   - Xen pv driver:
       - harden netfront against malicious backends
   - Qualcomm mobile:
       - ipa: refactor power management and enable automatic suspend
       - mhi: move MBIM to WWAN subsystem interfaces

  Refactor:
   - Ambient BPF run context and cgroup storage cleanup.
   - Compat rework for ndo_ioctl.

  Old code removal:
   - prism54: remove the obsoleted driver, deprecated by the p54 driver.
   - wan: remove sbni/granch driver"

* tag 'net-next-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1715 commits)
  net: Add depends on OF_NET for LiteX's LiteETH
  ipv6: seg6: remove duplicated include
  net: hns3: remove unnecessary spaces
  net: hns3: add some required spaces
  net: hns3: clean up a type mismatch warning
  net: hns3: refine function hns3_set_default_feature()
  ipv6: remove duplicated 'net/lwtunnel.h' include
  net: w5100: check return value after calling platform_get_resource()
  net/mlxbf_gige: Make use of devm_platform_ioremap_resourcexxx()
  net: mdio: mscc-miim: Make use of the helper function devm_platform_ioremap_resource()
  net: mdio-ipq4019: Make use of devm_platform_ioremap_resource()
  fou: remove sparse errors
  ipv4: fix endianness issue in inet_rtm_getroute_build_skb()
  octeontx2-af: Set proper errorcode for IPv4 checksum errors
  octeontx2-af: Fix static code analyzer reported issues
  octeontx2-af: Fix mailbox errors in nix_rss_flowkey_cfg
  octeontx2-af: Fix loop in free and unmap counter
  af_unix: fix potential NULL deref in unix_dgram_connect()
  dpaa2-eth: Replace strlcpy with strscpy
  octeontx2-af: Use NDC TX for transmit packet data
  ...
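
Bpf timers are the headline BPF addition above, and their kernel-side plumbing (struct bpf_timer in map values, ARG_PTR_TO_TIMER, bpf_timer_cancel_and_free()) appears in the include/linux diff below. A minimal sketch of how a BPF program might drive the new helpers; the map layout, section name and callback body here are illustrative assumptions, not part of this merge:

/* Hypothetical bpf_timer usage sketch (not quoted from this diff).
 * A timer must live inside a map value; the callback runs from timer
 * context and receives the map, key and value of its element.
 */
struct map_elem {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, int *key, struct map_elem *val)
{
	return 0;	/* do periodic work; re-arm via bpf_timer_start() if needed */
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(start_timer)
{
	int key = 0;
	struct map_elem *val = bpf_map_lookup_elem(&timer_map, &key);

	if (val) {
		bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
		bpf_timer_set_callback(&val->timer, timer_cb);
		bpf_timer_start(&val->timer, 1000000000 /* 1 s */, 0);
	}
	return 0;
}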
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitops.h                        50
-rw-r--r--  include/linux/bpf-cgroup.h                   230
-rw-r--r--  include/linux/bpf.h                          306
-rw-r--r--  include/linux/bpf_types.h                      3
-rw-r--r--  include/linux/bpf_verifier.h                  19
-rw-r--r--  include/linux/bpfptr.h                        12
-rw-r--r--  include/linux/btf.h                            1
-rw-r--r--  include/linux/btf_ids.h                        9
-rw-r--r--  include/linux/can/bittiming.h                  4
-rw-r--r--  include/linux/can/dev.h                        8
-rw-r--r--  include/linux/can/platform/flexcan.h          23
-rw-r--r--  include/linux/can/rx-offload.h                 8
-rw-r--r--  include/linux/compat.h                        32
-rw-r--r--  include/linux/dsa/8021q.h                     44
-rw-r--r--  include/linux/dsa/sja1105.h                   22
-rw-r--r--  include/linux/ethtool.h                       26
-rw-r--r--  include/linux/filter.h                        80
-rw-r--r--  include/linux/fsl/mc.h                         3
-rw-r--r--  include/linux/genetlink.h                     23
-rw-r--r--  include/linux/hdlc.h                           4
-rw-r--r--  include/linux/hdlcdrv.h                        2
-rw-r--r--  include/linux/ieee80211.h                    106
-rw-r--r--  include/linux/if_bridge.h                     40
-rw-r--r--  include/linux/igmp.h                           3
-rw-r--r--  include/linux/inetdevice.h                     9
-rw-r--r--  include/linux/ioam6.h                         13
-rw-r--r--  include/linux/ioam6_genl.h                    13
-rw-r--r--  include/linux/ioam6_iptunnel.h                13
-rw-r--r--  include/linux/ipv6.h                           3
-rw-r--r--  include/linux/memcontrol.h                     3
-rw-r--r--  include/linux/mhi.h                            2
-rw-r--r--  include/linux/mii.h                            2
-rw-r--r--  include/linux/mlx5/device.h                   71
-rw-r--r--  include/linux/mlx5/driver.h                   18
-rw-r--r--  include/linux/mlx5/eswitch.h                  16
-rw-r--r--  include/linux/mlx5/fs.h                        2
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                 25
-rw-r--r--  include/linux/mm_types.h                      18
-rw-r--r--  include/linux/mmc/sdio_ids.h                   1
-rw-r--r--  include/linux/netdevice.h                     92
-rw-r--r--  include/linux/netfilter/x_tables.h             6
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h      2
-rw-r--r--  include/linux/pci.h                           10
-rw-r--r--  include/linux/perf_event.h                     1
-rw-r--r--  include/linux/phy.h                            1
-rw-r--r--  include/linux/ptp_clock_kernel.h              48
-rw-r--r--  include/linux/sched.h                          3
-rw-r--r--  include/linux/skbuff.h                        21
-rw-r--r--  include/linux/socket.h                         6
-rw-r--r--  include/linux/ssb/ssb.h                        2
-rw-r--r--  include/linux/ssb/ssb_driver_extif.h           2
-rw-r--r--  include/linux/trace_events.h                   7
-rw-r--r--  include/linux/typecheck.h                      9
53 files changed, 985 insertions, 492 deletions
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 26bf15e6cd35..5e62e2383b7f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,6 +4,7 @@
#include <asm/types.h>
#include <linux/bits.h>
+#include <linux/typecheck.h>
#include <uapi/linux/kernel.h>
@@ -253,6 +254,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
__clear_bit(nr, addr);
}
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __set_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __clear_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * if (__ptr_test_bit(bit, &p)) {
+ * ...
+ * } else {
+ * ...
+ * }
+ */
+#define __ptr_test_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ test_bit(nr, (unsigned long *)(addr)); \
+ })
+
#ifdef __KERNEL__
#ifndef set_mask_bits
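
For context on the new __ptr_*_bit() helpers above: a short hypothetical usage sketch (assuming an allocation aligned enough that bit 0 of the pointer is free), with typecheck_pointer() turning misuse on a non-pointer variable into a build error:

	void *p = kmalloc(64, GFP_KERNEL);	/* kmalloc alignment leaves bit 0 clear */

	if (!p)
		return -ENOMEM;
	__ptr_set_bit(0, &p);			/* tag the pointer */
	if (__ptr_test_bit(0, &p))
		__ptr_clear_bit(0, &p);		/* untag before dereferencing */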
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 6c9b10d82c80..2746fd804216 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -23,22 +23,73 @@ struct ctl_table_header;
struct task_struct;
#ifdef CONFIG_CGROUP_BPF
+enum cgroup_bpf_attach_type {
+ CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+ CGROUP_INET_INGRESS = 0,
+ CGROUP_INET_EGRESS,
+ CGROUP_INET_SOCK_CREATE,
+ CGROUP_SOCK_OPS,
+ CGROUP_DEVICE,
+ CGROUP_INET4_BIND,
+ CGROUP_INET6_BIND,
+ CGROUP_INET4_CONNECT,
+ CGROUP_INET6_CONNECT,
+ CGROUP_INET4_POST_BIND,
+ CGROUP_INET6_POST_BIND,
+ CGROUP_UDP4_SENDMSG,
+ CGROUP_UDP6_SENDMSG,
+ CGROUP_SYSCTL,
+ CGROUP_UDP4_RECVMSG,
+ CGROUP_UDP6_RECVMSG,
+ CGROUP_GETSOCKOPT,
+ CGROUP_SETSOCKOPT,
+ CGROUP_INET4_GETPEERNAME,
+ CGROUP_INET6_GETPEERNAME,
+ CGROUP_INET4_GETSOCKNAME,
+ CGROUP_INET6_GETSOCKNAME,
+ CGROUP_INET_SOCK_RELEASE,
+ MAX_CGROUP_BPF_ATTACH_TYPE
+};
-extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
-#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
-#define BPF_CGROUP_STORAGE_NEST_MAX 8
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
-struct bpf_cgroup_storage_info {
- struct task_struct *task;
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
-};
+#undef CGROUP_ATYPE
-/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
- * to use bpf cgroup storage simultaneously.
- */
-DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
- bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -80,15 +131,15 @@ struct bpf_prog_array;
struct cgroup_bpf {
/* array of effective progs in this cgroup */
- struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
+ struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
/* attached progs to this cgroup and attach flags
* when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
* have either zero or one element
* when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
*/
- struct list_head progs[MAX_BPF_ATTACH_TYPE];
- u32 flags[MAX_BPF_ATTACH_TYPE];
+ struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
/* list of cgroup shared storages */
struct list_head storages;
@@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
- enum bpf_attach_type type,
+ enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
- short access, enum bpf_attach_type type);
+ short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
char **buf, size_t *pcount, loff_t *ppos,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
int *optname, char __user *optval,
@@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
-static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
- *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- enum bpf_cgroup_storage_type stype;
- int i, err = 0;
-
- preempt_disable();
- for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
- if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
- continue;
-
- this_cpu_write(bpf_cgroup_storage_info[i].task, current);
- for_each_cgroup_storage_type(stype)
- this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
- storage[stype]);
- goto out;
- }
- err = -EBUSY;
- WARN_ON_ONCE(1);
-
-out:
- preempt_enable();
- return err;
-}
-
-static inline void bpf_cgroup_storage_unset(void)
-{
- int i;
-
- for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
- if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
- continue;
-
- this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
- return;
- }
-}
-
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
void *key, bool locked);
@@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
@@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
if (sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
NULL, \
&__unused_flags); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
t_ctx, \
&__unused_flags); \
release_sock(sk); \
@@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
* (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
* should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
*/
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
({ \
u32 __flags = 0; \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
NULL, &__flags); \
release_sock(sk); \
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
@@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
})
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
- ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
- cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
(sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
* fullsock and its parent fullsock cannot be traced by
@@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
- __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
access, \
- BPF_CGROUP_DEVICE); \
+ CGROUP_DEVICE); \
\
__ret; \
})
@@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
- BPF_CGROUP_SYSCTL); \
+ CGROUP_SYSCTL); \
__ret; \
})
@@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
!INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
tcp_bpf_bypass_getsockopt, \
@@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
sock, level, optname, optval, optlen, retval); \
__ret; \
@@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
-static inline int bpf_cgroup_storage_set(
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
-static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled(type) (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
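
The point of the new enum above is that the per-cgroup arrays shrink from MAX_BPF_ATTACH_TYPE to MAX_CGROUP_BPF_ATTACH_TYPE entries. A hedged sketch of the expected attach-path translation (the attr, cgrp and pl variables are assumed context, not quoted from the patch):

	/* Translate the sparse UAPI attach type into the dense
	 * cgroup-local index before touching the per-cgroup arrays.
	 */
	enum cgroup_bpf_attach_type atype;

	atype = to_cgroup_bpf_attach_type(attr->attach_type);
	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;

	/* progs/flags/effective are indexed by the cgroup-local type now */
	list_add_tail(&pl->node, &cgrp->bpf.progs[atype]);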
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e8e2b0393ca9..f4c16f19f83e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -168,6 +168,7 @@ struct bpf_map {
u32 max_entries;
u32 map_flags;
int spin_lock_off; /* >=0 valid offset, <0 error */
+ int timer_off; /* >=0 valid offset, <0 error */
u32 id;
int numa_node;
u32 btf_key_type_id;
@@ -197,30 +198,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
return map->spin_lock_off >= 0;
}
-static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+static inline bool map_value_has_timer(const struct bpf_map *map)
{
- if (likely(!map_value_has_spin_lock(map)))
- return;
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ return map->timer_off >= 0;
}
-/* copy everything but bpf_spin_lock */
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+{
+ if (unlikely(map_value_has_spin_lock(map)))
+ *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
+ (struct bpf_spin_lock){};
+ if (unlikely(map_value_has_timer(map)))
+ *(struct bpf_timer *)(dst + map->timer_off) =
+ (struct bpf_timer){};
+}
+
+/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
+ u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;
+
if (unlikely(map_value_has_spin_lock(map))) {
- u32 off = map->spin_lock_off;
+ s_off = map->spin_lock_off;
+ s_sz = sizeof(struct bpf_spin_lock);
+ } else if (unlikely(map_value_has_timer(map))) {
+ t_off = map->timer_off;
+ t_sz = sizeof(struct bpf_timer);
+ }
- memcpy(dst, src, off);
- memcpy(dst + off + sizeof(struct bpf_spin_lock),
- src + off + sizeof(struct bpf_spin_lock),
- map->value_size - off - sizeof(struct bpf_spin_lock));
+ if (unlikely(s_sz || t_sz)) {
+ if (s_off < t_off || !s_sz) {
+ swap(s_off, t_off);
+ swap(s_sz, t_sz);
+ }
+ memcpy(dst, src, t_off);
+ memcpy(dst + t_off + t_sz,
+ src + t_off + t_sz,
+ s_off - t_off - t_sz);
+ memcpy(dst + s_off + s_sz,
+ src + s_off + s_sz,
+ map->value_size - s_off - s_sz);
} else {
memcpy(dst, src, map->value_size);
}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
@@ -314,6 +338,7 @@ enum bpf_arg_type {
ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
+ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
__BPF_ARG_TYPE_MAX,
};
@@ -554,6 +579,11 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
*/
@@ -1073,7 +1103,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@@ -1084,7 +1114,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
- struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ union {
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u64 bpf_cookie;
+ };
};
struct bpf_prog_array {
@@ -1110,73 +1143,133 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
+ u64 bpf_cookie,
struct bpf_prog_array **new_array);
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ const struct bpf_prog_array_item *prog_item;
+};
+
+struct bpf_trace_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+ struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+ old_ctx = current->bpf_ctx;
+ current->bpf_ctx = new_ctx;
+#endif
+ return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ current->bpf_ctx = old_ctx;
+#endif
+}
+
/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN (1 << 0)
-/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
- * if bpf_cgroup_storage_set() failed, the rest of programs
- * will not execute. This should be a really rare scenario
- * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
- * preemptions all between bpf_cgroup_storage_set() and
- * bpf_cgroup_storage_unset() on the same cpu.
- */
-#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 _ret = 1; \
- u32 func_ret; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
- break; \
- func_ret = func(_prog, ctx); \
- _ret &= (func_ret & 1); \
- *(ret_flags) |= (func_ret >> 1); \
- bpf_cgroup_storage_unset(); \
- _item++; \
- } \
- rcu_read_unlock(); \
- migrate_enable(); \
- _ret; \
- })
-
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 _ret = 1; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- if (unlikely(check_non_null && !_array))\
- goto _out; \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- if (!set_cg_storage) { \
- _ret &= func(_prog, ctx); \
- } else { \
- if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
- break; \
- _ret &= func(_prog, ctx); \
- bpf_cgroup_storage_unset(); \
- } \
- _item++; \
- } \
-_out: \
- rcu_read_unlock(); \
- migrate_enable(); \
- _ret; \
- })
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog,
+ u32 *ret_flags)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_cg_run_ctx run_ctx;
+ u32 ret = 1;
+ u32 func_ret;
+
+ migrate_disable();
+ rcu_read_lock();
+ array = rcu_dereference(array_rcu);
+ item = &array->items[0];
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.prog_item = item;
+ func_ret = run_prog(prog, ctx);
+ ret &= (func_ret & 1);
+ *(ret_flags) |= (func_ret >> 1);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ rcu_read_unlock();
+ migrate_enable();
+ return ret;
+}
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_cg_run_ctx run_ctx;
+ u32 ret = 1;
+
+ migrate_disable();
+ rcu_read_lock();
+ array = rcu_dereference(array_rcu);
+ item = &array->items[0];
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.prog_item = item;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ rcu_read_unlock();
+ migrate_enable();
+ return ret;
+}
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ migrate_disable();
+ rcu_read_lock();
+ array = rcu_dereference(array_rcu);
+ if (unlikely(!array))
+ goto out;
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+out:
+ rcu_read_unlock();
+ migrate_enable();
+ return ret;
+}
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
* so BPF programs can request cwr for TCP packets.
@@ -1205,7 +1298,7 @@ _out: \
u32 _flags = 0; \
bool _cn; \
u32 _ret; \
- _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
+ _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
_cn = _flags & BPF_RET_SET_CN; \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
@@ -1214,12 +1307,6 @@ _out: \
_ret; \
})
-#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
-
-#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
-
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -1398,6 +1485,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
+typedef const struct bpf_func_proto *
+(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
enum bpf_iter_feature {
BPF_ITER_RESCHED = BIT(0),
@@ -1410,6 +1500,7 @@ struct bpf_iter_reg {
bpf_iter_detach_target_t detach_target;
bpf_iter_show_fdinfo_t show_fdinfo;
bpf_iter_fill_link_info_t fill_link_info;
+ bpf_iter_get_func_proto_t get_func_proto;
u32 ctx_arg_info_size;
u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
@@ -1432,6 +1523,8 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
+const struct bpf_func_proto *
+bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
@@ -1509,12 +1602,12 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog, struct bpf_map *map,
bool exclude_ingress);
-bool dev_map_can_have_prog(struct bpf_map *map);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
-bool cpu_map_prog_allowed(struct bpf_map *map);
+int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb);
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1711,6 +1804,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
return 0;
}
+static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
return false;
@@ -1852,6 +1951,12 @@ void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
+void sock_map_unhash(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@@ -1884,24 +1989,6 @@ static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
{
return -ENOTSUPP;
}
-#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-
-#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
-int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
-int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
-int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
-void sock_map_unhash(struct sock *sk);
-void sock_map_close(struct sock *sk, long timeout);
-
-void bpf_sk_reuseport_detach(struct sock *sk);
-int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
- void *value);
-int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
- void *value, u64 map_flags);
-#else
-static inline void bpf_sk_reuseport_detach(struct sock *sk)
-{
-}
#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
@@ -1921,7 +2008,21 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
{
return -EOPNOTSUPP;
}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
void *key, void *value)
{
@@ -1998,9 +2099,8 @@ extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
-
-const struct bpf_func_proto *bpf_tracing_func_proto(
- enum bpf_func_id func_id, const struct bpf_prog *prog);
+extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
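
The run-context refactor above is what makes the opaque bpf_cookie retrievable later: BPF_PROG_RUN_ARRAY() publishes each item's cookie in a bpf_trace_run_ctx hung off current->bpf_ctx. A sketch of the retrieval mechanism (the helper name and final details may differ from the merged tracing code):

	/* Recover the per-program cookie stashed by BPF_PROG_RUN_ARRAY()
	 * in the ambient run context.
	 */
	BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
	{
		struct bpf_trace_run_ctx *run_ctx;

		run_ctx = container_of(current->bpf_ctx,
				       struct bpf_trace_run_ctx, run_ctx);
		return run_ctx->bpf_cookie;
	}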
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index ae3ac3a2018c..9c81724e4b98 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -136,3 +136,6 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
#endif
+#ifdef CONFIG_PERF_EVENTS
+BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
+#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 828d08afeee0..5424124dbe36 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -53,7 +53,14 @@ struct bpf_reg_state {
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
- struct bpf_map *map_ptr;
+ struct {
+ struct bpf_map *map_ptr;
+ /* To distinguish map lookups from outer map
+ * the map_uid is non-zero for registers
+ * pointing to inner maps.
+ */
+ u32 map_uid;
+ };
/* for PTR_TO_BTF_ID */
struct {
@@ -201,12 +208,19 @@ struct bpf_func_state {
* zero == main subprog
*/
u32 subprogno;
+ /* Every bpf_timer_start will increment async_entry_cnt.
+ * It's used to distinguish:
+ * void foo(void) { for(;;); }
+ * void foo(void) { bpf_timer_set_callback(,foo); }
+ */
+ u32 async_entry_cnt;
+ bool in_callback_fn;
+ bool in_async_callback_fn;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
int allocated_stack;
- bool in_callback_fn;
struct bpf_stack_state *stack;
};
@@ -392,6 +406,7 @@ struct bpf_subprog_info {
bool has_tail_call;
bool tail_call_reachable;
bool has_ld_abs;
+ bool is_async_cb;
};
/* single container for all structs
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
index 5cdeab497cb3..546e27fc6d46 100644
--- a/include/linux/bpfptr.h
+++ b/include/linux/bpfptr.h
@@ -62,9 +62,17 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
}
-static inline void *memdup_bpfptr(bpfptr_t src, size_t len)
+static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
{
- return memdup_sockptr((sockptr_t) src, len);
+ void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_bpfptr(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
}
static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
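
kvmemdup_bpfptr() keeps the usual memdup contract but is kvmalloc-backed and reports failure via ERR_PTR() rather than NULL; an assumed caller pattern (usrc and size are placeholders):

	void *tmp = kvmemdup_bpfptr(usrc, size);

	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	/* ... use the kernel copy ... */
	kvfree(tmp);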
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 94a0c976c90f..214fde93214b 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -99,6 +99,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+int btf_find_timer(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 57890b357f85..47d9abfbdb55 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl)
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name) \
+ BTF_ID(prefix, typename)
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
@@ -148,6 +151,7 @@ extern struct btf_id_set name;
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name)
@@ -172,7 +176,8 @@ extern struct btf_id_set name;
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
- BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)
enum {
#define BTF_SOCK_TYPE(name, str) name,
@@ -184,4 +189,6 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
+extern u32 btf_task_struct_ids[];
+
#endif
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index ae7a3411167c..9de6e9053e34 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -37,7 +37,7 @@
* quanta, from when the bit is sent on the TX pin to when it is
* received on the RX pin of the transmitter. Possible options:
*
- * O: automatic mode. The controller dynamically measure @tdcv
+ * 0: automatic mode. The controller dynamically measures @tdcv
* for each transmitted CAN FD frame.
*
* Other values: manual mode. Use the fixed provided value.
@@ -45,7 +45,7 @@
* @tdco: Transmitter Delay Compensation Offset. Offset value, in time
* quanta, defining the distance between the start of the bit
* reception on the RX pin of the transceiver and the SSP
- * position such as SSP = @tdcv + @tdco.
+ * position such that SSP = @tdcv + @tdco.
*
* If @tdco is zero, then TDC is disabled and both @tdcv and
* @tdcf should be ignored.
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 27b275e463da..2413253e54c7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
+enum can_termination_gpio {
+ CAN_TERMINATION_GPIO_DISABLED = 0,
+ CAN_TERMINATION_GPIO_ENABLED,
+ CAN_TERMINATION_GPIO_MAX,
+};
+
/*
* CAN common private data
*/
@@ -55,6 +61,8 @@ struct can_priv {
unsigned int termination_const_cnt;
const u16 *termination_const;
u16 termination;
+ struct gpio_desc *termination_gpio;
+ u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
enum can_state state;
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 000000000000..1b536fb999de
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_PLATFORM_FLEXCAN_H
+#define _CAN_PLATFORM_FLEXCAN_H
+
+struct flexcan_platform_data {
+ u32 clock_frequency;
+ u8 clk_src;
+};
+
+#endif /* _CAN_PLATFORM_FLEXCAN_H */
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 40882df7105e..c11477620403 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -20,6 +20,7 @@ struct can_rx_offload {
bool drop);
struct sk_buff_head skb_queue;
+ struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@@ -48,14 +49,11 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
-static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
-{
- napi_schedule(&offload->napi);
-}
-
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);
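
With can_rx_offload_schedule() removed, drivers stop scheduling NAPI themselves and instead call the new finish helpers once per interrupt. A hypothetical driver sketch (foo_priv and the IRQ wiring are assumptions):

	static irqreturn_t foo_can_irq(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		can_rx_offload_irq_offload_fifo(&priv->offload);
		/* replaces the old can_rx_offload_schedule() call */
		can_rx_offload_irq_finish(&priv->offload);

		return IRQ_HANDLED;
	}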
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c270124e4402..8e0598c7d1d1 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -20,11 +20,8 @@
#include <linux/unistd.h>
#include <asm/compat.h>
-
-#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
-#endif
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -95,8 +92,6 @@ struct compat_iovec {
compat_size_t iov_len;
};
-#ifdef CONFIG_COMPAT
-
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
#endif
@@ -131,9 +126,11 @@ struct compat_tms {
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
+#ifndef compat_sigset_t
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+#endif
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
@@ -384,6 +381,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
+struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -428,7 +426,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
unsigned int size)
{
/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
+#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
@@ -929,17 +927,6 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
-
-/*
- * For most but not all architectures, "am I in a compat syscall?" and
- * "am I a compat task?" are the same question. For architectures on which
- * they aren't the same question, arch code can override in_compat_syscall.
- */
-
-#ifndef in_compat_syscall
-static inline bool in_compat_syscall(void) { return is_compat_task(); }
-#endif
-
/**
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
@@ -969,6 +956,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user * buf);
+#ifdef CONFIG_COMPAT
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 1587961f1a7b..c7fa4a3498fe 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -11,60 +11,48 @@
struct dsa_switch;
struct sk_buff;
struct net_device;
-struct packet_type;
-struct dsa_8021q_context;
-struct dsa_8021q_crosschip_link {
+struct dsa_tag_8021q_vlan {
struct list_head list;
int port;
- struct dsa_8021q_context *other_ctx;
- int other_port;
+ u16 vid;
refcount_t refcount;
};
-struct dsa_8021q_ops {
- int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags);
- int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid);
-};
-
struct dsa_8021q_context {
- const struct dsa_8021q_ops *ops;
struct dsa_switch *ds;
- struct list_head crosschip_links;
+ struct list_head vlans;
/* EtherType of RX VID, used for filtering on master interface */
__be16 proto;
};
-#define DSA_8021Q_N_SUBVLAN 8
-
-int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
+int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
-int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port);
-
-int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port);
+void dsa_tag_8021q_unregister(struct dsa_switch *ds);
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
-void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *subvlan);
+void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id);
+
+int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
+ struct net_device *br,
+ int bridge_num);
+
+void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
+ struct net_device *br,
+ int bridge_num);
+
+u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num);
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
-u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan);
-
int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
-u16 dsa_8021q_rx_subvlan(u16 vid);
-
bool vid_is_dsa_8021q_rxvlan(u16 vid);
bool vid_is_dsa_8021q_txvlan(u16 vid);
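
Since the subvlan out-parameter (and DSA_8021Q_N_SUBVLAN) is gone, a tagger's receive path now decodes only the switch id and source port from the VID. A sketch of that decode, assuming the usual DSA tagger context and the DSA core's dsa_master_find_slave() helper:

	int source_port = -1, switch_id = -1;

	dsa_8021q_rcv(skb, &source_port, &switch_id);

	skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev)
		return NULL;	/* frame from an unknown port */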
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index b6089b88314c..171106202fe5 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -16,6 +16,8 @@
#define ETH_P_SJA1105_META 0x0008
#define ETH_P_SJA1110 0xdadc
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
+
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
#define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull
@@ -59,14 +61,12 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)((skb)->cb))
struct sja1105_port {
- u16 subvlan_map[DSA_8021Q_N_SUBVLAN];
struct kthread_worker *xmit_worker;
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
- u16 xmit_tpid;
};
enum sja1110_meta_tstamp {
@@ -89,4 +89,22 @@ static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105)
+
+extern const struct dsa_switch_ops sja1105_switch_ops;
+
+static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
+{
+ return dp->ds->ops == &sja1105_switch_ops;
+}
+
+#else
+
+static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
+{
+ return false;
+}
+
+#endif
+
#endif /* _NET_DSA_SJA1105_H */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 232daaec56e4..849524b55d89 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -15,10 +15,9 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
-#ifdef CONFIG_COMPAT
-
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@@ -38,8 +37,6 @@ struct compat_ethtool_rxnfc {
u32 rule_locs[];
};
-#endif /* CONFIG_COMPAT */
-
#include <linux/rculist.h>
/**
@@ -176,6 +173,11 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct kernel_ethtool_coalesce {
+ u8 use_cqe_mode_tx;
+ u8 use_cqe_mode_rx;
+};
+
/**
* ethtool_intersect_link_masks - Given two link masks, AND them together
* @dst: first mask and where result is stored
@@ -215,7 +217,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0)
+#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
+#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -241,6 +245,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_COALESCE_USE_CQE \
+ (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
#define ETHTOOL_STAT_NOT_SET (~0ULL)
@@ -606,8 +612,14 @@ struct ethtool_ops {
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*get_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
+ int (*set_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
struct ethtool_ringparam *);
int (*set_ringparam)(struct net_device *,
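
The reworked coalesce callbacks receive a kernel-only struct for the netlink-only knobs (CQE mode) plus an extack for precise error reporting; note the diff also pulls in <linux/netlink.h> for this. A sketch of a hypothetical driver adopting the new signature (the foo_* names are assumptions):

	static int foo_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kec,
				    struct netlink_ext_ack *extack)
	{
		if (kec->use_cqe_mode_tx && !foo_hw_has_cqe(dev)) {
			NL_SET_ERR_MSG(extack, "CQE mode not supported on this device");
			return -EOPNOTSUPP;
		}
		/* ... program usecs/frames and CQE mode into hardware ... */
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
						  ETHTOOL_COALESCE_USE_CQE,
		.set_coalesce			= foo_set_coalesce,
	};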
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 83b896044e79..7d248941ecea 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -574,7 +574,8 @@ struct bpf_prog {
kprobe_override:1, /* Do we override a kprobe? */
has_callchain_buf:1, /* callchain buffer allocated? */
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
- call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1; /* Do we call get_func_ip() */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@@ -599,25 +600,38 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
- u32 __ret; \
- cant_migrate(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *__stats; \
- u64 __start = sched_clock(); \
- __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- __stats = this_cpu_ptr(prog->stats); \
- u64_stats_update_begin(&__stats->syncp); \
- __stats->cnt++; \
- __stats->nsecs += sched_clock() - __start; \
- u64_stats_update_end(&__stats->syncp); \
- } else { \
- __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- } \
- __ret; })
-
-#define BPF_PROG_RUN(prog, ctx) \
- __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+ const void *ctx,
+ bpf_dispatcher_fn dfunc)
+{
+ u32 ret;
+
+ cant_migrate();
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ struct bpf_prog_stats *stats;
+ u64 start = sched_clock();
+
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ stats = this_cpu_ptr(prog->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->cnt++;
+ stats->nsecs += sched_clock() - start;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ }
+ return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+ return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
/*
* Use in preemptible and therefore migratable context to make sure that
@@ -636,7 +650,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
u32 ret;
migrate_disable();
- ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
+ ret = bpf_prog_run(prog, ctx);
migrate_enable();
return ret;
}
@@ -709,7 +723,7 @@ static inline void bpf_restore_data_end(
cb->data_end = saved_data_end;
}
-static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@@ -730,8 +744,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+ const void *ctx)
{
+ const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -741,7 +756,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
- res = BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -775,6 +790,10 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
DECLARE_BPF_DISPATCHER(xdp)
+DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+
+u32 xdp_master_redirect(struct xdp_buff *xdp);
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@@ -782,7 +801,14 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* under local_bh_disable(), which provides the needed RCU protection
* for accessing map entries.
*/
- return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+ u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+
+ if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
+ if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
+ act = xdp_master_redirect(xdp);
+ }
+
+ return act;
}
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
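Note that bpf_prog_run_xdp() now post-processes XDP_TX: when the static key is enabled and the receiving device is a bond slave, xdp_master_redirect() lets the bonding master pick the real transmit device. Drivers keep calling bpf_prog_run_xdp() unchanged, as in this sketch of a driver RX path (the foo_* helpers and ring structure are hypothetical):

#include <linux/filter.h>

static void foo_rx_xdp(struct foo_ring *ring, struct xdp_buff *xdp,
		       struct bpf_prog *prog)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		foo_build_skb_and_napi(ring, xdp);	/* hypothetical */
		break;
	case XDP_TX:
		/* on a bond slave this may already have been rewritten
		 * to XDP_REDIRECT inside bpf_prog_run_xdp()
		 */
		foo_xmit_back(ring, xdp);		/* hypothetical */
		break;
	case XDP_REDIRECT:
		xdp_do_redirect(ring->netdev, xdp, prog);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_DROP:
		foo_recycle(ring, xdp);			/* hypothetical */
		break;
	}
}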
@@ -1428,7 +1454,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
};
u32 act;
- act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@@ -1466,7 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
};
u32 act;
- act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
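Converting the __BPF_PROG_RUN()/BPF_PROG_RUN() macros into inline functions gives callers real type checking on prog and ctx and avoids the macro's repeated argument expansion; existing call sites simply rename to bpf_prog_run(), as the sk_lookup hunks above show. A hedged sketch of the calling convention:

#include <linux/filter.h>

/* bpf_prog_run() must be called with migration disabled (it contains
 * cant_migrate()); from preemptible context use the pinning wrapper.
 */
static u32 foo_run_filter(const struct bpf_prog *prog, const void *ctx,
			  bool preemptible)
{
	if (preemptible)
		return bpf_prog_run_pin_on_cpu(prog, ctx);

	/* softirq/NAPI context: migration cannot happen here */
	return bpf_prog_run(prog, ctx);
}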
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 63b56aba925a..30ece3ae6df7 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -423,7 +423,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
-struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id);
extern struct bus_type fsl_mc_bus_type;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index bc738504ab4a..c285968e437a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -8,34 +8,11 @@
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
-#ifdef CONFIG_LOCKDEP
-extern bool lockdep_genl_is_held(void);
-#endif
/* for synchronisation between af_netlink and genetlink */
extern atomic_t genl_sk_destructing_cnt;
extern wait_queue_head_t genl_sk_destructing_waitq;
-/**
- * rcu_dereference_genl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_genl(p) \
- rcu_dereference_check(p, lockdep_genl_is_held())
-
-/**
- * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds genl mutex.
- */
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-
#define MODULE_ALIAS_GENL_FAMILY(family)\
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index cacc4dd27794..630a388035f1 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -22,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
- int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -54,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index d4d633a49d36..5d70c3f98f5b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -79,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
- int (*ioctl)(struct net_device *, struct ifreq *,
+ int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a6730072d13a..694264503119 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1088,6 +1088,48 @@ struct ieee80211_ext {
} u;
} __packed __aligned(2);
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -1252,6 +1294,10 @@ struct ieee80211_mgmt {
__le16 toa_error;
u8 variable[0];
} __packed ftm;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed s1g;
} u;
} __packed action;
} u;
@@ -2266,6 +2312,9 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+
/**
* ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
* @primary: primary channel
@@ -2282,6 +2331,7 @@ struct ieee80211_he_6ghz_oper {
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
u8 control;
u8 ccfs0;
u8 ccfs1;
@@ -2289,6 +2339,44 @@ struct ieee80211_he_6ghz_oper {
} __packed;
/*
+ * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021",
+ * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation
+ * subfield encoding", and two category for each type in "Table E-12-Regulatory
+ * Info subfield encoding in the United States".
+ * So it it totally max 8 Transmit Power Envelope element.
+ */
+#define IEEE80211_TPE_MAX_IE_COUNT 8
+/*
+ * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
+ * of "IEEE Std 802.11ax™‐2021", the max power level is 8.
+ */
+#define IEEE80211_MAX_NUM_PWR_LEVEL 8
+
+#define IEEE80211_TPE_MAX_POWER_COUNT 8
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/**
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope element
+ *
+ * This structure represents the "Transmit Power Envelope element"
+ * defined in IEEE Std 802.11ax-2021.
+ */
+struct ieee80211_tx_pwr_env {
+ u8 tx_power_info;
+ s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
+/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
* @he_oper_ie: byte data of the HE Operations IE, starting from the byte
* after the ext ID byte. It is assumed that he_oper_ie has at least
@@ -2869,7 +2957,7 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
@@ -2881,6 +2969,7 @@ enum ieee80211_eid {
WLAN_EID_AID_RESPONSE = 211,
WLAN_EID_S1G_BCN_COMPAT = 213,
WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
+ WLAN_EID_S1G_TWT = 216,
WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
@@ -2950,6 +3039,7 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_S1G = 22,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -3023,6 +3113,20 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
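The TWT Request Type field added above is a little-endian 16-bit mask, so the IEEE80211_TWT_REQTYPE_* constants combine naturally with FIELD_GET(). A hedged sketch of decoding a received TWT Setup element (foo_parse_twt is illustrative; real users live in mac80211/drivers):

#include <linux/bitfield.h>
#include <linux/ieee80211.h>

static void foo_parse_twt(const struct ieee80211_twt_setup *twt)
{
	const struct ieee80211_twt_params *params =
		(const void *)twt->params;
	u16 req_type = le16_to_cpu(params->req_type);
	u8 setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
	u8 flow_id = FIELD_GET(IEEE80211_TWT_REQTYPE_FLOWID, req_type);
	u8 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);

	if (setup_cmd == TWT_SETUP_CMD_REQUEST)
		pr_debug("TWT request, flow %u, wake interval exponent %u\n",
			 flow_id, exp);
}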
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index b651c5e32a28..509e18c7e740 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -57,10 +57,16 @@ struct br_ip_list {
#define BR_MRP_AWARE BIT(17)
#define BR_MRP_LOST_CONT BIT(18)
#define BR_MRP_LOST_IN_CONT BIT(19)
+#define BR_TX_FWD_OFFLOAD BIT(20)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
-extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+struct net_bridge;
+void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
+ unsigned int cmd, struct ifreq *ifr,
+ void __user *uarg));
+int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
+ struct ifreq *ifr, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
@@ -70,9 +76,6 @@ bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
-int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -104,13 +107,6 @@ static inline bool br_multicast_router(const struct net_device *dev)
{
return false;
}
-static inline int br_mdb_replay(const struct net_device *br_dev,
- const struct net_device *dev, const void *ctx,
- bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
#endif
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
@@ -120,9 +116,8 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
-int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack);
+int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
@@ -150,12 +145,10 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
return -EINVAL;
}
-static inline int br_vlan_replay(struct net_device *br_dev,
- struct net_device *dev, const void *ctx,
- bool adding, struct notifier_block *nb,
- struct netlink_ext_ack *extack)
+static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
{
- return -EOPNOTSUPP;
+ return -EINVAL;
}
#endif
@@ -167,8 +160,6 @@ void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
u8 br_port_get_stp_state(const struct net_device *dev);
clock_t br_get_ageing_time(const struct net_device *br_dev);
-int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
- const void *ctx, bool adding, struct notifier_block *nb);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -197,13 +188,6 @@ static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
{
return 0;
}
-
-static inline int br_fdb_replay(const struct net_device *br_dev,
- const struct net_device *dev, const void *ctx,
- bool adding, struct notifier_block *nb)
-{
- return -EOPNOTSUPP;
-}
#endif
#endif
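br_vlan_get_info_rcu() is the RCU-protected counterpart of br_vlan_get_info(), intended for datapath callers that hold only rcu_read_lock(). A minimal sketch of querying a bridge port's VLAN membership from RX context (the wrapper name is hypothetical):

#include <linux/if_bridge.h>
#include <linux/rcupdate.h>

static bool foo_port_has_vid(const struct net_device *dev, u16 vid)
{
	struct bridge_vlan_info vinfo;
	bool ret;

	rcu_read_lock();
	ret = !br_vlan_get_info_rcu(dev, vid, &vinfo);
	rcu_read_unlock();

	return ret;
}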
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 64ce8cd1cfaf..93c262ecbdc9 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -41,9 +41,6 @@ struct ip_sf_socklist {
__be32 sl_addr[];
};
-#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
- (count) * sizeof(__be32))
-
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
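The removed IP_SFLSIZE() open-coded a trailing-array allocation size; the same value can be computed with the generic struct_size() helper from <linux/overflow.h>, which additionally saturates on overflow so an oversized count fails the allocation cleanly. A sketch of the equivalent, presumably what callers use now:

#include <linux/igmp.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct ip_sf_socklist *foo_alloc_socklist(unsigned int count)
{
	struct ip_sf_socklist *psl;

	/* sizeof(*psl) + count * sizeof(psl->sl_addr[0]), checked */
	psl = kmalloc(struct_size(psl, sl_addr, count), GFP_KERNEL);
	return psl;
}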
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index aaf4f1b4c277..a038feb63f23 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+ int len, int size)
+{
+ return 0;
+}
+#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h
new file mode 100644
index 000000000000..94a24b36998f
--- /dev/null
+++ b/include/linux/ioam6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_H
+#define _LINUX_IOAM6_H
+
+#include <uapi/linux/ioam6.h>
+
+#endif /* _LINUX_IOAM6_H */
diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h
new file mode 100644
index 000000000000..176e67919de3
--- /dev/null
+++ b/include/linux/ioam6_genl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_GENL_H
+#define _LINUX_IOAM6_GENL_H
+
+#include <uapi/linux/ioam6_genl.h>
+
+#endif /* _LINUX_IOAM6_GENL_H */
diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..07d9dfedd29d
--- /dev/null
+++ b/include/linux/ioam6_iptunnel.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_IPTUNNEL_H
+#define _LINUX_IOAM6_IPTUNNEL_H
+
+#include <uapi/linux/ioam6_iptunnel.h>
+
+#endif /* _LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 70b2ad3b9884..ef4a69865737 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -76,6 +76,9 @@ struct ipv6_devconf {
__s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
+ __u32 ioam6_id;
+ __u32 ioam6_id_wide;
+ __u8 ioam6_enabled;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 24797929d8a1..20151c4f1e0e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1582,7 +1582,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+ gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 944aa3aa3035..beb918328eef 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -356,6 +356,7 @@ struct mhi_controller_config {
* @fbc_download: MHI host needs to do complete image transfer (optional)
* @wake_set: Device wakeup set flag
* @irq_flags: irq flags passed to request_irq (optional)
+ * @mru: the default MRU for the MHI device
*
* Fields marked as (required) need to be populated by the controller driver
* before calling mhi_register_controller(). For the fields marked as (optional)
@@ -448,6 +449,7 @@ struct mhi_controller {
bool fbc_download;
bool wake_set;
unsigned long irq_flags;
+ u32 mru;
};
/**
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 219b93cad1dd..12ea29e04293 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -32,7 +32,7 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
-extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0025913505ab..66eaf0aa7f69 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1038,7 +1038,7 @@ enum {
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
- * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+ * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
@@ -1157,6 +1157,9 @@ enum mlx5_cap_mode {
HCA_CAP_OPMOD_GET_CUR = 1,
};
+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
enum mlx5_cap_type {
MLX5_CAP_GENERAL = 0,
MLX5_CAP_ETHERNET_OFFLOADS,
@@ -1213,55 +1216,55 @@ enum mlx5_qcam_feature_groups {
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_64(mdev, cap) \
- MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
#define MLX5_CAP_GEN_2(mdev, cap) \
- MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
#define MLX5_CAP_GEN_2_64(mdev, cap) \
- MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap)
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap)
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
- MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -1301,11 +1304,11 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1327,31 +1330,31 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_eswitch_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
#define MLX5_CAP_ODP_MAX(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
#define MLX5_CAP_DEBUG(mdev, cap)\
- MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+ MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
@@ -1387,27 +1390,27 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP_DEV_MEM(mdev, cap)\
- MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP64_DEV_MEM(mdev, cap)\
- MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP_TLS(mdev, cap) \
- MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)
+ MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
- MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET(virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET64(virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP_IPSEC(mdev, cap)\
- MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)
+ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
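The capability macros above all switch from the flat hca_cur/hca_max arrays to per-type struct mlx5_hca_cap pointers, so storage is only allocated for capability types actually queried; the new comment at enum mlx5_cap_type requires additions to be reflected in mlx5_hca_caps_alloc(). An illustrative sketch of what the per-type allocation amounts to (the real allocator lives in the mlx5 core and may differ in detail):

#include <linux/mlx5/driver.h>
#include <linux/slab.h>

static int foo_hca_cap_alloc(struct mlx5_core_dev *dev,
			     enum mlx5_cap_type type)
{
	dev->caps.hca[type] = kzalloc(sizeof(*dev->caps.hca[type]),
				      GFP_KERNEL);
	if (!dev->caps.hca[type])
		return -ENOMEM;
	return 0;
}

/* Readers are unchanged apart from the macro bodies, e.g.:
 *	if (MLX5_CAP_GEN(mdev, eswitch_manager)) ...
 */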
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 25a8be58d289..e23417424373 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -581,7 +581,7 @@ struct mlx5_priv {
/* end: qp staff */
/* start: alloc staff */
- /* protect buffer alocation according to numa node */
+ /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
@@ -623,8 +623,7 @@ struct mlx5_priv {
};
enum mlx5_device_state {
- MLX5_DEVICE_STATE_UNINITIALIZED,
- MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_UP = 1,
MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
@@ -730,6 +729,11 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+struct mlx5_hca_cap {
+ u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -741,8 +745,7 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct {
- u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
@@ -1110,7 +1113,7 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
}
/* Async-atomic event notifier used by mlx5 core to forward FW
- * evetns recived from event queue to mlx5 consumers.
+ * events received from event queue to mlx5 consumers.
 * Optimise event queue dispatching.
*/
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
@@ -1137,6 +1140,8 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
@@ -1144,6 +1149,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
+struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index bc7db2e059eb..4ab5c1fc1270 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -29,11 +29,20 @@ enum {
REP_LOADED,
};
+enum mlx5_switchdev_event {
+ MLX5_SWITCHDEV_EVENT_PAIR,
+ MLX5_SWITCHDEV_EVENT_UNPAIR,
+};
+
struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_ops {
int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ int (*event)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data);
};
struct mlx5_eswitch_rep_data {
@@ -63,6 +72,7 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
struct mlx5_eswitch_rep *rep, u32 sqn);
#ifdef CONFIG_MLX5_ESWITCH
@@ -128,6 +138,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
@@ -171,6 +182,11 @@ static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
return 0;
}
+static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 77746f7e35b8..0106c67e8ccb 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,6 +38,8 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b0009aa3647f..f3638d09ba77 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -865,7 +865,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 nic_bw_share[0x1];
u8 nic_rate_limit[0x1];
u8 packet_pacing_uid[0x1];
- u8 reserved_at_c[0x14];
+ u8 log_esw_max_sched_depth[0x4];
+ u8 reserved_at_10[0x10];
u8 reserved_at_20[0xb];
u8 log_max_qos_nic_queue_group[0x5];
@@ -921,7 +922,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 scatter_fcs[0x1];
u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
@@ -1651,7 +1653,13 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_option_data_len[0x5];
u8 reserved_at_570[0x10];
- u8 reserved_at_580[0x33];
+ u8 reserved_at_580[0xb];
+ u8 log_max_dci_stream_channels[0x5];
+ u8 reserved_at_590[0x3];
+ u8 log_max_dci_errored_streams[0x5];
+ u8 reserved_at_598[0x8];
+
+ u8 reserved_at_5a0[0x13];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -3020,10 +3028,12 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_at_3e0[0x8];
+ u8 reserved_at_3e0[0x3];
+ u8 log_num_dci_stream_channels[0x5];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x8];
+ u8 reserved_at_400[0x3];
+ u8 log_num_dci_errored_streams[0x5];
u8 deth_sqpn[0x18];
u8 reserved_at_420[0x20];
@@ -3911,7 +3921,7 @@ struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x2];
u8 dbr_umem_valid[0x1];
- u8 apu_thread_cq[0x1];
+ u8 apu_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -3937,8 +3947,7 @@ struct mlx5_ifc_cqc_bits {
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_at_a0[0x18];
- u8 c_eqn[0x8];
+ u8 c_eqn_or_apu_element[0x20];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 52bbd2b7cb46..7f8ee09c711f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -103,11 +103,19 @@ struct page {
unsigned long pp_magic;
struct page_pool *pp;
unsigned long _pp_mapping_pad;
- /**
- * @dma_addr: might require a 64-bit value on
- * 32-bit architectures.
- */
- unsigned long dma_addr[2];
+ unsigned long dma_addr;
+ union {
+ /**
+ * dma_addr_upper: might require a 64-bit
+ * value on 32-bit architectures.
+ */
+ unsigned long dma_addr_upper;
+ /**
+ * For frag page support, not supported in
+ * 32-bit architectures with 64-bit DMA.
+ */
+ atomic_long_t pp_frag_count;
+ };
};
struct { /* slab, slob and slub */
union {
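The page-pool words in struct page are reshuffled here: dma_addr keeps the low word, while the upper word becomes a union of dma_addr_upper (needed only on 32-bit hosts with 64-bit DMA) and pp_frag_count (the frag-allocation refcount), which is exactly why frag support is excluded in that 32-bit/64-bit-DMA combination. The matching accessors live in include/net/page_pool.h; a sketch of how the address is reassembled could look like this (the double shift avoids a shift-count warning when dma_addr_t is only 32 bits wide):

#include <linux/mm_types.h>

static dma_addr_t foo_page_dma_addr(const struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	/* compile-time constant: upper word is live only when
	 * dma_addr_t is wider than unsigned long
	 */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}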
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 12036619346c..a85c9f0bd470 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -75,6 +75,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eaf5bb008aa9..7c41593c1d6a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -47,6 +47,7 @@
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
+#include <linux/rbtree.h>
struct netpoll_info;
struct device;
@@ -208,6 +209,7 @@ struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
+ struct rb_node node;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
@@ -224,6 +226,9 @@ struct netdev_hw_addr {
struct netdev_hw_addr_list {
struct list_head list;
int count;
+
+ /* Auxiliary tree for faster lookup on addition and deletion */
+ struct rb_root tree;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
@@ -295,18 +300,6 @@ enum netdev_state_t {
};
-/*
- * This structure holds boot-time configured netdevice settings. They
- * are then used in the device probing.
- */
-struct netdev_boot_setup {
- char name[IFNAMSIZ];
- struct ifmap map;
-};
-#define NETDEV_BOOT_SETUP_MAX 8
-
-int __init netdev_boot_setup(char *str);
-
struct gro_list {
struct list_head list;
int count;
@@ -734,13 +727,13 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
+ struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_RPS
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
struct kobject kobj;
struct net_device *dev;
- struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
@@ -1086,9 +1079,18 @@ struct netdev_net_notifier {
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user requests an ioctl which can't be handled by
- * the generic interface code. If not defined ioctls return
- * not supported error code.
+ * Old-style ioctl entry point. This is used internally by the
+ * appletalk and ieee802154 subsystems but is no longer called by
+ * the device ioctl handler.
+ *
+ * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Used by the bonding driver for its device specific ioctls:
+ * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
+ * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
+ *
+ * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
+ * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
@@ -1321,6 +1323,9 @@ struct netdev_net_notifier {
* that got dropped are freed/returned via xdp_return_frame().
* Returns negative number, means general error invoking ndo, meaning
* no frames were xmit'ed and core-caller will free all frames.
+ * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ * struct xdp_buff *xdp);
+ * Get the xmit slave of master device based on the xdp_buff.
* int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
* This function is used to wake up the softirq, ksoftirqd or kthread
* responsible for sending and/or receiving packets on a specific
@@ -1361,6 +1366,15 @@ struct net_device_ops {
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
+ int (*ndo_eth_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocbond)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocwandev)(struct net_device *dev,
+ struct if_settings *ifs);
+ int (*ndo_siocdevprivate)(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
@@ -1539,6 +1553,8 @@ struct net_device_ops {
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
+ struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ struct xdp_buff *xdp);
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
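The catch-all ndo_do_ioctl is thus split by command class: ndo_eth_ioctl for MII and hardware-timestamping ioctls, ndo_siocbond for bonding, ndo_siocwandev for WAN devices, and ndo_siocdevprivate for SIOCDEVPRIVATE. A hedged sketch of an Ethernet driver wiring up the new hook (foo_hwtstamp_* are hypothetical; phy_mii_ioctl() is the usual PHY fallback):

#include <linux/netdevice.h>
#include <linux/phy.h>

static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical */

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return foo_hwtstamp_set(priv, ifr);	/* hypothetical */
	case SIOCGHWTSTAMP:
		return foo_hwtstamp_get(priv, ifr);	/* hypothetical */
	default:
		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	}
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_eth_ioctl = foo_eth_ioctl,
};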
@@ -1805,6 +1821,7 @@ enum netdev_ml_priv_type {
* @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
* device struct
* @mpls_ptr: mpls_dev struct pointer
+ * @mctp_ptr: MCTP specific data
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -2092,6 +2109,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct mctp_dev __rcu *mctp_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -2917,7 +2937,6 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
}
int netdev_boot_setup_check(struct net_device *dev);
-unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
@@ -3289,14 +3308,6 @@ static inline bool dev_has_header(const struct net_device *dev)
return dev->header_ops && dev->header_ops->create;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
- int len, int size);
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
-static inline int unregister_gifconf(unsigned int family)
-{
- return register_gifconf(family, NULL);
-}
-
#ifdef CONFIG_NET_FLOW_LIMIT
#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
@@ -3915,6 +3926,8 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
return 0;
}
#endif
+int netif_set_real_num_queues(struct net_device *dev,
+ unsigned int txq, unsigned int rxq);
static inline struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
@@ -3948,7 +3961,7 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
/*
* It is not allowed to call kfree_skb() or consume_skb() from hardware
* interrupt context or with hardware interrupts being disabled.
- * (in_irq() || irqs_disabled())
+ * (in_hardirq() || irqs_disabled())
*
* We provide four helpers that can be used in following contexts :
*
@@ -3984,6 +3997,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
@@ -4012,10 +4027,16 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
+static inline bool is_socket_ioctl_cmd(unsigned int cmd)
+{
+ return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
+}
+int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
+int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
- bool *need_copyout);
-int dev_ifconf(struct net *net, struct ifconf *, int);
-int dev_ethtool(struct net *net, struct ifreq *);
+ void __user *data, bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
@@ -4069,6 +4090,7 @@ typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+u8 dev_xdp_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4136,11 +4158,13 @@ void netdev_run_todo(void);
*/
static inline void dev_put(struct net_device *dev)
{
+ if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
- this_cpu_dec(*dev->pcpu_refcnt);
+ this_cpu_dec(*dev->pcpu_refcnt);
#else
- refcount_dec(&dev->dev_refcnt);
+ refcount_dec(&dev->dev_refcnt);
#endif
+ }
}
/**
@@ -4151,11 +4175,13 @@ static inline void dev_put(struct net_device *dev)
*/
static inline void dev_hold(struct net_device *dev)
{
+ if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
- this_cpu_inc(*dev->pcpu_refcnt);
+ this_cpu_inc(*dev->pcpu_refcnt);
#else
- refcount_inc(&dev->dev_refcnt);
+ refcount_inc(&dev->dev_refcnt);
#endif
+ }
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
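dev_hold() and dev_put() now tolerate a NULL device, so error and teardown paths no longer need an `if (dev)` guard around the refcount operation. A tiny illustration (the context structure is hypothetical):

#include <linux/netdevice.h>

static void foo_detach(struct foo_ctx *ctx)	/* hypothetical ctx */
{
	dev_put(ctx->dev);	/* fine even if ctx->dev == NULL */
	ctx->dev = NULL;
}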
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 28d7027cd460..5897f3dbaf7c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -238,9 +238,6 @@ struct xt_table {
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
- /* called when table is needed in the given netns */
- int (*table_init)(struct net *net);
-
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -452,6 +449,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
+int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net));
+void xt_unregister_template(const struct xt_table *t);
+
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index a8178253ce53..10a01978bc0d 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -127,4 +127,6 @@ static inline bool ebt_invalid_target(int target)
return (target < -NUM_STANDARD_TARGETS || target >= 0);
}
+int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net));
+void ebt_unregister_template(const struct ebt_table *t);
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 540b377ca8f6..947430637cac 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1620,6 +1620,16 @@ static inline bool pci_aer_available(void) { return false; }
bool pci_ats_disabled(void);
+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+bool pcie_ptm_enabled(struct pci_dev *dev);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+static inline bool pcie_ptm_enabled(struct pci_dev *dev)
+{ return false; }
+#endif
+
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
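Exposing pci_enable_ptm() here lets NIC drivers turn on PCIe Precision Time Measurement before offering getcrosststamp(); the stub returns -EINVAL with CONFIG_PCIE_PTM off, so the call can be unconditional. A sketch of opportunistic probe-time usage (the wrapper is hypothetical):

#include <linux/pci.h>

static void foo_probe_ptm(struct pci_dev *pdev)
{
	u8 granularity;

	if (pci_enable_ptm(pdev, &granularity))
		pci_info(pdev, "PTM not available\n");
	else
		pci_info(pdev, "PTM enabled, granularity %u ns\n",
			 granularity);
}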
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2d510ad750ed..fe156a8170aa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -762,6 +762,7 @@ struct perf_event {
#ifdef CONFIG_BPF_SYSCALL
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
+ u64 bpf_cookie;
#endif
#ifdef CONFIG_EVENT_TRACING
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3b80dc3ed68b..736e1d1a47c4 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1431,6 +1431,7 @@ static inline int phy_device_register(struct phy_device *phy)
static inline void phy_device_free(struct phy_device *phydev) { }
#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
+int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 71fac9237725..2e5565067355 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -215,7 +215,7 @@ static inline long scaled_ppm_to_ppb(long ppm)
return (long)ppb;
}
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
* ptp_clock_register() - register a PTP hardware clock driver
@@ -307,6 +307,33 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
*/
void ptp_cancel_worker_sync(struct ptp_clock *ptp);
+#else
+static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent)
+{ return NULL; }
+static inline int ptp_clock_unregister(struct ptp_clock *ptp)
+{ return 0; }
+static inline void ptp_clock_event(struct ptp_clock *ptp,
+ struct ptp_clock_event *event)
+{ }
+static inline int ptp_clock_index(struct ptp_clock *ptp)
+{ return -1; }
+static inline int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{ return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+ unsigned long delay)
+{ return -EOPNOTSUPP; }
+static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{ }
+#endif
+
+#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
+/*
+ * These are called by the network core, and don't work if PTP is in
+ * a loadable module.
+ */
+
/**
* ptp_get_vclocks_index() - get all vclocks index on pclock, and
* caller is responsible to free memory
@@ -327,26 +354,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
*/
void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
int vclock_index);
-
#else
-static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
- struct device *parent)
-{ return NULL; }
-static inline int ptp_clock_unregister(struct ptp_clock *ptp)
-{ return 0; }
-static inline void ptp_clock_event(struct ptp_clock *ptp,
- struct ptp_clock_event *event)
-{ }
-static inline int ptp_clock_index(struct ptp_clock *ptp)
-{ return -1; }
-static inline int ptp_find_pin(struct ptp_clock *ptp,
- enum ptp_pin_function func, unsigned int chan)
-{ return -1; }
-static inline int ptp_schedule_worker(struct ptp_clock *ptp,
- unsigned long delay)
-{ return -EOPNOTSUPP; }
-static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
-{ }
static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{ return 0; }
static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1780260f237b..e12b524426b0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -42,6 +42,7 @@ struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
+struct bpf_run_ctx;
struct capture_control;
struct cfs_rq;
struct fs_struct;
@@ -1453,6 +1454,8 @@ struct task_struct {
#ifdef CONFIG_BPF_SYSCALL
/* Used by BPF task local storage */
struct bpf_local_storage __rcu *bpf_storage;
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
#endif
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b2db9cd9a73f..6bdb0db3e825 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -689,6 +689,7 @@ typedef unsigned char *sk_buff_data_t;
* CHECKSUM_UNNECESSARY (max 3)
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
+ * @slow_gro: state present at GRO time, slower prepare step required
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
* @secmark: security marking
@@ -863,13 +864,14 @@ struct sk_buff {
__u8 tc_skip_classify:1;
__u8 tc_at_ingress:1;
#endif
-#ifdef CONFIG_NET_REDIRECT
__u8 redirected:1;
+#ifdef CONFIG_NET_REDIRECT
__u8 from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
#endif
+ __u8 slow_gro:1;
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -990,6 +992,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
@@ -1006,6 +1009,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
@@ -1179,6 +1183,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
+struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
@@ -4216,6 +4221,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ skb->slow_gro |= !!nfct;
skb->_nfct = nfct;
#endif
}
@@ -4375,6 +4381,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
+ dst->slow_gro = src->slow_gro;
__nf_copy(dst, src, true);
}
@@ -4664,17 +4671,13 @@ static inline __wsum lco_csum(struct sk_buff *skb)
static inline bool skb_is_redirected(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_REDIRECT
return skb->redirected;
-#else
- return false;
-#endif
}
static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
-#ifdef CONFIG_NET_REDIRECT
skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
skb->from_ingress = from_ingress;
if (skb->from_ingress)
skb->tstamp = 0;
@@ -4683,9 +4686,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
static inline void skb_reset_redirect(struct sk_buff *skb)
{
-#ifdef CONFIG_NET_REDIRECT
skb->redirected = 0;
-#endif
}
static inline bool skb_csum_is_sctp(struct sk_buff *skb)
@@ -4711,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
}
#ifdef CONFIG_PAGE_POOL
-static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
- struct page_pool *pp)
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
{
skb->pp_recycle = 1;
- page_pool_store_mem_info(page, pp);
}
#endif
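skb_mark_for_recycle() drops the page/pool pair because the pool pointer now lives in struct page itself (the pp member above), so the caller only flags the skb. A driver RX sketch under that assumption (ring layout hypothetical):

#include <linux/skbuff.h>

static struct sk_buff *foo_build_rx_skb(struct foo_ring *ring, void *va,
					unsigned int len)
{
	struct sk_buff *skb = build_skb(va, PAGE_SIZE);

	if (!skb)
		return NULL;

	skb_reserve(skb, ring->headroom);	/* hypothetical ring */
	skb_put(skb, len);
	skb_mark_for_recycle(skb);	/* pages came from a page_pool */
	return skb;
}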
diff --git a/include/linux/socket.h b/include/linux/socket.h
index d3c1a42a2edd..041d6032a348 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -223,8 +223,11 @@ struct ucred {
* reuses AF_INET address family
*/
#define AF_XDP 44 /* XDP sockets */
+#define AF_MCTP 45 /* Management component
+ * transport protocol
+ */
-#define AF_MAX 45 /* For now.. */
+#define AF_MAX 46 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -274,6 +277,7 @@ struct ucred {
#define PF_QIPCRTR AF_QIPCRTR
#define PF_SMC AF_SMC
#define PF_XDP AF_XDP
+#define PF_MCTP AF_MCTP
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
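AF_MCTP/PF_MCTP reserve family 45 for the new Management Component Transport Protocol sockets. From user space, creation is the usual datagram pattern; a minimal sketch (addressing via sockaddr_mctp comes from the UAPI headers and is omitted here):

#include <sys/socket.h>

int open_mctp_socket(void)
{
	return socket(AF_MCTP, SOCK_DGRAM, 0);
}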
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 0d5a2691e7e9..f9b53acb4e02 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h
index 3f8bc973d67d..19253bfacd1a 100644
--- a/include/linux/ssb/ssb_driver_extif.h
+++ b/include/linux/ssb/ssb_driver_extif.h
@@ -197,7 +197,7 @@ struct ssb_extif {
static inline bool ssb_extif_available(struct ssb_extif *extif)
{
- return 0;
+ return false;
}
static inline
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index ad413b382a3c..8e0631a4b046 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -675,7 +675,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
-int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
+int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
@@ -692,7 +692,7 @@ static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *c
}
static inline int
-perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
+perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
return -EOPNOTSUPP;
}
@@ -803,6 +803,9 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
+void perf_event_free_bpf_prog(struct perf_event *event);
+
void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h
index 20d310331eb5..46b15e2aaefb 100644
--- a/include/linux/typecheck.h
+++ b/include/linux/typecheck.h
@@ -22,4 +22,13 @@
(void)__tmp; \
})
+/*
+ * Check at compile time that something is a pointer type.
+ */
+#define typecheck_pointer(x) \
+({ typeof(x) __dummy; \
+ (void)sizeof(*__dummy); \
+ 1; \
+})
+
#endif /* TYPECHECK_H_INCLUDED */
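typecheck_pointer() rejects non-pointer arguments at compile time: sizeof(*__dummy) is only well-formed when the argument's type can be dereferenced, and since everything happens inside sizeof, no code is generated; like typecheck(), it evaluates to 1 so it can sit inside an expression. A small usage example (the publishing macro is hypothetical):

#include <linux/compiler.h>
#include <linux/typecheck.h>

/* Publish a pointer with WRITE_ONCE() while rejecting accidental
 * integer arguments at build time.
 */
#define foo_publish(slot, ptr) \
	do { \
		typecheck_pointer(ptr); \
		WRITE_ONCE(slot, ptr); \
	} while (0)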