Diffstat (limited to 'include/linux')
38 files changed, 804 insertions, 170 deletions
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h index 0a24ab7cb66f..8e2eefa9fbc0 100644 --- a/include/linux/acpi_mdio.h +++ b/include/linux/acpi_mdio.h @@ -9,7 +9,14 @@ #include <linux/phy.h> #if IS_ENABLED(CONFIG_ACPI_MDIO) -int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode); +int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode, + struct module *owner); + +static inline int +acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle) +{ + return __acpi_mdiobus_register(mdio, handle, THIS_MODULE); +} #else /* CONFIG_ACPI_MDIO */ static inline int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index 77bc5522e61c..4226379a232d 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -1208,15 +1208,21 @@ arch_atomic_inc_and_test(atomic_t *v) #define arch_atomic_inc_and_test arch_atomic_inc_and_test #endif +#ifndef arch_atomic_add_negative_relaxed +#ifdef arch_atomic_add_negative +#define arch_atomic_add_negative_acquire arch_atomic_add_negative +#define arch_atomic_add_negative_release arch_atomic_add_negative +#define arch_atomic_add_negative_relaxed arch_atomic_add_negative +#endif /* arch_atomic_add_negative */ + #ifndef arch_atomic_add_negative /** - * arch_atomic_add_negative - add and test if negative + * arch_atomic_add_negative - Add and test if negative * @i: integer value to add * @v: pointer of type atomic_t * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. */ static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) @@ -1226,6 +1232,95 @@ arch_atomic_add_negative(int i, atomic_t *v) #define arch_atomic_add_negative arch_atomic_add_negative #endif +#ifndef arch_atomic_add_negative_acquire +/** + * arch_atomic_add_negative_acquire - Add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic_add_negative_acquire(int i, atomic_t *v) +{ + return arch_atomic_add_return_acquire(i, v) < 0; +} +#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire +#endif + +#ifndef arch_atomic_add_negative_release +/** + * arch_atomic_add_negative_release - Add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic_add_negative_release(int i, atomic_t *v) +{ + return arch_atomic_add_return_release(i, v) < 0; +} +#define arch_atomic_add_negative_release arch_atomic_add_negative_release +#endif + +#ifndef arch_atomic_add_negative_relaxed +/** + * arch_atomic_add_negative_relaxed - Add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. 
+ */ +static __always_inline bool +arch_atomic_add_negative_relaxed(int i, atomic_t *v) +{ + return arch_atomic_add_return_relaxed(i, v) < 0; +} +#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed +#endif + +#else /* arch_atomic_add_negative_relaxed */ + +#ifndef arch_atomic_add_negative_acquire +static __always_inline bool +arch_atomic_add_negative_acquire(int i, atomic_t *v) +{ + bool ret = arch_atomic_add_negative_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire +#endif + +#ifndef arch_atomic_add_negative_release +static __always_inline bool +arch_atomic_add_negative_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_add_negative_relaxed(i, v); +} +#define arch_atomic_add_negative_release arch_atomic_add_negative_release +#endif + +#ifndef arch_atomic_add_negative +static __always_inline bool +arch_atomic_add_negative(int i, atomic_t *v) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_add_negative_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_add_negative arch_atomic_add_negative +#endif + +#endif /* arch_atomic_add_negative_relaxed */ + #ifndef arch_atomic_fetch_add_unless /** * arch_atomic_fetch_add_unless - add unless the number is already a given value @@ -2329,15 +2424,21 @@ arch_atomic64_inc_and_test(atomic64_t *v) #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test #endif +#ifndef arch_atomic64_add_negative_relaxed +#ifdef arch_atomic64_add_negative +#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative +#define arch_atomic64_add_negative_release arch_atomic64_add_negative +#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative +#endif /* arch_atomic64_add_negative */ + #ifndef arch_atomic64_add_negative /** - * arch_atomic64_add_negative - add and test if negative + * arch_atomic64_add_negative - Add and test if negative * @i: integer value to add * @v: pointer of type atomic64_t * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. */ static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v) @@ -2347,6 +2448,95 @@ arch_atomic64_add_negative(s64 i, atomic64_t *v) #define arch_atomic64_add_negative arch_atomic64_add_negative #endif +#ifndef arch_atomic64_add_negative_acquire +/** + * arch_atomic64_add_negative_acquire - Add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return_acquire(i, v) < 0; +} +#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire +#endif + +#ifndef arch_atomic64_add_negative_release +/** + * arch_atomic64_add_negative_release - Add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. 
+ */ +static __always_inline bool +arch_atomic64_add_negative_release(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return_release(i, v) < 0; +} +#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release +#endif + +#ifndef arch_atomic64_add_negative_relaxed +/** + * arch_atomic64_add_negative_relaxed - Add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true if the result is negative, + * or false when the result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return_relaxed(i, v) < 0; +} +#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed +#endif + +#else /* arch_atomic64_add_negative_relaxed */ + +#ifndef arch_atomic64_add_negative_acquire +static __always_inline bool +arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v) +{ + bool ret = arch_atomic64_add_negative_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire +#endif + +#ifndef arch_atomic64_add_negative_release +static __always_inline bool +arch_atomic64_add_negative_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_add_negative_relaxed(i, v); +} +#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release +#endif + +#ifndef arch_atomic64_add_negative +static __always_inline bool +arch_atomic64_add_negative(s64 i, atomic64_t *v) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_add_negative_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_add_negative arch_atomic64_add_negative +#endif + +#endif /* arch_atomic64_add_negative_relaxed */ + #ifndef arch_atomic64_fetch_add_unless /** * arch_atomic64_fetch_add_unless - add unless the number is already a given value @@ -2456,4 +2646,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// b5e87bdd5ede61470c29f7a7e4de781af3770f09 +// 00071fffa021cec66f6290d706d69c91df87bade diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index 7a139ec030b0..0496816738ca 100644 --- a/include/linux/atomic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -592,6 +592,28 @@ atomic_add_negative(int i, atomic_t *v) return arch_atomic_add_negative(i, v); } +static __always_inline bool +atomic_add_negative_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_negative_acquire(i, v); +} + +static __always_inline bool +atomic_add_negative_release(int i, atomic_t *v) +{ + kcsan_release(); + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_negative_release(i, v); +} + +static __always_inline bool +atomic_add_negative_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_negative_relaxed(i, v); +} + static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { @@ -1211,6 +1233,28 @@ atomic64_add_negative(s64 i, atomic64_t *v) return arch_atomic64_add_negative(i, v); } +static __always_inline bool +atomic64_add_negative_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_negative_acquire(i, v); +} + +static __always_inline bool +atomic64_add_negative_release(s64 i, atomic64_t *v) +{ + 
kcsan_release(); + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_negative_release(i, v); +} + +static __always_inline bool +atomic64_add_negative_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_negative_relaxed(i, v); +} + static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { @@ -1830,6 +1874,28 @@ atomic_long_add_negative(long i, atomic_long_t *v) return arch_atomic_long_add_negative(i, v); } +static __always_inline bool +atomic_long_add_negative_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative_acquire(i, v); +} + +static __always_inline bool +atomic_long_add_negative_release(long i, atomic_long_t *v) +{ + kcsan_release(); + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative_release(i, v); +} + +static __always_inline bool +atomic_long_add_negative_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative_relaxed(i, v); +} + static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { @@ -2083,4 +2149,4 @@ atomic_long_dec_if_positive(atomic_long_t *v) }) #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ -// 764f741eb77a7ad565dc8d99ce2837d5542e8aee +// 1b485de9cbaa4900de59e14ee2084357eaeb1c3a diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h index 800b8c35992d..2fc51ba66beb 100644 --- a/include/linux/atomic/atomic-long.h +++ b/include/linux/atomic/atomic-long.h @@ -479,6 +479,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v) return arch_atomic64_add_negative(i, v); } +static __always_inline bool +arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative_acquire(i, v); +} + +static __always_inline bool +arch_atomic_long_add_negative_release(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative_release(i, v); +} + +static __always_inline bool +arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative_relaxed(i, v); +} + static __always_inline long arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { @@ -973,6 +991,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v) return arch_atomic_add_negative(i, v); } +static __always_inline bool +arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative_acquire(i, v); +} + +static __always_inline bool +arch_atomic_long_add_negative_release(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative_release(i, v); +} + +static __always_inline bool +arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative_relaxed(i, v); +} + static __always_inline long arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { @@ -1011,4 +1047,4 @@ arch_atomic_long_dec_if_positive(atomic_long_t *v) #endif /* CONFIG_64BIT */ #endif /* _LINUX_ATOMIC_LONG_H */ -// e8f0e08ff072b74d180eabe2ad001282b38c2c88 +// a194c07d7d2f4b0e178d3c118c919775d5d65f50 diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 520b238abd5a..6792a7940e1e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -161,6 +161,8 @@ struct bpf_map_ops { bpf_callback_t callback_fn, void *callback_ctx, u64 flags); + u64 (*map_mem_usage)(const struct bpf_map *map); + /* BTF id of struct allocated by map_alloc */ int 
*map_btf_id; @@ -607,11 +609,18 @@ enum bpf_type_flag { */ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), + /* DYNPTR points to sk_buff */ + DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS), + + /* DYNPTR points to xdp_buff */ + DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; -#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF) +#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \ + | DYNPTR_TYPE_XDP) /* Max number of base types. */ #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) @@ -1124,6 +1133,37 @@ static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( return bpf_func(ctx, insnsi); } +/* the implementation of the opaque uapi struct bpf_dynptr */ +struct bpf_dynptr_kern { + void *data; + /* Size represents the number of usable bytes of dynptr data. + * If for example the offset is at 4 for a local dynptr whose data is + * of type u64, the number of usable bytes is 4. + * + * The upper 8 bits are reserved. It is as follows: + * Bits 0 - 23 = size + * Bits 24 - 30 = dynptr type + * Bit 31 = whether dynptr is read-only + */ + u32 size; + u32 offset; +} __aligned(8); + +enum bpf_dynptr_type { + BPF_DYNPTR_TYPE_INVALID, + /* Points to memory that is local to the bpf program */ + BPF_DYNPTR_TYPE_LOCAL, + /* Underlying data is a ringbuf record */ + BPF_DYNPTR_TYPE_RINGBUF, + /* Underlying data is a sk_buff */ + BPF_DYNPTR_TYPE_SKB, + /* Underlying data is a xdp_buff */ + BPF_DYNPTR_TYPE_XDP, +}; + +int bpf_dynptr_check_size(u32 size); +u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr); + #ifdef CONFIG_BPF_JIT int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); @@ -2241,7 +2281,7 @@ struct bpf_core_ctx { bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, const struct bpf_reg_state *reg, - int off); + int off, const char *suffix); bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, const struct btf *reg_btf, u32 reg_id, @@ -2266,6 +2306,11 @@ static inline bool has_current_bpf_ctx(void) } void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog); + +void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, + enum bpf_dynptr_type type, u32 offset, u32 size); +void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); +void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2495,6 +2540,19 @@ static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) static inline void bpf_cgrp_storage_free(struct cgroup *cgroup) { } + +static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, + enum bpf_dynptr_type type, u32 offset, u32 size) +{ +} + +static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) +{ +} + +static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) +{ +} #endif /* CONFIG_BPF_SYSCALL */ void __bpf_free_used_btfs(struct bpf_prog_aux *aux, @@ -2566,6 +2624,7 @@ static inline bool bpf_map_is_offloaded(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); +u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map); int bpf_prog_test_run_syscall(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); @@ -2637,6 +2696,11 @@ static inline void 
bpf_map_offload_map_free(struct bpf_map *map) { } +static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) +{ + return 0; +} + static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) @@ -2801,6 +2865,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size); +int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, + struct bpf_dynptr_kern *ptr); #else static inline bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, @@ -2822,6 +2888,11 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, { return 0; } +static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, + struct bpf_dynptr_kern *ptr) +{ + return -EOPNOTSUPP; +} #endif #ifdef CONFIG_INET @@ -2913,36 +2984,6 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 num_args, struct bpf_bprintf_data *data); void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); -/* the implementation of the opaque uapi struct bpf_dynptr */ -struct bpf_dynptr_kern { - void *data; - /* Size represents the number of usable bytes of dynptr data. - * If for example the offset is at 4 for a local dynptr whose data is - * of type u64, the number of usable bytes is 4. - * - * The upper 8 bits are reserved. It is as follows: - * Bits 0 - 23 = size - * Bits 24 - 30 = dynptr type - * Bit 31 = whether dynptr is read-only - */ - u32 size; - u32 offset; -} __aligned(8); - -enum bpf_dynptr_type { - BPF_DYNPTR_TYPE_INVALID, - /* Points to memory that is local to the bpf program */ - BPF_DYNPTR_TYPE_LOCAL, - /* Underlying data is a kernel-produced ringbuf record */ - BPF_DYNPTR_TYPE_RINGBUF, -}; - -void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, - enum bpf_dynptr_type type, u32 offset, u32 size); -void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); -int bpf_dynptr_check_size(u32 size); -u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr); - #ifdef CONFIG_BPF_LSM void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); void bpf_cgroup_atype_put(int cgroup_atype); diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 6d37a40cd90e..d934248b8e81 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -164,5 +164,6 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, void *value, u64 map_flags, gfp_t gfp_flags); void bpf_local_storage_free_rcu(struct rcu_head *rcu); +u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map); #endif /* _BPF_LOCAL_STORAGE_H */ diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h index 3e164b8efaa9..a7104af61ab4 100644 --- a/include/linux/bpf_mem_alloc.h +++ b/include/linux/bpf_mem_alloc.h @@ -14,6 +14,13 @@ struct bpf_mem_alloc { struct work_struct work; }; +/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects. + * Alloc and free are done with bpf_mem_cache_{alloc,free}(). + * + * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects. + * Alloc and free are done with bpf_mem_{alloc,free}() and the size of + * the returned object is given by the size argument of bpf_mem_alloc(). 
+ */ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu); void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index cf1bb1cf4a7b..18538bad2b8c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -537,7 +537,6 @@ struct bpf_verifier_env { bool bypass_spec_v1; bool bypass_spec_v4; bool seen_direct_write; - bool rcu_tag_supported; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; @@ -616,9 +615,6 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, enum bpf_arg_type arg_type); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno, u32 mem_size); -struct bpf_call_arg_meta; -int process_dynptr_func(struct bpf_verifier_env *env, int regno, - enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta); /* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, diff --git a/include/linux/btf.h b/include/linux/btf.h index 49e0fe6d8274..556b3e2e7471 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -70,7 +70,7 @@ #define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */ #define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */ #define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */ -#define KF_RCU (1 << 7) /* kfunc only takes rcu pointer arguments */ +#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */ /* * Tag marking a kernel function as a kfunc. This is meant to minimize the diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 3a4f7cd882ca..00950cc03bff 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -204,7 +204,7 @@ extern struct btf_id_set8 name; #else -#define BTF_ID_LIST(name) static u32 __maybe_unused name[16]; +#define BTF_ID_LIST(name) static u32 __maybe_unused name[64]; #define BTF_ID(prefix, name) #define BTF_ID_FLAGS(prefix, name, ...) 
#define BTF_ID_UNUSED diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 07e547c02fd8..325af611909f 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -305,10 +305,8 @@ struct dccp_sock { struct timer_list dccps_xmit_timer; }; -static inline struct dccp_sock *dccp_sk(const struct sock *sk) -{ - return (struct dccp_sock *)sk; -} +#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \ + dccps_inet_connection.icsk_inet.sk) static inline const char *dccp_role(const struct sock *sk) { diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 2792185dda22..798d35890118 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -75,6 +75,8 @@ enum { * @tx_push: The flag of tx push mode * @rx_push: The flag of rx push mode * @cqe_size: Size of TX/RX completion queue event + * @tx_push_buf_len: Size of TX push buffer + * @tx_push_buf_max_len: Maximum allowed size of TX push buffer */ struct kernel_ethtool_ringparam { u32 rx_buf_len; @@ -82,6 +84,8 @@ struct kernel_ethtool_ringparam { u8 tx_push; u8 rx_push; u32 cqe_size; + u32 tx_push_buf_len; + u32 tx_push_buf_max_len; }; /** @@ -90,12 +94,14 @@ struct kernel_ethtool_ringparam { * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push + * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len */ enum ethtool_supported_ring_param { - ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), - ETHTOOL_RING_USE_CQE_SIZE = BIT(1), - ETHTOOL_RING_USE_TX_PUSH = BIT(2), - ETHTOOL_RING_USE_RX_PUSH = BIT(3), + ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), + ETHTOOL_RING_USE_CQE_SIZE = BIT(1), + ETHTOOL_RING_USE_TX_PUSH = BIT(2), + ETHTOOL_RING_USE_RX_PUSH = BIT(3), + ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) diff --git a/include/linux/filter.h b/include/linux/filter.h index 1727898f1641..efa5d4a1677e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1542,4 +1542,50 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u64 index return XDP_REDIRECT; } +#ifdef CONFIG_NET +int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len); +int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, + u32 len, u64 flags); +int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); +int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len); +void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len); +void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, + void *buf, unsigned long len, bool flush); +#else /* CONFIG_NET */ +static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, + void *to, u32 len) +{ + return -EOPNOTSUPP; +} + +static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, + const void *from, u32 len, u64 flags) +{ + return -EOPNOTSUPP; +} + +static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, + void *buf, u32 len) +{ + return -EOPNOTSUPP; +} + +static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, + void *buf, u32 len) +{ + return -EOPNOTSUPP; +} + +static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) +{ + return NULL; +} + +static inline void *bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf, + unsigned long len, bool flush) +{ + return NULL; +} +#endif /* CONFIG_NET */ + #endif /* 
__LINUX_FILTER_H__ */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index b19d3284551f..ebf4349a53af 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -122,7 +122,7 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, sockptr_t optval, sockptr_t optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, sockptr_t optval, size_t offset); -extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, +extern int ip_mc_sf_allow(const struct sock *sk, __be32 local, __be32 rmt, int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 37dfdcfcdd54..839247a4f48e 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -336,10 +336,7 @@ static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; } -static inline struct raw6_sock *raw6_sk(const struct sock *sk) -{ - return (struct raw6_sock *)sk; -} +#define raw6_sk(ptr) container_of_const(ptr, struct raw6_sock, inet.sk) #define ipv6_only_sock(sk) (sk->sk_ipv6only) #define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index 9a6b55da8fd6..72831e35dca3 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -22,6 +22,7 @@ #define nlm4_fbig cpu_to_be32(NLM_FBIG) #define nlm4_failed cpu_to_be32(NLM_FAILED) +void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len); bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 71b06ebad402..af4dd536a52c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1357,6 +1357,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) +#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap) + +#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap) + #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index df0f82110249..f243bd10a5e1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -134,6 +134,7 @@ enum { MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MTMP = 0x900A, MLX5_REG_MCIA = 0x9014, MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, @@ -731,6 +732,7 @@ struct mlx5_fw_tracer; struct mlx5_vxlan; struct mlx5_geneve; struct mlx5_hv_vhca; +struct mlx5_thermal; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) @@ -808,6 +810,7 @@ struct mlx5_core_dev { struct mlx5_rsc_dump *rsc_dump; u32 vsc_addr; struct mlx5_hv_vhca *hv_vhca; + struct mlx5_thermal *thermal; }; struct mlx5_db { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 66d76e97a087..e47d6c58da35 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ 
b/include/linux/mlx5/mlx5_ifc.h @@ -404,10 +404,13 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 metadata_reg_c_0[0x1]; }; +/* Table 2170 - Flow Table Fields Supported 2 Format */ struct mlx5_ifc_flow_table_fields_supported_2_bits { u8 reserved_at_0[0xe]; u8 bth_opcode[0x1]; - u8 reserved_at_f[0x11]; + u8 reserved_at_f[0x1]; + u8 tunnel_header_0_1[0x1]; + u8 reserved_at_11[0xf]; u8 reserved_at_20[0x60]; }; @@ -895,7 +898,13 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; - u8 reserved_at_800[0x1000]; + u8 reserved_at_800[0xC00]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_esw_fdb; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_bitmask_support_2_esw_fdb; + + u8 reserved_at_1500[0x300]; u8 sw_steering_fdb_action_drop_icm_address_rx[0x40]; @@ -10869,6 +10878,31 @@ struct mlx5_ifc_mrtc_reg_bits { u8 time_l[0x20]; }; +struct mlx5_ifc_mtmp_reg_bits { + u8 reserved_at_0[0x14]; + u8 sensor_index[0xc]; + + u8 reserved_at_20[0x10]; + u8 temperature[0x10]; + + u8 mte[0x1]; + u8 mtr[0x1]; + u8 reserved_at_42[0xe]; + u8 max_temperature[0x10]; + + u8 tee[0x2]; + u8 reserved_at_62[0xe]; + u8 temp_threshold_hi[0x10]; + + u8 reserved_at_80[0x10]; + u8 temp_threshold_lo[0x10]; + + u8 reserved_at_a0[0x20]; + + u8 sensor_name_hi[0x20]; + u8 sensor_name_lo[0x20]; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -10931,6 +10965,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mfrl_reg_bits mfrl_reg; struct mlx5_ifc_mtutc_reg_bits mtutc_reg; struct mlx5_ifc_mrtc_reg_bits mrtc_reg; + struct mlx5_ifc_mtmp_reg_bits mtmp_reg; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index e96ee1e348cb..98b2e1e149f9 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -141,6 +141,12 @@ enum mlx5_ptys_width { MLX5_PTYS_WIDTH_12X = 1 << 4, }; +struct mlx5_port_eth_proto { + u32 cap; + u32 admin; + u32 oper; +}; + #define MLX5E_PROT_MASK(link_mode) (1U << link_mode) #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ (ext ? 
MLX5_GET(reg, out, ext_##field) : \ @@ -218,4 +224,14 @@ int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); + +int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, + struct mlx5_port_eth_proto *eproto); +bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev); +u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper, + bool force_legacy); +u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, + bool force_legacy); +int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); + #endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 470085b121d3..18a5be6ddd0f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -52,6 +52,7 @@ #include <linux/rbtree.h> #include <net/net_trackers.h> #include <net/net_debug.h> +#include <net/dropreason.h> struct netpoll_info; struct device; @@ -508,15 +509,18 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } -bool napi_complete_done(struct napi_struct *n, int work_done); /** - * napi_complete - NAPI processing complete - * @n: NAPI context + * napi_complete_done - NAPI processing complete + * @n: NAPI context + * @work_done: number of packets processed * - * Mark NAPI processing as complete. - * Consider using napi_complete_done() instead. + * Mark NAPI processing as complete. Should only be called if poll budget + * has not been completely consumed. + * Prefer over napi_complete(). * Return false if device should avoid rearming interrupts. */ +bool napi_complete_done(struct napi_struct *n, int work_done); + static inline bool napi_complete(struct napi_struct *n) { return napi_complete_done(n, 0); @@ -1308,6 +1312,17 @@ struct netdev_net_notifier { * Used to add FDB entries to dump requests. Implementers should add * entries to skb and update idx with the number of entries. * + * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[], + * u16 nlmsg_flags, struct netlink_ext_ack *extack); + * Adds an MDB entry to dev. + * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], + * struct netlink_ext_ack *extack); + * Deletes the MDB entry from dev. + * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, + * struct netlink_callback *cb); + * Dumps MDB entries from dev. The first argument (marker) in the netlink + * callback is used by core rtnetlink code. 
+ * * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, * u16 flags, struct netlink_ext_ack *extack) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, @@ -1570,6 +1585,16 @@ struct net_device_ops { const unsigned char *addr, u16 vid, u32 portid, u32 seq, struct netlink_ext_ack *extack); + int (*ndo_mdb_add)(struct net_device *dev, + struct nlattr *tb[], + u16 nlmsg_flags, + struct netlink_ext_ack *extack); + int (*ndo_mdb_del)(struct net_device *dev, + struct nlattr *tb[], + struct netlink_ext_ack *extack); + int (*ndo_mdb_dump)(struct net_device *dev, + struct sk_buff *skb, + struct netlink_callback *cb); int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, @@ -2462,6 +2487,7 @@ static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) { + DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); return &dev->_tx[index]; } @@ -3806,13 +3832,8 @@ static inline unsigned int get_netdev_rx_queue_index( int netif_get_num_default_rss_queues(void); -enum skb_free_reason { - SKB_REASON_CONSUMED, - SKB_REASON_DROPPED, -}; - -void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); -void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); +void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); +void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware @@ -3835,22 +3856,22 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); */ static inline void dev_kfree_skb_irq(struct sk_buff *skb) { - __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); + dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); } static inline void dev_consume_skb_irq(struct sk_buff *skb) { - __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); + dev_kfree_skb_irq_reason(skb, SKB_CONSUMED); } static inline void dev_kfree_skb_any(struct sk_buff *skb) { - __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); + dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); } static inline void dev_consume_skb_any(struct sk_buff *skb) { - __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); + dev_kfree_skb_any_reason(skb, SKB_CONSUMED); } u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 48314ade1506..7834c0be2831 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -197,6 +197,8 @@ static inline int nf_cookie_v6_check(const struct ipv6hdr *iph, __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); +int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen); + int ipv6_netfilter_init(void); void ipv6_netfilter_fini(void); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index c43ac7690eca..19c0791ed9d5 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -50,7 +50,6 @@ struct netlink_kernel_cfg { struct mutex *cb_mutex; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); - bool (*compare)(struct net *net, struct sock *sk); }; struct sock *__netlink_kernel_create(struct net *net, int unit, @@ -162,9 +161,31 @@ struct netlink_ext_ack { } \ } while (0) +#define NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, pol, fmt, args...) 
do { \ + struct netlink_ext_ack *__extack = (extack); \ + \ + if (!__extack) \ + break; \ + \ + if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \ + "%s" fmt "%s", "", ##args, "") >= \ + NETLINK_MAX_FMTMSG_LEN) \ + net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \ + ##args, "\n"); \ + \ + do_trace_netlink_extack(__extack->_msg_buf); \ + \ + __extack->_msg = __extack->_msg_buf; \ + __extack->bad_attr = (attr); \ + __extack->policy = (pol); \ +} while (0) + #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \ NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg) +#define NL_SET_ERR_MSG_ATTR_FMT(extack, attr, msg, args...) \ + NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, NULL, msg, ##args) + #define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \ struct netlink_ext_ack *__extack = (extack); \ \ diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index da633d34ab86..8a52ef2e6fa6 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -14,9 +14,25 @@ #if IS_ENABLED(CONFIG_OF_MDIO) bool of_mdiobus_child_is_phy(struct device_node *child); -int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); -int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, - struct device_node *np); +int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np, + struct module *owner); + +static inline int of_mdiobus_register(struct mii_bus *mdio, + struct device_node *np) +{ + return __of_mdiobus_register(mdio, np, THIS_MODULE); +} + +int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np, struct module *owner); + +static inline int devm_of_mdiobus_register(struct device *dev, + struct mii_bus *mdio, + struct device_node *np) +{ + return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE); +} + struct mdio_device *of_mdio_find_device(struct device_node *np); struct phy_device *of_phy_find_device(struct device_node *phy_np); struct phy_device * diff --git a/include/linux/pcs/pcs-mtk-lynxi.h b/include/linux/pcs/pcs-mtk-lynxi.h new file mode 100644 index 000000000000..be3b4ab32f4a --- /dev/null +++ b/include/linux/pcs/pcs-mtk-lynxi.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PCS_MTK_LYNXI_H +#define __LINUX_PCS_MTK_LYNXI_H + +#include <linux/phylink.h> +#include <linux/regmap.h> + +#define MTK_SGMII_FLAG_PN_SWAP BIT(0) +struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev, + struct regmap *regmap, + u32 ana_rgc3, u32 flags); +void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs); +#endif diff --git a/include/linux/phy.h b/include/linux/phy.h index 36bf0bbc8efa..fefd5091bc24 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1130,16 +1130,15 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) #define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \ timeout_us, sleep_before_read) \ ({ \ - int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \ + int __ret = read_poll_timeout(phy_read, val, val < 0 || (cond), \ sleep_us, timeout_us, sleep_before_read, phydev, regnum); \ - if (val < 0) \ + if (val < 0) \ __ret = val; \ if (__ret) \ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ __ret; \ }) - /** * __phy_read - convenience function for reading a given PHY register * @phydev: the phy_device struct diff --git a/include/linux/phylink.h b/include/linux/phylink.h index c492c26202b5..9ff56b050584 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -93,7 +93,6 @@ static inline bool 
phylink_autoneg_inband(unsigned int mode) * the medium link mode (@speed and @duplex) and the speed/duplex of the phy * interface mode (@interface) are different. * @link: true if the link is up. - * @an_enabled: true if autonegotiation is enabled/desired. * @an_complete: true if autonegotiation has completed. */ struct phylink_link_state { @@ -105,7 +104,6 @@ struct phylink_link_state { int pause; int rate_matching; unsigned int link:1; - unsigned int an_enabled:1; unsigned int an_complete:1; }; diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h deleted file mode 100644 index 9e75ac8d19be..000000000000 --- a/include/linux/platform_data/nfcmrvl.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (C) 2015, Marvell International Ltd. - * - * This software file (the "File") is distributed by Marvell International - * Ltd. under the terms of the GNU General Public License Version 2, June 1991 - * (the "License"). You may use, redistribute and/or modify this File in - * accordance with the terms and conditions of the License, a copy of which - * is available on the worldwide web at - * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. - * - * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE - * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE - * ARE EXPRESSLY DISCLAIMED. The License provides additional details about - * this warranty disclaimer. - */ - -#ifndef _NFCMRVL_PTF_H_ -#define _NFCMRVL_PTF_H_ - -struct nfcmrvl_platform_data { - /* - * Generic - */ - - /* GPIO that is wired to RESET_N signal */ - int reset_n_io; - /* Tell if transport is muxed in HCI one */ - unsigned int hci_muxed; - - /* - * UART specific - */ - - /* Tell if UART needs flow control at init */ - unsigned int flow_control; - /* Tell if firmware supports break control for power management */ - unsigned int break_control; - - - /* - * I2C specific - */ - - unsigned int irq; - unsigned int irq_polarity; -}; - -#endif /* _NFCMRVL_PTF_H_ */ diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h index c2e28deef33a..746fd67c3480 100644 --- a/include/linux/ptp_kvm.h +++ b/include/linux/ptp_kvm.h @@ -14,6 +14,7 @@ struct timespec64; struct clocksource; int kvm_arch_ptp_init(void); +void kvm_arch_ptp_exit(void); int kvm_arch_ptp_get_clock(struct timespec64 *ts); int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, struct clocksource **cs); diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h new file mode 100644 index 000000000000..2c8bfd0f1b6b --- /dev/null +++ b/include/linux/rcuref.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _LINUX_RCUREF_H +#define _LINUX_RCUREF_H + +#include <linux/atomic.h> +#include <linux/bug.h> +#include <linux/limits.h> +#include <linux/lockdep.h> +#include <linux/preempt.h> +#include <linux/rcupdate.h> + +#define RCUREF_ONEREF 0x00000000U +#define RCUREF_MAXREF 0x7FFFFFFFU +#define RCUREF_SATURATED 0xA0000000U +#define RCUREF_RELEASED 0xC0000000U +#define RCUREF_DEAD 0xE0000000U +#define RCUREF_NOREF 0xFFFFFFFFU + +/** + * rcuref_init - Initialize a rcuref reference count with the given reference count + * @ref: Pointer to the reference count + * @cnt: The initial reference count typically '1' + */ +static inline void rcuref_init(rcuref_t *ref, unsigned int cnt) +{ + atomic_set(&ref->refcnt, cnt - 1); +} + +/** + * rcuref_read - Read the number of held reference counts of a rcuref + * @ref: Pointer to the reference count + * + * Return: The number of 
held references (0 ... N) + */ +static inline unsigned int rcuref_read(rcuref_t *ref) +{ + unsigned int c = atomic_read(&ref->refcnt); + + /* Return 0 if within the DEAD zone. */ + return c >= RCUREF_RELEASED ? 0 : c + 1; +} + +extern __must_check bool rcuref_get_slowpath(rcuref_t *ref); + +/** + * rcuref_get - Acquire one reference on a rcuref reference count + * @ref: Pointer to the reference count + * + * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF. + * + * Provides no memory ordering, it is assumed the caller has guaranteed the + * object memory to be stable (RCU, etc.). It does provide a control dependency + * and thereby orders future stores. See documentation in lib/rcuref.c + * + * Return: + * False if the attempt to acquire a reference failed. This happens + * when the last reference has been put already + * + * True if a reference was successfully acquired + */ +static inline __must_check bool rcuref_get(rcuref_t *ref) +{ + /* + * Unconditionally increase the reference count. The saturation and + * dead zones provide enough tolerance for this. + */ + if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt))) + return true; + + /* Handle the cases inside the saturation and dead zones */ + return rcuref_get_slowpath(ref); +} + +extern __must_check bool rcuref_put_slowpath(rcuref_t *ref); + +/* + * Internal helper. Do not invoke directly. + */ +static __always_inline __must_check bool __rcuref_put(rcuref_t *ref) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(), + "suspicious rcuref_put_rcusafe() usage"); + /* + * Unconditionally decrease the reference count. The saturation and + * dead zones provide enough tolerance for this. + */ + if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) + return false; + + /* + * Handle the last reference drop and cases inside the saturation + * and dead zones. + */ + return rcuref_put_slowpath(ref); +} + +/** + * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe + * @ref: Pointer to the reference count + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides an acquire ordering on success such that free() + * must come after. + * + * Can be invoked from contexts, which guarantee that no grace period can + * happen which would free the object concurrently if the decrement drops + * the last reference and the slowpath races against a concurrent get() and + * put() pair. rcu_read_lock()'ed and atomic contexts qualify. + * + * Return: + * True if this was the last reference with no future references + * possible. This signals the caller that it can safely release the + * object which is protected by the reference counter. + * + * False if there are still active references or the put() raced + * with a concurrent get()/put() pair. Caller is not allowed to + * release the protected object. + */ +static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref) +{ + return __rcuref_put(ref); +} + +/** + * rcuref_put -- Release one reference for a rcuref reference count + * @ref: Pointer to the reference count + * + * Can be invoked from any context. + * + * Provides release memory ordering, such that prior loads and stores are done + * before, and provides an acquire ordering on success such that free() + * must come after. + * + * Return: + * + * True if this was the last reference with no future references + * possible. 
This signals the caller that it can safely schedule the + * object, which is protected by the reference counter, for + * deconstruction. + * + * False if there are still active references or the put() raced + * with a concurrent get()/put() pair. Caller is not allowed to + * deconstruct the protected object. + */ +static inline __must_check bool rcuref_put(rcuref_t *ref) +{ + bool released; + + preempt_disable(); + released = __rcuref_put(ref); + preempt_enable(); + return released; +} + +#endif diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 92ad75549e9c..f0c87baaf6c0 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -62,16 +62,6 @@ static inline bool lockdep_rtnl_is_held(void) rcu_dereference_check(p, lockdep_rtnl_is_held()) /** - * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking - * @p: The pointer to read, prior to dereference - * - * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh() - * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh() - */ -#define rcu_dereference_bh_rtnl(p) \ - rcu_dereference_bh_check(p, lockdep_rtnl_is_held()) - -/** * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL * @p: The pointer to read, prior to dereferencing * diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ff7ad331fb82..82511b2f61ea 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -345,18 +345,12 @@ struct sk_buff_head { struct sk_buff; -/* To allow 64K frame to be packed as single skb without frag_list we - * require 64K/PAGE_SIZE pages plus 1 additional page to allow for - * buffers which do not start on a page boundary. - * - * Since GRO uses frags we allocate at least 16 regardless of page - * size. 
- */ -#if (65536/PAGE_SIZE + 1) < 16 -#define MAX_SKB_FRAGS 16UL -#else -#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) +#ifndef CONFIG_MAX_SKB_FRAGS +# define CONFIG_MAX_SKB_FRAGS 17 #endif + +#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS + extern int sysctl_max_skb_frags; /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to @@ -810,7 +804,6 @@ typedef unsigned char *sk_buff_data_t; * @csum_level: indicates the number of consecutive checksums found in * the packet minus one that have been verified as * CHECKSUM_UNNECESSARY (max 3) - * @scm_io_uring: SKB holds io_uring registered files * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required @@ -989,7 +982,6 @@ struct sk_buff { #endif __u8 slow_gro:1; __u8 csum_not_inet:1; - __u8 scm_io_uring:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h index 1a136271ba6a..80f37c1dba58 100644 --- a/include/linux/smscphy.h +++ b/include/linux/smscphy.h @@ -28,4 +28,10 @@ #define MII_LAN83C185_MODE_POWERDOWN 0xC0 /* Power Down mode */ #define MII_LAN83C185_MODE_ALL 0xE0 /* All capable mode */ +int smsc_phy_config_intr(struct phy_device *phydev); +irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev); +int smsc_phy_config_init(struct phy_device *phydev); +int lan87xx_read_status(struct phy_device *phydev); +int smsc_phy_probe(struct phy_device *phydev); + #endif /* __LINUX_SMSCPHY_H__ */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index a152678b82b7..a2414c187483 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -215,7 +215,7 @@ struct plat_stmmacenet_data { int unicast_filter_entries; int tx_fifo_size; int rx_fifo_size; - u32 addr64; + u32 host_dma_width; u32 rx_queues_to_use; u32 tx_queues_to_use; u8 rx_sched_algorithm; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ca7f05a130d2..b4c08ac86983 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -472,10 +472,12 @@ enum tsq_flags { TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), }; -static inline struct tcp_sock *tcp_sk(const struct sock *sk) -{ - return (struct tcp_sock *)sk; -} +#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk) + +/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket. + * Used in context of (lockless) tcp listeners. 
+ */ +#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk) struct tcp_timewait_sock { struct inet_timewait_sock tw_sk; diff --git a/include/linux/types.h b/include/linux/types.h index ea8cf60a8a79..688fb943556a 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -175,6 +175,12 @@ typedef struct { } atomic64_t; #endif +typedef struct { + atomic_t refcnt; +} rcuref_t; + +#define RCUREF_INIT(i) { .refcnt = ATOMIC_INIT(i - 1) } + struct list_head { struct list_head *next, *prev; }; diff --git a/include/linux/udp.h b/include/linux/udp.h index a2892e151644..43c1fb2d2c21 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -97,10 +97,7 @@ struct udp_sock { #define UDP_MAX_SEGMENTS (1 << 6UL) -static inline struct udp_sock *udp_sk(const struct sock *sk) -{ - return (struct udp_sock *)sk; -} +#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk) static inline void udp_set_no_check6_tx(struct sock *sk, bool val) { diff --git a/include/linux/wwan.h b/include/linux/wwan.h index 24d76500b1cc..01fa15506286 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -64,11 +64,21 @@ struct wwan_port_ops { poll_table *wait); }; +/** struct wwan_port_caps - The WWAN port capabilities + * @frag_len: WWAN port TX fragments length + * @headroom_len: WWAN port TX fragments reserved headroom length + */ +struct wwan_port_caps { + size_t frag_len; + unsigned int headroom_len; +}; + /** * wwan_create_port - Add a new WWAN port * @parent: Device to use as parent and shared by all WWAN ports * @type: WWAN port type * @ops: WWAN port operations + * @caps: WWAN port capabilities * @drvdata: Pointer to caller driver data * * Allocate and register a new WWAN port. The port will be automatically exposed @@ -86,6 +96,7 @@ struct wwan_port_ops { struct wwan_port *wwan_create_port(struct device *parent, enum wwan_port_type type, const struct wwan_port_ops *ops, + struct wwan_port_caps *caps, void *drvdata);
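
Several of the interface changes above lend themselves to short usage sketches; the notes below are editorial illustrations, not code from the commits. First, acpi_mdio.h and of_mdio.h: acpi_mdiobus_register(), of_mdiobus_register() and devm_of_mdiobus_register() become static inline wrappers around new __-prefixed functions that take a struct module *owner, so THIS_MODULE expands in the calling driver rather than inside the MDIO core. A minimal sketch with a hypothetical driver (bus->read/bus->write setup and error handling elided):

static int foo_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus;

        bus = devm_mdiobus_alloc(&pdev->dev);
        if (!bus)
                return -ENOMEM;

        bus->name = "foo-mdio";
        /* Unchanged call site: the inline wrapper now passes THIS_MODULE,
         * i.e. this driver, as the bus owner. */
        return of_mdiobus_register(bus, pdev->dev.of_node);
}

The ACPI variant works the same way through __acpi_mdiobus_register().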
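
The atomic fallback headers add acquire, release and relaxed orderings of add_negative for atomic_t, atomic64_t and atomic_long_t, generated from the corresponding add_return_*() or, when the architecture supplies only the relaxed form, from the generic fences. A sketch of the intended use, the zero-biased reference count pattern that rcuref (added below in rcuref.h) builds on; foo_put_ref() is hypothetical:

static bool foo_put_ref(atomic_t *cnt)
{
        /* Counter is biased so 0 means "one reference held" (compare
         * RCUREF_INIT(1) in types.h); going negative means last put. */
        if (likely(!atomic_add_negative_release(-1, cnt)))
                return false;           /* references remain */

        /* Last reference: the release ordering above makes prior stores
         * visible before the teardown the caller performs now. */
        return true;
}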
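
bpf.h adds a map_mem_usage callback to struct bpf_map_ops so each map type can report its own memory footprint (offloaded maps go through bpf_map_offload_map_mem_usage()). A hypothetical map type might implement it roughly as:

static u64 foo_map_mem_usage(const struct bpf_map *map)
{
        /* Fixed overhead plus per-entry value storage; a real map would
         * account its own structs, buckets and so on. */
        return sizeof(struct bpf_map) +
               (u64)map->max_entries * round_up(map->value_size, 8);
}

and set .map_mem_usage = foo_map_mem_usage in its bpf_map_ops.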
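
bpf.h also moves struct bpf_dynptr_kern and enum bpf_dynptr_type out of the CONFIG_BPF_SYSCALL-only block and adds SKB and XDP dynptr types. On the program side these are reached through kfuncs from the same series; the sketch below assumes bpf_dynptr_from_skb() and bpf_dynptr_slice() are available as kfuncs with these signatures, and is compiled as a BPF program:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

extern int bpf_dynptr_from_skb(struct __sk_buff *skb, u64 flags,
                               struct bpf_dynptr *ptr) __ksym;
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, u32 offset,
                              void *buffer, u32 buffer__szk) __ksym;

SEC("tc")
int rd_eth_proto(struct __sk_buff *skb)
{
        struct bpf_dynptr ptr;
        struct ethhdr *eth, buf;

        if (bpf_dynptr_from_skb(skb, 0, &ptr))
                return 2;                       /* TC_ACT_SHOT */

        /* Works whether the header is linear or paged; 'buf' is the
         * bounce buffer for the non-contiguous case. */
        eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
        if (!eth)
                return 2;

        bpf_printk("h_proto=0x%x", bpf_ntohs(eth->h_proto));
        return 0;                               /* TC_ACT_OK */
}

char LICENSE[] SEC("license") = "GPL";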
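
dccp_sk(), raw6_sk(), tcp_sk() and udp_sk() are converted from casts to container_of_const(), so constness now propagates: a const struct sock * yields a const protocol socket instead of silently becoming writable, and tcp_sk_rw() is the explicit escape hatch for lockless listener paths. The field choices below (snd_cwnd, tsoffset) are illustrative:

static u32 peek_snd_cwnd(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk); /* const in, const out */

        return tp->snd_cwnd;
}

static void set_tsoffset(const struct sock *sk, u32 off)
{
        /* Deliberate, visible upgrade to read/write access. */
        WRITE_ONCE(tcp_sk_rw(sk)->tsoffset, off);
}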
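
ethtool.h extends kernel_ethtool_ringparam with a TX push buffer length and maximum, plus an ETHTOOL_RING_USE_TX_PUSH_BUF_LEN capability bit. A driver-side sketch (struct foo_priv and FOO_TX_PUSH_BUF_MAX are hypothetical):

struct foo_priv {
        u8 tx_push;
        u32 tx_push_buf_len;
};

#define FOO_TX_PUSH_BUF_MAX 512 /* hypothetical hardware limit */

static void foo_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ring,
                              struct kernel_ethtool_ringparam *kernel_ring,
                              struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        kernel_ring->tx_push = priv->tx_push;
        kernel_ring->tx_push_buf_len = priv->tx_push_buf_len;
        kernel_ring->tx_push_buf_max_len = FOO_TX_PUSH_BUF_MAX;
}

static const struct ethtool_ops foo_ethtool_ops = {
        .supported_ring_params  = ETHTOOL_RING_USE_TX_PUSH |
                                  ETHTOOL_RING_USE_TX_PUSH_BUF_LEN,
        .get_ringparam          = foo_get_ringparam,
};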
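
netdevice.h retires enum skb_free_reason in favour of the richer skb drop reasons: dev_kfree_skb_irq_reason() and dev_kfree_skb_any_reason() take an skb_drop_reason, and the old dev_kfree_skb_irq()/dev_consume_skb_any() wrappers map to SKB_DROP_REASON_NOT_SPECIFIED and SKB_CONSUMED. Drivers can now say why a packet died from any context; a hypothetical TX path:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb->len > dev->mtu + ETH_HLEN) {
                /* Visible to drop monitors instead of NOT_SPECIFIED. */
                dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
                return NETDEV_TX_OK;
        }

        /* ... hand the skb to hardware ... */
        return NETDEV_TX_OK;
}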
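
netlink.h adds printf-style variants of the attribute-pointing extack macros, reusing the fixed NETLINK_MAX_FMTMSG_LEN buffer and warning (rate-limited) on truncation. Typical use in a hypothetical validation helper:

static int foo_validate(const struct nlattr *attr, u32 max,
                        struct netlink_ext_ack *extack)
{
        u32 val = nla_get_u32(attr);

        if (val > max) {
                NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
                                        "value %u exceeds maximum %u",
                                        val, max);
                return -ERANGE;
        }

        return 0;
}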
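
rcuref.h (with rcuref_t and RCUREF_INIT() in types.h) introduces a reference count tuned for RCU-managed objects: a relaxed increment on get, a release-ordered decrement on put, and saturation/dead zones handled by slow paths in lib/rcuref.c. The header's documentation implies the following lifecycle; struct foo is hypothetical:

struct foo {
        rcuref_t ref;
        struct rcu_head rcu;
};

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                rcuref_init(&f->ref, 1);        /* one initial reference */
        return f;
}

/* Caller holds rcu_read_lock() or already owns a reference. */
static struct foo *foo_get(struct foo *f)
{
        /* Fails only once the last reference has already been put. */
        return rcuref_get(&f->ref) ? f : NULL;
}

static void foo_put(struct foo *f)
{
        if (rcuref_put(&f->ref))        /* true: last reference gone */
                kfree_rcu(f, rcu);
}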
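
Finally, wwan_create_port() grows a struct wwan_port_caps * argument describing TX fragmentation for the port. A hypothetical driver passes its device limits at creation time (the foo_at_ops callbacks are elided):

static const struct wwan_port_ops foo_at_ops; /* start/stop/tx elided */

static int foo_create_at_port(struct device *parent, void *drvdata)
{
        static struct wwan_port_caps caps = {
                .frag_len = 2048,       /* largest TX fragment the device takes */
                .headroom_len = 16,     /* reserved per fragment for a bus header */
        };
        struct wwan_port *port;

        port = wwan_create_port(parent, WWAN_PORT_AT, &foo_at_ops, &caps,
                                drvdata);
        return PTR_ERR_OR_ZERO(port);
}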