Diffstat (limited to 'include/linux')
278 files changed, 5069 insertions, 4306 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 39263c6b52e1..2630c2e953f7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -55,7 +55,7 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) if (!fwnode) return NULL; - fwnode->ops = &acpi_static_fwnode_ops; + fwnode_init(fwnode, &acpi_static_fwnode_ops); return fwnode; } diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 20a32120bb88..1a12baa58e40 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -38,6 +38,7 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); const struct iommu_ops *iort_iommu_configure_id(struct device *dev, const u32 *id_in); int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); +phys_addr_t acpi_iort_dma_get_max_cpu_address(void); #else static inline void acpi_iort_init(void) { } static inline u32 iort_msi_map_id(struct device *dev, u32 id) @@ -55,6 +56,9 @@ static inline const struct iommu_ops *iort_iommu_configure_id( static inline int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) { return 0; } + +static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void) +{ return PHYS_ADDR_MAX; } #endif #endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 885c9ffc835c..f860645f6512 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -87,6 +87,8 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 + /* Paravirtualised time calls (defined by ARM DEN0057A) */ #define ARM_SMCCC_HV_PV_TIME_FEATURES \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 5d5ff2203fa2..d7493016cd46 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -186,6 +186,7 @@ struct atmdev_ops { /* only send is required */ void __user *arg); #endif int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); + int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb); int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); void (*phy_put)(struct atm_dev *dev,unsigned char value, unsigned long addr); diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h index bcb6aa27cfa6..a3dba31df01e 100644 --- a/include/linux/atomic-arch-fallback.h +++ b/include/linux/atomic-arch-fallback.h @@ -9,9 +9,9 @@ #include <linux/compiler.h> #ifndef arch_xchg_relaxed -#define arch_xchg_relaxed arch_xchg -#define arch_xchg_acquire arch_xchg -#define arch_xchg_release arch_xchg +#define arch_xchg_acquire arch_xchg +#define arch_xchg_release arch_xchg +#define arch_xchg_relaxed arch_xchg #else /* arch_xchg_relaxed */ #ifndef arch_xchg_acquire @@ -32,9 +32,9 @@ #endif /* arch_xchg_relaxed */ #ifndef arch_cmpxchg_relaxed -#define arch_cmpxchg_relaxed arch_cmpxchg -#define arch_cmpxchg_acquire arch_cmpxchg -#define arch_cmpxchg_release arch_cmpxchg +#define arch_cmpxchg_acquire arch_cmpxchg +#define arch_cmpxchg_release arch_cmpxchg +#define arch_cmpxchg_relaxed arch_cmpxchg #else /* arch_cmpxchg_relaxed */ #ifndef arch_cmpxchg_acquire @@ -55,9 +55,9 @@ #endif /* arch_cmpxchg_relaxed */ #ifndef arch_cmpxchg64_relaxed -#define arch_cmpxchg64_relaxed arch_cmpxchg64 -#define arch_cmpxchg64_acquire arch_cmpxchg64 -#define arch_cmpxchg64_release arch_cmpxchg64 +#define arch_cmpxchg64_acquire arch_cmpxchg64 +#define arch_cmpxchg64_release arch_cmpxchg64 +#define arch_cmpxchg64_relaxed arch_cmpxchg64 #else /* arch_cmpxchg64_relaxed */ 
#ifndef arch_cmpxchg64_acquire @@ -77,6 +77,76 @@ #endif /* arch_cmpxchg64_relaxed */ +#ifndef arch_try_cmpxchg_relaxed +#ifdef arch_try_cmpxchg +#define arch_try_cmpxchg_acquire arch_try_cmpxchg +#define arch_try_cmpxchg_release arch_try_cmpxchg +#define arch_try_cmpxchg_relaxed arch_try_cmpxchg +#endif /* arch_try_cmpxchg */ + +#ifndef arch_try_cmpxchg +#define arch_try_cmpxchg(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg */ + +#ifndef arch_try_cmpxchg_acquire +#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg_acquire */ + +#ifndef arch_try_cmpxchg_release +#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg_release */ + +#ifndef arch_try_cmpxchg_relaxed +#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* arch_try_cmpxchg_relaxed */ + +#else /* arch_try_cmpxchg_relaxed */ + +#ifndef arch_try_cmpxchg_acquire +#define arch_try_cmpxchg_acquire(...) \ + __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_try_cmpxchg_release +#define arch_try_cmpxchg_release(...) \ + __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_try_cmpxchg +#define arch_try_cmpxchg(...) 
\ + __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__) +#endif + +#endif /* arch_try_cmpxchg_relaxed */ + #ifndef arch_atomic_read_acquire static __always_inline int arch_atomic_read_acquire(const atomic_t *v) @@ -2288,4 +2358,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 90cd26cfd69d2250303d654955a0cc12620fb91b +// cca554917d7ea73d5e3e7397dd70c484cad9b2c4 diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index fd525c71d676..2a3f55d98be9 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -9,9 +9,9 @@ #include <linux/compiler.h> #ifndef xchg_relaxed -#define xchg_relaxed xchg -#define xchg_acquire xchg -#define xchg_release xchg +#define xchg_acquire xchg +#define xchg_release xchg +#define xchg_relaxed xchg #else /* xchg_relaxed */ #ifndef xchg_acquire @@ -32,9 +32,9 @@ #endif /* xchg_relaxed */ #ifndef cmpxchg_relaxed -#define cmpxchg_relaxed cmpxchg -#define cmpxchg_acquire cmpxchg -#define cmpxchg_release cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg +#define cmpxchg_relaxed cmpxchg #else /* cmpxchg_relaxed */ #ifndef cmpxchg_acquire @@ -55,9 +55,9 @@ #endif /* cmpxchg_relaxed */ #ifndef cmpxchg64_relaxed -#define cmpxchg64_relaxed cmpxchg64 -#define cmpxchg64_acquire cmpxchg64 -#define cmpxchg64_release cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 +#define cmpxchg64_relaxed cmpxchg64 #else /* cmpxchg64_relaxed */ #ifndef cmpxchg64_acquire @@ -77,6 +77,76 @@ #endif /* cmpxchg64_relaxed */ +#ifndef try_cmpxchg_relaxed +#ifdef try_cmpxchg +#define try_cmpxchg_acquire try_cmpxchg +#define try_cmpxchg_release try_cmpxchg +#define try_cmpxchg_relaxed try_cmpxchg +#endif /* try_cmpxchg */ + +#ifndef try_cmpxchg +#define try_cmpxchg(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = cmpxchg((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* try_cmpxchg */ + +#ifndef try_cmpxchg_acquire +#define try_cmpxchg_acquire(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* try_cmpxchg_acquire */ + +#ifndef try_cmpxchg_release +#define try_cmpxchg_release(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = cmpxchg_release((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* try_cmpxchg_release */ + +#ifndef try_cmpxchg_relaxed +#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif /* try_cmpxchg_relaxed */ + +#else /* try_cmpxchg_relaxed */ + +#ifndef try_cmpxchg_acquire +#define try_cmpxchg_acquire(...) \ + __atomic_op_acquire(try_cmpxchg, __VA_ARGS__) +#endif + +#ifndef try_cmpxchg_release +#define try_cmpxchg_release(...) \ + __atomic_op_release(try_cmpxchg, __VA_ARGS__) +#endif + +#ifndef try_cmpxchg +#define try_cmpxchg(...) 
\ + __atomic_op_fence(try_cmpxchg, __VA_ARGS__) +#endif + +#endif /* try_cmpxchg_relaxed */ + #define arch_atomic_read atomic_read #define arch_atomic_read_acquire atomic_read_acquire @@ -2522,4 +2592,4 @@ atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f +// d78e6c293c661c15188f0ec05bce45188c8d5892 diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h new file mode 100644 index 000000000000..fc51d45f106b --- /dev/null +++ b/include/linux/auxiliary_bus.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020 Intel Corporation + * + * Please see Documentation/driver-api/auxiliary_bus.rst for more information. + */ + +#ifndef _AUXILIARY_BUS_H_ +#define _AUXILIARY_BUS_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +struct auxiliary_device { + struct device dev; + const char *name; + u32 id; +}; + +struct auxiliary_driver { + int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id); + void (*remove)(struct auxiliary_device *auxdev); + void (*shutdown)(struct auxiliary_device *auxdev); + int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state); + int (*resume)(struct auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct auxiliary_device_id *id_table; +}; + +static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev) +{ + return container_of(dev, struct auxiliary_device, dev); +} + +static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv) +{ + return container_of(drv, struct auxiliary_driver, driver); +} + +int auxiliary_device_init(struct auxiliary_device *auxdev); +int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); +#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME) + +static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) +{ + put_device(&auxdev->dev); +} + +static inline void auxiliary_device_delete(struct auxiliary_device *auxdev) +{ + device_del(&auxdev->dev); +} + +int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, + const char *modname); +#define auxiliary_driver_register(auxdrv) \ + __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) + +void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); + +/** + * module_auxiliary_driver() - Helper macro for registering an auxiliary driver + * @__auxiliary_driver: auxiliary driver struct + * + * Helper macro for auxiliary drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_auxiliary_driver(__auxiliary_driver) \ + module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) + +struct auxiliary_device *auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)); + +#endif /* _AUXILIARY_BUS_H_ */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b23eeca4d677..794b2a33a2c3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -235,6 +235,8 @@ enum hctx_type { * @flags: Zero or more BLK_MQ_F_* flags. * @driver_data: Pointer to data owned by the block driver that created this * tag set. 
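The try_cmpxchg() family added above (and its arch_ counterparts) encodes the common compare-and-exchange retry idiom: the helper returns true on success and, on failure, writes the value it observed back through the old-value pointer, so callers stop re-reading it by hand. A minimal usage sketch; set_flag() is a hypothetical caller, not part of this patch:

	/* Atomically OR a flag into *p. On each failed attempt try_cmpxchg()
	 * refreshes 'old' with the current value, replacing the open-coded
	 * loop:
	 *
	 *	old = *p;
	 *	while ((new = cmpxchg(p, old, old | flag)) != old)
	 *		old = new;
	 */
	static void set_flag(unsigned long *p, unsigned long flag)
	{
		unsigned long old = READ_ONCE(*p);

		while (!try_cmpxchg(p, &old, old | flag))
			;	/* 'old' now holds the observed value; retry */
	}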
+ * @active_queues_shared_sbitmap: + * number of active request queues per tag set. * @__bitmap_tags: A shared tags sbitmap, used over all hctx's * @__breserved_tags: * A shared reserved tags sbitmap, used over all hctx's diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 639cae2c158b..033eb5f73b65 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1073,12 +1073,15 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, * file system requests. */ static inline unsigned int blk_max_size_offset(struct request_queue *q, - sector_t offset) -{ - unsigned int chunk_sectors = q->limits.chunk_sectors; - - if (!chunk_sectors) - return q->limits.max_sectors; + sector_t offset, + unsigned int chunk_sectors) +{ + if (!chunk_sectors) { + if (q->limits.chunk_sectors) + chunk_sectors = q->limits.chunk_sectors; + else + return q->limits.max_sectors; + } if (likely(is_power_of_2(chunk_sectors))) chunk_sectors -= offset & (chunk_sectors - 1); @@ -1101,7 +1104,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, req_op(rq) == REQ_OP_SECURE_ERASE) return blk_queue_get_max_sectors(q, req_op(rq)); - return min(blk_max_size_offset(q, offset), + return min(blk_max_size_offset(q, offset, 0), blk_queue_get_max_sectors(q, req_op(rq))); } diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 9903088891fa..2696eb0fc149 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -12,6 +12,9 @@ #define BOOTCONFIG_MAGIC "#BOOTCONFIG\n" #define BOOTCONFIG_MAGIC_LEN 12 +#define BOOTCONFIG_ALIGN_SHIFT 2 +#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT) +#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1) /* XBC tree node */ struct xbc_node { diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index ed71bd1a0825..72e69a0e1e8c 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -246,11 +246,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, __ret; \ }) -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND) +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL) -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND) +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ sk->sk_prot->pre_connect) @@ -434,8 +434,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2b16bf48aab6..07cb5d15e743 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -20,6 +20,8 @@ #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/capability.h> 
+#include <linux/sched/mm.h> +#include <linux/slab.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -36,9 +38,12 @@ struct seq_operations; struct bpf_iter_aux_info; struct bpf_local_storage; struct bpf_local_storage_map; +struct kobject; +struct mem_cgroup; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; +extern struct kobject *btf_kobj; typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux); @@ -133,11 +138,6 @@ struct bpf_map_ops { const struct bpf_iter_seq_info *iter_seq_info; }; -struct bpf_map_memory { - u32 pages; - struct user_struct *user; -}; - struct bpf_map { /* The first two cachelines with read-mostly members of which some * are also accessed in fast-path (e.g. ops, max_entries). @@ -158,7 +158,9 @@ struct bpf_map { u32 btf_key_type_id; u32 btf_value_type_id; struct btf *btf; - struct bpf_map_memory memory; +#ifdef CONFIG_MEMCG_KMEM + struct mem_cgroup *memcg; +#endif char name[BPF_OBJ_NAME_LEN]; u32 btf_vmlinux_value_type_id; bool bypass_spec_v1; @@ -310,6 +312,7 @@ enum bpf_return_type { RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ + RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -418,7 +421,10 @@ struct bpf_insn_access_aux { enum bpf_reg_type reg_type; union { int ctx_field_size; - u32 btf_id; + struct { + struct btf *btf; + u32 btf_id; + }; }; struct bpf_verifier_log *log; /* for verbose logs */ }; @@ -455,6 +461,7 @@ struct bpf_verifier_ops { struct bpf_insn *dst, struct bpf_prog *prog, u32 *target_size); int (*btf_struct_access)(struct bpf_verifier_log *log, + const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, u32 *next_btf_id); @@ -768,6 +775,7 @@ struct bpf_prog_aux { u32 ctx_arg_info_size; u32 max_rdonly_access; u32 max_rdwr_access; + struct btf *attach_btf; const struct bpf_ctx_arg_aux *ctx_arg_info; struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ struct bpf_prog *dst_prog; @@ -1002,7 +1010,6 @@ struct bpf_event_entry { bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); int bpf_prog_calc_tag(struct bpf_prog *fp); -const char *kernel_type_name(u32 btf_type_id); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@ -1199,8 +1206,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i); void bpf_prog_inc(struct bpf_prog *prog); struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); -int __bpf_prog_charge(struct user_struct *user, u32 pages); -void __bpf_prog_uncharge(struct user_struct *user, u32 pages); void __bpf_free_used_maps(struct bpf_prog_aux *aux, struct bpf_map **used_maps, u32 len); @@ -1215,12 +1220,6 @@ void bpf_map_inc_with_uref(struct bpf_map *map); struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); -int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); -void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); -int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); -void bpf_map_charge_finish(struct bpf_map_memory *mem); -void bpf_map_charge_move(struct bpf_map_memory *dst, - struct 
bpf_map_memory *src); void *bpf_map_area_alloc(u64 size, int numa_node); void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); void bpf_map_area_free(void *base); @@ -1237,6 +1236,34 @@ int generic_map_delete_batch(struct bpf_map *map, struct bpf_map *bpf_map_get_curr_or_next(u32 *id); struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); +#ifdef CONFIG_MEMCG_KMEM +void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, + int node); +void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); +void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, + size_t align, gfp_t flags); +#else +static inline void * +bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, + int node) +{ + return kmalloc_node(size, flags, node); +} + +static inline void * +bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) +{ + return kzalloc(size, flags); +} + +static inline void __percpu * +bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, + gfp_t flags) +{ + return __alloc_percpu_gfp(size, align, flags); +} +#endif + extern int sysctl_unprivileged_bpf_disabled; static inline bool bpf_allow_ptr_leaks(void) @@ -1294,6 +1321,10 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); +enum bpf_iter_feature { + BPF_ITER_RESCHED = BIT(0), +}; + #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; @@ -1302,6 +1333,7 @@ struct bpf_iter_reg { bpf_iter_show_fdinfo_t show_fdinfo; bpf_iter_fill_link_info_t fill_link_info; u32 ctx_arg_info_size; + u32 feature; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; const struct bpf_iter_seq_info *seq_info; }; @@ -1422,12 +1454,13 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info); -int btf_struct_access(struct bpf_verifier_log *log, +int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, u32 *next_btf_id); bool btf_struct_ids_match(struct bpf_verifier_log *log, - int off, u32 id, u32 need_type_id); + const struct btf *btf, u32 id, int off, + const struct btf *need_btf, u32 need_type_id); int btf_distill_func_proto(struct bpf_verifier_log *log, struct btf *btf, @@ -1482,15 +1515,6 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog) return ERR_PTR(-EOPNOTSUPP); } -static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) -{ - return 0; -} - -static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) -{ -} - static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog) @@ -1834,6 +1858,8 @@ extern const struct bpf_func_proto bpf_copy_from_user_proto; extern const struct bpf_func_proto bpf_snprintf_btf_proto; extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; +extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; +extern const struct bpf_func_proto bpf_sock_from_file_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index aaacb6aafc87..0d1c33ace398 100644 --- 
a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -7,6 +7,7 @@ #ifndef _LINUX_BPF_LSM_H #define _LINUX_BPF_LSM_H +#include <linux/sched.h> #include <linux/bpf.h> #include <linux/lsm_hooks.h> @@ -26,6 +27,8 @@ extern struct lsm_blob_sizes bpf_lsm_blob_sizes; int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog); +bool bpf_lsm_is_sleepable_hook(u32 btf_id); + static inline struct bpf_storage_blob *bpf_inode( const struct inode *inode) { @@ -35,12 +38,29 @@ static inline struct bpf_storage_blob *bpf_inode( return inode->i_security + bpf_lsm_blob_sizes.lbs_inode; } +static inline struct bpf_storage_blob *bpf_task( + const struct task_struct *task) +{ + if (unlikely(!task->security)) + return NULL; + + return task->security + bpf_lsm_blob_sizes.lbs_task; +} + extern const struct bpf_func_proto bpf_inode_storage_get_proto; extern const struct bpf_func_proto bpf_inode_storage_delete_proto; +extern const struct bpf_func_proto bpf_task_storage_get_proto; +extern const struct bpf_func_proto bpf_task_storage_delete_proto; void bpf_inode_storage_free(struct inode *inode); +void bpf_task_storage_free(struct task_struct *task); #else /* !CONFIG_BPF_LSM */ +static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) +{ + return false; +} + static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog) { @@ -53,10 +73,20 @@ static inline struct bpf_storage_blob *bpf_inode( return NULL; } +static inline struct bpf_storage_blob *bpf_task( + const struct task_struct *task) +{ + return NULL; +} + static inline void bpf_inode_storage_free(struct inode *inode) { } +static inline void bpf_task_storage_free(struct task_struct *task) +{ +} + #endif /* CONFIG_BPF_LSM */ #endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 2e6f568377f1..99f7fd657d87 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -109,6 +109,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) #endif #ifdef CONFIG_BPF_LSM BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) #if defined(CONFIG_XDP_SOCKETS) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e83ef6f6bf43..e941fe1484e5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -5,6 +5,7 @@ #define _LINUX_BPF_VERIFIER_H 1 #include <linux/bpf.h> /* for enum bpf_reg_type */ +#include <linux/btf.h> /* for struct btf and btf_id() */ #include <linux/filter.h> /* for MAX_BPF_STACK */ #include <linux/tnum.h> @@ -43,24 +44,31 @@ enum bpf_reg_liveness { struct bpf_reg_state { /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; + /* Fixed part of pointer offset, pointer types only */ + s32 off; union { /* valid when type == PTR_TO_PACKET */ - u16 range; + int range; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ struct bpf_map *map_ptr; - u32 btf_id; /* for PTR_TO_BTF_ID */ + /* for PTR_TO_BTF_ID */ + struct { + struct btf *btf; + u32 btf_id; + }; u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ /* Max size from any of the above. */ - unsigned long raw; + struct { + unsigned long raw1; + unsigned long raw2; + } raw; }; - /* Fixed part of pointer offset, pointer types only */ - s32 off; /* For PTR_TO_PACKET, used to find other pointers with the same variable * offset, so they can share range knowledge. 
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we @@ -311,7 +319,10 @@ struct bpf_insn_aux_data { struct { enum bpf_reg_type reg_type; /* type of pseudo_btf_id */ union { - u32 btf_id; /* btf_id for struct typed var */ + struct { + struct btf *btf; + u32 btf_id; /* btf_id for struct typed var */ + }; u32 mem_size; /* mem_size for non-struct typed var */ }; } btf_var; @@ -459,9 +470,12 @@ int check_ctx_reg(struct bpf_verifier_env *env, /* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, - u32 btf_id) + struct btf *btf, u32 btf_id) { - return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id; + if (tgt_prog) + return ((u64)tgt_prog->aux->id << 32) | btf_id; + else + return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id; } int bpf_check_attach_target(struct bpf_verifier_log *log, diff --git a/include/linux/btf.h b/include/linux/btf.h index 2bf641829664..4c200f5d242b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -18,6 +18,7 @@ struct btf_show; extern const struct file_operations btf_fops; +void btf_get(struct btf *btf); void btf_put(struct btf *btf); int btf_new_fd(const union bpf_attr *attr); struct btf *btf_get_by_fd(int fd); @@ -88,7 +89,8 @@ int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj, char *buf, int len, u64 flags); int btf_get_fd_by_id(u32 id); -u32 btf_id(const struct btf *btf); +u32 btf_obj_id(const struct btf *btf); +bool btf_is_kernel(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); @@ -206,6 +208,8 @@ static inline const struct btf_var_secinfo *btf_type_var_secinfo( } #ifdef CONFIG_BPF_SYSCALL +struct bpf_prog; + const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index e3a0be2c90ad..7bb66e15b481 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -77,4 +77,9 @@ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) +#ifdef __GENKSYMS__ +/* genksyms gets confused by _Static_assert */ +#define _Static_assert(expr, ...) +#endif + #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 41ff31795320..197a79535cc2 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -98,14 +98,13 @@ static inline unsigned int can_bit_time(const struct can_bittiming *bt) } /* - * get_can_dlc(value) - helper macro to cast a given data length code (dlc) - * to u8 and ensure the dlc value to be max. 8 bytes. + * can_cc_dlc2len(value) - convert a given data length code (dlc) of a + * Classical CAN frame into a valid data length of max. 8 bytes. 
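The get_can_dlc()/get_canfd_dlc() macros are renamed here to make the DLC-to-length conversion explicit. A short sketch of the receive-path usage, based on the can_cc_dlc2len() helper defined just below; demo_rx(), its hw_dlc parameter, and the use of alloc_can_skb() are illustrative assumptions, not part of this patch:

	/* Hypothetical RX handler: clamp the raw hardware DLC (0..15) to a
	 * Classical CAN payload length of at most 8 bytes before copying. */
	static void demo_rx(struct net_device *dev, u8 hw_dlc, const u8 *buf)
	{
		struct can_frame *cf;
		struct sk_buff *skb = alloc_can_skb(dev, &cf);

		if (!skb)
			return;

		cf->len = can_cc_dlc2len(hw_dlc);	/* was: get_can_dlc() */
		memcpy(cf->data, buf, cf->len);
		netif_rx(skb);
	}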
* * To be used in the CAN netdriver receive path to ensure conformance with * ISO 11898-1 Chapter 8.4.2.3 (DLC field) */ -#define get_can_dlc(i) (min_t(u8, (i), CAN_MAX_DLC)) -#define get_canfd_dlc(i) (min_t(u8, (i), CANFD_MAX_DLC)) +#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN)) /* Check for outgoing skbs that have not been created by the CAN subsystem */ static inline bool can_skb_headroom_valid(struct net_device *dev, @@ -171,6 +170,31 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb) return skb->len == CANFD_MTU; } +/* helper to get the data length code (DLC) for Classical CAN raw DLC access */ +static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode) +{ + /* return len8_dlc as dlc value only if all conditions apply */ + if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && + (cf->len == CAN_MAX_DLEN) && + (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC)) + return cf->len8_dlc; + + /* return the payload length as dlc value */ + return cf->len; +} + +/* helper to set len and len8_dlc value for Classical CAN raw DLC access */ +static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc, + const u32 ctrlmode) +{ + /* the caller already ensured that dlc is a value from 0 .. 15 */ + if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN) + cf->len8_dlc = dlc; + + /* limit the payload length 'len' to CAN_MAX_DLEN */ + cf->len = can_cc_dlc2len(dlc); +} + /* helper to define static CAN controller features at device creation time */ static inline void can_set_static_ctrlmode(struct net_device *dev, u32 static_mode) @@ -186,11 +210,11 @@ static inline void can_set_static_ctrlmode(struct net_device *dev, dev->mtu = CANFD_MTU; } -/* get data length from can_dlc with sanitized can_dlc */ -u8 can_dlc2len(u8 can_dlc); +/* get data length from raw data length code (DLC) */ +u8 can_fd_dlc2len(u8 dlc); /* map the sanitized data length to an appropriate data length code */ -u8 can_len2dlc(u8 len); +u8 can_fd_len2dlc(u8 len); struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs); diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h index 5fd627e9da19..f38772fd0c07 100644 --- a/include/linux/can/dev/peak_canfd.h +++ b/include/linux/can/dev/peak_canfd.h @@ -282,7 +282,7 @@ static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg) } /* return the dlc value from any received message channel_dlc field */ -static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg) +static inline u8 pucan_msg_get_dlc(const struct pucan_rx_msg *msg) { return msg->channel_dlc >> 4; } diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index 900b9f4e0605..fc61cf4eff1c 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) */ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) { - if (skb_shared(skb)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *nskb; - if (likely(nskb)) { - can_skb_set_owner(nskb, skb->sk); - consume_skb(skb); - return nskb; - } else { - kfree_skb(skb); - return NULL; - } + nskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!nskb)) { + kfree_skb(skb); + return NULL; } - /* we can assume to have an unshared skb with proper owner */ - return skb; + can_skb_set_owner(nskb, skb->sk); + consume_skb(skb); + return nskb; } #endif /* !_CAN_SKB_H */ diff --git 
a/include/linux/ccp.h b/include/linux/ccp.h index a5dfbaf2470d..868924dec5a1 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -15,7 +15,8 @@ #include <linux/workqueue.h> #include <linux/list.h> #include <crypto/aes.h> -#include <crypto/sha.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> struct ccp_device; struct ccp_cmd; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index fee0b5547cd0..559ee05f86b2 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -668,21 +668,6 @@ struct cgroup_subsys { */ bool threaded:1; - /* - * If %false, this subsystem is properly hierarchical - - * configuration, resource accounting and restriction on a parent - * cgroup cover those of its children. If %true, hierarchy support - * is broken in some ways - some subsystems ignore hierarchy - * completely while others are only implemented half-way. - * - * It's now disallowed to create nested cgroups if the subsystem is - * broken and cgroup core will emit a warning message on such - * cases. Eventually, all subsystems will be made properly - * hierarchical and this will go away. - */ - bool broken_hierarchy:1; - bool warned_broken_hierarchy:1; - /* the following two fields are initialized automatically during boot */ int id; const char *name; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 618838c48313..451c2d26a5db 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -854,7 +854,6 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ struct cgroup_namespace { - refcount_t count; struct ns_common ns; struct user_namespace *user_ns; struct ucounts *ucounts; @@ -889,12 +888,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, static inline void get_cgroup_ns(struct cgroup_namespace *ns) { if (ns) - refcount_inc(&ns->count); + refcount_inc(&ns->ns.count); } static inline void put_cgroup_ns(struct cgroup_namespace *ns) { - if (ns && refcount_dec_and_test(&ns->count)) + if (ns && refcount_dec_and_test(&ns->ns.count)) free_cgroup_ns(ns); } diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 1de5a1151ee7..ed4070ed41ef 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -98,11 +98,8 @@ extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, unsigned int alloc_flags, int highest_zoneidx); -extern void defer_compaction(struct zone *zone, int order); -extern bool compaction_deferred(struct zone *zone, int order); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); -extern bool compaction_restarting(struct zone *zone, int order); /* Compaction has made some progress and retrying makes sense */ static inline bool compaction_made_progress(enum compact_result result) @@ -194,15 +191,6 @@ static inline enum compact_result compaction_suitable(struct zone *zone, int ord return COMPACT_SKIPPED; } -static inline void defer_compaction(struct zone *zone, int order) -{ -} - -static inline bool compaction_deferred(struct zone *zone, int order) -{ - return true; -} - static inline bool compaction_made_progress(enum compact_result result) { return false; diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 230604e7f057..98cff1b4b088 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -8,8 +8,10 @@ + __clang_patchlevel__) #if CLANG_VERSION < 100001
+#ifndef __BPF_TRACING__ # error Sorry, your version of Clang is too old - please use 10.0.1 or newer. #endif +#endif /* Compiler specific definitions for Clang compiler */ @@ -60,12 +62,6 @@ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif -/* The following are for compatibility with GCC, from compiler-gcc.h, - * and may be redefined here because they should not be shared with other - * compilers, like ICC. - */ -#define barrier() __asm__ __volatile__("" : : : "memory") - #if __has_feature(shadow_call_stack) # define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) #endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d1e3c6896b71..74c6c0486eed 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -15,25 +15,6 @@ # error Sorry, your version of GCC is too old - please use 4.9 or newer. #endif -/* Optimization barrier */ - -/* The "volatile" is due to gcc bugs */ -#define barrier() __asm__ __volatile__("": : :"memory") -/* - * This version is i.e. to prevent dead stores elimination on @ptr - * where gcc and llvm may behave differently when otherwise using - * normal barrier(): while gcc behavior gets along with a normal - * barrier(), llvm needs an explicit input variable to be assumed - * clobbered. The issue is as follows: while the inline asm might - * access any memory it wants, the compiler could have fit all of - * @ptr into memory registers instead, and since @ptr never escaped - * from that, it proved that the inline asm wasn't touching any of - * it. This version works well with both compilers, i.e. we're telling - * the compiler that the inline asm absolutely may see the contents - * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 - */ -#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") - /* * This macro obfuscates arithmetic on a variable address so that gcc * shouldn't recognize the original var, and make assumptions about it. @@ -175,5 +156,3 @@ #else #define __diag_GCC_8(s) #endif - -#define __no_fgcse __attribute__((optimize("-fno-gcse"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index e512f5505dad..b8fe0c23cfff 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Optimization barrier */ #ifndef barrier -# define barrier() __memory_barrier() +/* The "volatile" is due to gcc bugs */ +# define barrier() __asm__ __volatile__("": : :"memory") #endif #ifndef barrier_data -# define barrier_data(ptr) barrier() +/* + * This version is i.e. to prevent dead stores elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. 
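The dead-store scenario this comment describes is the idiom behind memzero_explicit(): a minimal sketch, with wipe_secret() as a hypothetical name:

	/* Without barrier_data(), the compiler may drop the memset() because
	 * 'key' is never read again; the "r"(ptr) input makes it assume the
	 * asm can still observe the buffer, so the stores must happen. */
	static void wipe_secret(u8 *key, size_t len)
	{
		memset(key, 0, len);
		barrier_data(key);
	}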
See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") #endif /* workaround for GCC PR82365 if needed */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 6e390d58a9f8..ac3fa37a84f9 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -247,10 +247,6 @@ struct ftrace_likely_data { #define asm_inline asm #endif -#ifndef __no_fgcse -# define __no_fgcse -#endif - /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/include/linux/completion.h b/include/linux/completion.h index bf8e77001f18..51d9ab079629 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -28,8 +28,7 @@ struct completion { struct swait_queue_head wait; }; -#define init_completion_map(x, m) __init_completion(x) -#define init_completion(x) __init_completion(x) +#define init_completion_map(x, m) init_completion(x) static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} @@ -82,7 +81,7 @@ static inline void complete_release(struct completion *x) {} * This inline function will initialize a dynamically created completion * structure. */ -static inline void __init_completion(struct completion *x) +static inline void init_completion(struct completion *x) { x->done = 0; init_swait_queue_head(&x->wait); diff --git a/include/linux/console.h b/include/linux/console.h index 4b1e26c4cb42..20874db50bc8 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -62,7 +62,6 @@ struct consw { int (*con_font_get)(struct vc_data *vc, struct console_font *font); int (*con_font_default)(struct vc_data *vc, struct console_font *font, char *name); - int (*con_font_copy)(struct vc_data *vc, int con); int (*con_resize)(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user); void (*con_set_palette)(struct vc_data *vc, diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index d53cd331c4dd..bceb06498521 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -51,7 +51,8 @@ static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; - if (!context_tracking_enabled()) + if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) || + !context_tracking_enabled()) return 0; prev_ctx = this_cpu_read(context_tracking.state); @@ -63,7 +64,8 @@ static inline enum ctx_state exception_enter(void) static inline void exception_exit(enum ctx_state prev_ctx) { - if (context_tracking_enabled()) { + if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) && + context_tracking_enabled()) { if (prev_ctx != CONTEXT_KERNEL) context_tracking_enter(prev_ctx); } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index fa37b1c66443..584fccd4fcab 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -65,7 +65,6 @@ struct cpufreq_policy { unsigned int max; /* in kHz */ unsigned int cur; /* in kHz, only needed if cpufreq * governors are used */ - unsigned int restore_freq; /* = policy->cur before transition */ unsigned int suspend_freq; /* freq to set during suspend */ unsigned int policy; /* see above */ @@ -110,6 +109,12 @@ struct cpufreq_policy { bool fast_switch_enabled; /* + * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current + * governor. 
+ */ + bool strict_target; + + /* * Preferred average time interval between consecutive invocations of * the driver to set the frequency for this policy. To be set by the * scaling driver (0, which is the default, means no preference). @@ -298,7 +303,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name) struct cpufreq_driver { char name[CPUFREQ_NAME_LEN]; - u8 flags; + u16 flags; void *driver_data; /* needed by all drivers */ @@ -308,10 +313,6 @@ struct cpufreq_driver { /* define one out of two */ int (*setpolicy)(struct cpufreq_policy *policy); - /* - * On failure, should always restore frequency to policy->restore_freq - * (i.e. old freq). - */ int (*target)(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); /* Deprecated */ @@ -422,9 +423,18 @@ struct cpufreq_driver { */ #define CPUFREQ_IS_COOLING_DEV BIT(7) +/* + * Set by drivers that need to update internal upper and lower boundaries along + * with the target frequency and so the core and governors should also invoke + * the driver if the target frequency does not change, but the policy min or max + * may have changed. + */ +#define CPUFREQ_NEED_UPDATE_LIMITS BIT(8) + int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); +bool cpufreq_driver_test_flags(u16 flags); const char *cpufreq_get_current_driver(void); void *cpufreq_get_driver_data(void); @@ -561,12 +571,20 @@ struct cpufreq_governor { char *buf); int (*store_setspeed) (struct cpufreq_policy *policy, unsigned int freq); - /* For governors which change frequency dynamically by themselves */ - bool dynamic_switching; struct list_head governor_list; struct module *owner; + u8 flags; }; + +/* Governor flags */ + +/* For governors which change frequency dynamically by themselves */ +#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0) + +/* For governors wanting the target frequency to be set exactly */ +#define CPUFREQ_GOV_STRICT_TARGET BIT(1) + + /* Pass a target to the cpufreq driver */ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index bc56287a1ed1..0042ef362511 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -152,6 +152,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, + CPUHP_AP_SCHED_WAIT_EMPTY, CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index f0d895d6ac39..383684e30f12 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p, return cpumask_next_and(-1, src1p, src2p); } +static inline int cpumask_any_distribute(const struct cpumask *srcp) +{ + return cpumask_first(srcp); +} + #define for_each_cpu(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_not(cpu, mask) \ @@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); unsigned int cpumask_local_spread(unsigned int i, int node); int cpumask_any_and_distribute(const struct cpumask *src1p, const struct cpumask *src2p); +int cpumask_any_distribute(const struct cpumask *srcp); /** * for_each_cpu - iterate over every cpu in a mask diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 851dd1f9a8a5..d6c4cc9ecc77 100644 --- a/include/linux/debugfs.h +++
b/include/linux/debugfs.h @@ -144,10 +144,9 @@ void debugfs_create_u32_array(const char *name, umode_t mode, struct dentry *parent, struct debugfs_u32_array *array); -struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, - struct dentry *parent, - int (*read_fn)(struct seq_file *s, - void *data)); +void debugfs_create_devm_seqfile(struct device *dev, const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, void *data)); bool debugfs_initialized(void); @@ -327,13 +326,12 @@ static inline void debugfs_create_u32_array(const char *name, umode_t mode, { } -static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, - const char *name, - struct dentry *parent, - int (*read_fn)(struct seq_file *s, - void *data)) +static inline void debugfs_create_devm_seqfile(struct device *dev, + const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)) { - return ERR_PTR(-ENODEV); } static inline ssize_t debugfs_read_file_bool(struct file *file, diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 121a2430d7f7..b6d3bae1c74d 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -15,8 +15,6 @@ #include <linux/pm_opp.h> #include <linux/pm_qos.h> -#define DEVFREQ_NAME_LEN 16 - /* DEVFREQ governor name */ #define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand" #define DEVFREQ_GOV_PERFORMANCE "performance" @@ -139,7 +137,6 @@ struct devfreq_stats { * using devfreq. * @profile: device-specific devfreq profile * @governor: method how to choose frequency based on the usage. - * @governor_name: devfreq governor name for use with this devfreq * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. @@ -176,7 +173,6 @@ struct devfreq { struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; - char governor_name[DEVFREQ_NAME_LEN]; struct notifier_block nb; struct delayed_work work; diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 9df2dfca68dd..14baa73fc2de 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -16,17 +16,6 @@ /** * struct devfreq_cooling_power - Devfreq cooling power ops - * @get_static_power: Take voltage, in mV, and return the static power - * in mW. If NULL, the static power is assumed - * to be 0. - * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and - * return the dynamic power draw in mW. If NULL, - * a simple power model is used. - * @dyn_power_coeff: Coefficient for the simple dynamic power model in - * mW/(MHz mV mV). - * If get_dynamic_power() is NULL, then the - * dynamic power is calculated as - * @dyn_power_coeff * frequency * voltage^2 * @get_real_power: When this is set, the framework uses it to ask the * device driver for the actual power. * Some devices have more sophisticated methods @@ -46,14 +35,8 @@ * max total (static + dynamic) power value for each OPP. 
*/ struct devfreq_cooling_power { - unsigned long (*get_static_power)(struct devfreq *devfreq, - unsigned long voltage); - unsigned long (*get_dynamic_power)(struct devfreq *devfreq, - unsigned long freq, - unsigned long voltage); int (*get_real_power)(struct devfreq *df, u32 *power, unsigned long freq, unsigned long voltage); - unsigned long dyn_power_coeff; }; #ifdef CONFIG_DEVFREQ_THERMAL @@ -65,6 +48,9 @@ struct thermal_cooling_device * of_devfreq_cooling_register(struct device_node *np, struct devfreq *df); struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df); void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); +struct thermal_cooling_device * +devfreq_cooling_em_register(struct devfreq *df, + struct devfreq_cooling_power *dfc_power); #else /* !CONFIG_DEVFREQ_THERMAL */ @@ -87,6 +73,13 @@ devfreq_cooling_register(struct devfreq *df) return ERR_PTR(-EINVAL); } +static inline struct thermal_cooling_device * +devfreq_cooling_em_register(struct devfreq *df, + struct devfreq_cooling_power *dfc_power) +{ + return ERR_PTR(-EINVAL); +} + static inline void devfreq_cooling_unregister(struct thermal_cooling_device *dfc) { diff --git a/include/linux/device.h b/include/linux/device.h index 5ed101be7b2e..89bb8b84173e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -351,19 +351,13 @@ enum dl_dev_state { * struct dev_links_info - Device data related to device links. * @suppliers: List of links to supplier devices. * @consumers: List of links to consumer devices. - * @needs_suppliers: Hook to global list of devices waiting for suppliers. - * @defer_hook: Hook to global list of devices that have deferred sync_state or - * deferred fw_devlink. - * @need_for_probe: If needs_suppliers is on a list, this indicates if the - * suppliers are needed for probe or not. + * @defer_sync: Hook to global list of devices that have deferred sync_state. * @status: Driver status information. */ struct dev_links_info { struct list_head suppliers; struct list_head consumers; - struct list_head needs_suppliers; - struct list_head defer_hook; - bool need_for_probe; + struct list_head defer_sync; enum dl_dev_state status; }; diff --git a/include/linux/device/class.h b/include/linux/device/class.h index e8d470c457d1..e61ec5502019 100644 --- a/include/linux/device/class.h +++ b/include/linux/device/class.h @@ -256,6 +256,20 @@ extern void class_destroy(struct class *cls); /* This is a #define to keep the compiler from merging different * instances of the __key variable */ + +/** + * class_create - create a struct class structure + * @owner: pointer to the module that is to "own" this struct class + * @name: pointer to a string for the name of this class. + * + * This is used to create a struct class pointer that can then be used + * in calls to device_create(). + * + * Returns &struct class pointer on success, or ERR_PTR() on error. + * + * Note, the pointer created here is to be destroyed when finished by + * making a call to class_destroy(). + */ #define class_create(owner, name) \ ({ \ static struct lock_class_key __key; \ diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h new file mode 100644 index 000000000000..583a3a1f9447 --- /dev/null +++ b/include/linux/dma-buf-map.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Pointer to dma-buf-mapped memory, plus helpers. 
+ */ + +#ifndef __DMA_BUF_MAP_H__ +#define __DMA_BUF_MAP_H__ + +#include <linux/io.h> +#include <linux/string.h> + +/** + * DOC: overview + * + * Calling dma-buf's vmap operation returns a pointer to the buffer's memory. + * Depending on the location of the buffer, users may have to access it with + * I/O operations or memory load/store operations. For example, copying to + * system memory could be done with memcpy(), copying to I/O memory would be + * done with memcpy_toio(). + * + * .. code-block:: c + * + * void *vaddr = ...; // pointer to system memory + * memcpy(vaddr, src, len); + * + * void *vaddr_iomem = ...; // pointer to I/O memory + * memcpy_toio(vaddr_iomem, src, len); + * + * When using dma-buf's vmap operation, the returned pointer is encoded as + * :c:type:`struct dma_buf_map <dma_buf_map>`. + * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in + * system or I/O memory and a flag that signals the required method of + * accessing the buffer. Use the returned instance and the helper functions + * to access the buffer's memory in the correct way. + * + * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are + * actually independent from the dma-buf infrastructure. When sharing buffers + * among devices, drivers have to know the location of the memory to access + * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>` + * solves this problem for dma-buf and its users. If other drivers or + * sub-systems require similar functionality, the type could be generalized + * and moved to a more prominent header file. + * + * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is + * considered bad style. Rather than accessing its fields directly, use one + * of the provided helper functions, or implement your own. For example, + * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized + * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with + * dma_buf_map_set_vaddr(). These helpers will set an address in system memory. + * + * .. code-block:: c + * + * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf); + * + * dma_buf_map_set_vaddr(&map, 0xdeadbeaf); + * + * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem(). + * + * .. code-block:: c + * + * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf); + * + * Instances of struct dma_buf_map do not have to be cleaned up, but + * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings + * always refer to system memory. + * + * .. code-block:: c + * + * dma_buf_map_clear(&map); + * + * Test if a mapping is valid with either dma_buf_map_is_set() or + * dma_buf_map_is_null(). + * + * .. code-block:: c + * + * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map)) + * // always true + * + * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared + * for equality with dma_buf_map_is_equal(). Mappings that point to different + * memory spaces, system or I/O, are never equal. That's even true if both + * spaces are located in the same address space, both mappings contain the + * same address value, or both mappings refer to NULL. + * + * .. code-block:: c + * + * struct dma_buf_map sys_map; // refers to system memory + * struct dma_buf_map io_map; // refers to I/O memory + * + * if (dma_buf_map_is_equal(&sys_map, &io_map)) + * // always false + * + * A set up instance of struct dma_buf_map can be used to access or manipulate + * the buffer memory.
Depending on the location of the memory, the provided + * helpers will pick the correct operations. Data can be copied into the memory + * with dma_buf_map_memcpy_to(). The address can be manipulated with + * dma_buf_map_incr(). + * + * .. code-block:: c + * + * const void *src = ...; // source buffer + * size_t len = ...; // length of src + * + * dma_buf_map_memcpy_to(&map, src, len); + * dma_buf_map_incr(&map, len); // go to first byte after the memcpy + */ + +/** + * struct dma_buf_map - Pointer to vmap'ed dma-buf memory. + * @vaddr_iomem: The buffer's address if in I/O memory + * @vaddr: The buffer's address if in system memory + * @is_iomem: True if the dma-buf memory is located in I/O + * memory, or false otherwise. + */ +struct dma_buf_map { + union { + void __iomem *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +/** + * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory + * @vaddr: A system-memory address + */ +#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \ + { \ + .vaddr = (vaddr_), \ + .is_iomem = false, \ + } + +/** + * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory + * @map: The dma-buf mapping structure + * @vaddr: A system-memory address + * + * Sets the address and clears the I/O-memory flag. + */ +static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr) +{ + map->vaddr = vaddr; + map->is_iomem = false; +} + +/** + * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory + * @map: The dma-buf mapping structure + * @vaddr_iomem: An I/O-memory address + * + * Sets the address and the I/O-memory flag. + */ +static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map, + void __iomem *vaddr_iomem) +{ + map->vaddr_iomem = vaddr_iomem; + map->is_iomem = true; +} + +/** + * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality + * @lhs: The dma-buf mapping structure + * @rhs: A dma-buf mapping structure to compare with + * + * Two dma-buf mapping structures are equal if they both refer to the same type of memory + * and to the same address within that memory. + * + * Returns: + * True if both structures are equal, or false otherwise. + */ +static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs, + const struct dma_buf_map *rhs) +{ + if (lhs->is_iomem != rhs->is_iomem) + return false; + else if (lhs->is_iomem) + return lhs->vaddr_iomem == rhs->vaddr_iomem; + else + return lhs->vaddr == rhs->vaddr; +} + +/** + * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL + * @map: The dma-buf mapping structure + * + * Depending on the state of struct dma_buf_map.is_iomem, tests if the + * mapping is NULL. + * + * Returns: + * True if the mapping is NULL, or false otherwise. + */ +static inline bool dma_buf_map_is_null(const struct dma_buf_map *map) +{ + if (map->is_iomem) + return !map->vaddr_iomem; + return !map->vaddr; +} + +/** + * dma_buf_map_is_set - Tests if the dma-buf mapping has been set + * @map: The dma-buf mapping structure + * + * Depending on the state of struct dma_buf_map.is_iomem, tests if the + * mapping has been set. + * + * Returns: + * True if the mapping has been set, or false otherwise. + */ +static inline bool dma_buf_map_is_set(const struct dma_buf_map *map) +{ + return !dma_buf_map_is_null(map); +} + +/** + * dma_buf_map_clear - Clears a dma-buf mapping structure + * @map: The dma-buf mapping structure + * + * Clears all fields to zero, including struct dma_buf_map.is_iomem. So + * mapping structures that were set to point to I/O memory are reset for + * system memory. Pointers are cleared to NULL. This is the default. + */ +static inline void dma_buf_map_clear(struct dma_buf_map *map) +{ + if (map->is_iomem) { + map->vaddr_iomem = NULL; + map->is_iomem = false; + } else { + map->vaddr = NULL; + } +} + +/** + * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping + * @dst: The dma-buf mapping structure + * @src: The source buffer + * @len: The number of bytes in src + * + * Copies data into a dma-buf mapping. The source buffer is in system + * memory. Depending on the buffer's location, the helper picks the correct + * method of accessing the memory. + */ +static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len) +{ + if (dst->is_iomem) + memcpy_toio(dst->vaddr_iomem, src, len); + else + memcpy(dst->vaddr, src, len); +} + +/** + * dma_buf_map_incr - Increments the address stored in a dma-buf mapping + * @map: The dma-buf mapping structure + * @incr: The number of bytes to increment + * + * Increments the address stored in a dma-buf mapping. Depending on the + * buffer's location, the correct value will be updated. + */ +static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr) +{ + if (map->is_iomem) + map->vaddr_iomem += incr; + else + map->vaddr += incr; +} + +#endif /* __DMA_BUF_MAP_H__ */
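As a worked illustration of the helpers above, here is a minimal sketch (for this write-up only, not part of the patch; fill_buffer() and its arguments are hypothetical):

	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(NULL);

	void fill_buffer(void __iomem *mmio, const void *src, size_t len)
	{
		/* Point the mapping at I/O memory; this sets map.is_iomem. */
		dma_buf_map_set_vaddr_iomem(&map, mmio);

		if (dma_buf_map_is_set(&map)) {
			/* Picks memcpy_toio() because is_iomem is true. */
			dma_buf_map_memcpy_to(&map, src, len);
			/* Advance to the first byte after the copy. */
			dma_buf_map_incr(&map, len);
		}

		/* Reset to a NULL system-memory pointer, the default state. */
		dma_buf_map_clear(&map);
	}

The same caller code would work unchanged for a system-memory buffer; only the flag inside the mapping differs.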
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 957b398d30e5..cf72699cb2bc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -13,6 +13,7 @@ #ifndef __DMA_BUF_H__ #define __DMA_BUF_H__ +#include <linux/dma-buf-map.h> #include <linux/file.h> #include <linux/err.h> #include <linux/scatterlist.h> @@ -145,7 +146,8 @@ struct dma_buf_ops { * * A &sg_table scatter list of the backing storage of the DMA buffer, * already mapped into the device address space of the &device attached - * with the provided &dma_buf_attachment. + * with the provided &dma_buf_attachment. The addresses and lengths in + * the scatter list are PAGE_SIZE aligned. * * On failure, returns a negative error value wrapped into a pointer. * May also return -EINTR when a signal was received while being @@ -265,13 +267,13 @@ struct dma_buf_ops { */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); - void *(*vmap)(struct dma_buf *); - void (*vunmap)(struct dma_buf *, void *vaddr); + int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); + void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); }; /** * struct dma_buf - shared buffer object - * @size: size of the buffer + * @size: size of the buffer; invariant over the lifetime of the buffer. * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached, * protected by dma_resv lock. @@ -309,7 +311,7 @@ struct dma_buf { const struct dma_buf_ops *ops; struct mutex lock; unsigned vmapping_counter; - void *vmap_ptr; + struct dma_buf_map vmap_ptr; const char *exp_name; const char *name; spinlock_t name_lock; @@ -403,7 +405,7 @@ struct dma_buf_attachment { * @exp_name: name of the exporter - useful for debugging. * @owner: pointer to exporter module - used for refcounting kernel module * @ops: Attach allocator-defined dma buf ops to the new buffer - * @size: Size of the buffer + * @size: Size of the buffer - invariant over the lifetime of the buffer * @flags: mode flags for the file * @resv: reservation-object, NULL to allocate default one * @priv: Attach private data of allocator to this buffer @@ -502,6 +504,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf, int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); -void *dma_buf_vmap(struct dma_buf *); -void dma_buf_vunmap(struct dma_buf *, void *vaddr); +int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); +void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map); #endif /* __DMA_BUF_H__ */
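With the signature change above, dma_buf_vmap() no longer returns a raw pointer; callers receive the address and its memory type through a struct dma_buf_map out-parameter. A hedged sketch of the updated calling sequence (copy_to_dmabuf() is hypothetical, invented here for illustration):

	static int copy_to_dmabuf(struct dma_buf *dmabuf, const void *data, size_t len)
	{
		struct dma_buf_map map;
		int ret;

		/* 0 on success, negative errno on failure. */
		ret = dma_buf_vmap(dmabuf, &map);
		if (ret)
			return ret;

		/* Correct for both system- and I/O-memory backed buffers. */
		dma_buf_map_memcpy_to(&map, data, len);

		dma_buf_vunmap(dmabuf, &map);
		return 0;
	}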
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h index 5896441ee604..efa2f0309f00 100644 --- a/include/linux/dma/ti-cppi5.h +++ b/include/linux/dma/ti-cppi5.h @@ -47,7 +47,7 @@ struct cppi5_host_desc_t { u32 buf_info1; u32 org_buf_len; u64 org_buf_ptr; - u32 epib[0]; + u32 epib[]; } __packed; #define CPPI5_DESC_MIN_ALIGN (16U) @@ -139,7 +139,7 @@ struct cppi5_desc_epib_t { */ struct cppi5_monolithic_desc_t { struct cppi5_desc_hdr_t hdr; - u32 epib[0]; + u32 epib[]; }; #define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U) 
diff --git a/include/linux/edac.h b/include/linux/edac.h index 15e8f3d8a895..76d3562d3006 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -175,11 +175,15 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. * @MEM_LRDDR3: Load-Reduced DDR3 memory. + * @MEM_LPDDR3: Low-Power DDR3 memory. * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. * @MEM_LRDDR4: Load-Reduced DDR4 memory. + * @MEM_LPDDR4: Low-Power DDR4 memory. + * @MEM_DDR5: Unbuffered DDR5 RAM * @MEM_NVDIMM: Non-volatile RAM + * @MEM_WIO2: Wide I/O 2.
*/ enum mem_type { MEM_EMPTY = 0, @@ -200,10 +204,14 @@ enum mem_type { MEM_DDR3, MEM_RDDR3, MEM_LRDDR3, + MEM_LPDDR3, MEM_DDR4, MEM_RDDR4, MEM_LRDDR4, + MEM_LPDDR4, + MEM_DDR5, MEM_NVDIMM, + MEM_WIO2, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -223,13 +231,17 @@ enum mem_type { #define MEM_FLAG_XDR BIT(MEM_XDR) #define MEM_FLAG_DDR3 BIT(MEM_DDR3) #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) +#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3) #define MEM_FLAG_DDR4 BIT(MEM_DDR4) #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) #define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) +#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4) +#define MEM_FLAG_DDR5 BIT(MEM_DDR5) #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) +#define MEM_FLAG_WIO2 BIT(MEM_WIO2) /** - * enum edac-type - Error Detection and Correction capabilities and mode + * enum edac_type - Error Detection and Correction capabilities and mode * @EDAC_UNKNOWN: Unknown if ECC is available * @EDAC_NONE: Doesn't support ECC * @EDAC_RESERVED: Reserved ECC type @@ -309,7 +321,7 @@ enum scrub_type { #define OP_OFFLINE 0x300 /** - * enum edac_mc_layer - memory controller hierarchy layer + * enum edac_mc_layer_type - memory controller hierarchy layer * * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" diff --git a/include/linux/elf.h b/include/linux/elf.h index 5d5b0321da0b..c9a46c4e183b 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -22,6 +22,16 @@ SET_PERSONALITY(ex) #endif +#ifndef START_THREAD +#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \ + start_thread(regs, elf_entry, start_stack) +#endif + +#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES) +#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \ + arch_setup_additional_pages(bprm, interpreter) +#endif + #define ELF32_GNU_PROPERTY_ALIGN 4 #define ELF64_GNU_PROPERTY_ALIGN 8 diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 46c3d691f677..de51c1bef27d 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -104,6 +104,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } +#if defined(CONFIG_UM) || defined(CONFIG_IA64) /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its @@ -118,5 +119,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); extern int elf_core_write_extra_data(struct coredump_params *cprm); extern size_t elf_core_extra_data_size(void); +#else +static inline Elf_Half elf_core_extra_phdrs(void) +{ + return 0; +} + +static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + return 1; +} + +static inline int elf_core_write_extra_data(struct coredump_params *cprm) +{ + return 1; +} + +static inline size_t elf_core_extra_data_size(void) +{ + return 0; +} +#endif #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index b67a51c574b9..757fc60658fa 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -13,9 +13,8 @@ /** * em_perf_state - Performance state of a performance domain * @frequency: The frequency in KHz, for consistency with CPUFreq - * @power: The power consumed at this level, in milli-watts (by 1 CPU or - by a registered device). It can be a total power: static and - dynamic. + * @power: The power consumed at this level (by 1 CPU or by a registered + * device). 
It can be a total power: static and dynamic. * @cost: The cost coefficient associated with this level, used during * energy calculation. Equal to: power * max_frequency / frequency */ @@ -29,6 +28,8 @@ struct em_perf_state { * em_perf_domain - Performance domain * @table: List of performance states, in ascending order * @nr_perf_states: Number of performance states + * @milliwatts: Flag indicating the power values are in milli-Watts + * or some other scale. * @cpus: Cpumask covering the CPUs of the domain. It's here * for performance reasons to avoid potential cache * misses during energy calculations in the scheduler @@ -43,6 +44,7 @@ struct em_perf_state { struct em_perf_domain { struct em_perf_state *table; int nr_perf_states; + int milliwatts; unsigned long cpus[]; }; @@ -55,7 +57,7 @@ struct em_data_callback { /** * active_power() - Provide power at the next performance state of * a device - * @power : Active power at the performance state in mW + * @power : Active power at the performance state * (modified) * @freq : Frequency at the performance state in kHz * (modified) @@ -66,8 +68,8 @@ struct em_data_callback { * and frequency. * * In case of CPUs, the power is the one of a single CPU in the domain, - * expressed in milli-watts. It is expected to fit in the - * [0, EM_MAX_POWER] range. + * expressed in milli-Watts or an abstract scale. It is expected to + * fit in the [0, EM_MAX_POWER] range. * * Return 0 on success. */ @@ -79,7 +81,8 @@ struct em_data_callback { struct em_perf_domain *em_cpu_get(int cpu); struct em_perf_domain *em_pd_get(struct device *dev); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, - struct em_data_callback *cb, cpumask_t *span); + struct em_data_callback *cb, cpumask_t *span, + bool milliwatts); void em_dev_unregister_perf_domain(struct device *dev); /** @@ -103,6 +106,9 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, struct em_perf_state *ps; int i, cpu; + if (!sum_util) + return 0; + /* * In order to predict the performance state, map the utilization of * the most utilized CPU of the performance domain to a requested @@ -186,7 +192,8 @@ struct em_data_callback {}; static inline int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, - struct em_data_callback *cb, cpumask_t *span) + struct em_data_callback *cb, cpumask_t *span, + bool milliwatts) { return -EINVAL; }
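To make the new registration flow concrete, here is a hedged sketch. The my_active_power(), my_em_cb and my_register_em() names and the single operating point are invented for illustration, and the full active_power() prototype (including the trailing struct device * argument) is assumed rather than shown in this hunk:

	/* Report one hypothetical operating point in real milli-Watts. */
	static int my_active_power(unsigned long *power, unsigned long *freq,
				   struct device *dev)
	{
		*freq = 800000;		/* kHz, per the kerneldoc above */
		*power = 150;		/* mW, must fit in [0, EM_MAX_POWER] */
		return 0;
	}

	static struct em_data_callback my_em_cb = { .active_power = my_active_power };

	static int my_register_em(struct device *my_dev)
	{
		/* span is NULL here, assuming a non-CPU device; the power
		 * values are true milli-Watts, so milliwatts is true. */
		return em_dev_register_perf_domain(my_dev, 1, &my_em_cb, NULL, true);
	}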
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 474f29638d2c..7c581a4c3797 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -13,22 +13,6 @@ * Define dummy _TIF work flags if not defined by the architecture or for * disabled functionality. */ -#ifndef _TIF_SYSCALL_EMU -# define _TIF_SYSCALL_EMU (0) -#endif - -#ifndef _TIF_SYSCALL_TRACEPOINT -# define _TIF_SYSCALL_TRACEPOINT (0) -#endif - -#ifndef _TIF_SECCOMP -# define _TIF_SECCOMP (0) -#endif - -#ifndef _TIF_SYSCALL_AUDIT -# define _TIF_SYSCALL_AUDIT (0) -#endif - #ifndef _TIF_PATCH_PENDING # define _TIF_PATCH_PENDING (0) #endif @@ -37,28 +21,36 @@ # define _TIF_UPROBE (0) #endif +#ifndef _TIF_NOTIFY_SIGNAL +# define _TIF_NOTIFY_SIGNAL (0) +#endif + /* - * TIF flags handled in syscall_enter_from_user_mode() + * SYSCALL_WORK flags handled in syscall_enter_from_user_mode() */ -#ifndef ARCH_SYSCALL_ENTER_WORK -# define ARCH_SYSCALL_ENTER_WORK (0) +#ifndef ARCH_SYSCALL_WORK_ENTER +# define ARCH_SYSCALL_WORK_ENTER (0) #endif -#define SYSCALL_ENTER_WORK \ - (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \ - ARCH_SYSCALL_ENTER_WORK) - /* - * TIF flags handled in syscall_exit_to_user_mode() + * SYSCALL_WORK flags handled in syscall_exit_to_user_mode() */ -#ifndef ARCH_SYSCALL_EXIT_WORK -# define ARCH_SYSCALL_EXIT_WORK (0) +#ifndef ARCH_SYSCALL_WORK_EXIT +# define ARCH_SYSCALL_WORK_EXIT (0) #endif -#define SYSCALL_EXIT_WORK \ - (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK) +#define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \ + SYSCALL_WORK_SYSCALL_TRACEPOINT | \ + SYSCALL_WORK_SYSCALL_TRACE | \ + SYSCALL_WORK_SYSCALL_EMU | \ + SYSCALL_WORK_SYSCALL_AUDIT | \ + SYSCALL_WORK_SYSCALL_USER_DISPATCH | \ + ARCH_SYSCALL_WORK_ENTER) +#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \ + SYSCALL_WORK_SYSCALL_TRACE | \ + SYSCALL_WORK_SYSCALL_AUDIT | \ + SYSCALL_WORK_SYSCALL_USER_DISPATCH | \ + ARCH_SYSCALL_WORK_EXIT) /* * TIF flags handled in exit_to_user_mode_loop() @@ -69,7 +61,7 @@ #define EXIT_TO_USER_MODE_WORK \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \ + _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) /** @@ -110,6 +102,27 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs #endif /** + * enter_from_user_mode - Establish state when coming from user mode + * @regs: Pointer to current's pt_regs + * + * Syscall/interrupt entry disables interrupts, but user mode is traced as + * interrupts enabled. Also with NO_HZ_FULL RCU might be idle. + * + * 1) Tell lockdep that interrupts are disabled + * 2) Invoke context tracking if enabled to reactivate RCU + * 3) Trace interrupts off state + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled. The calling code has to be non-instrumentable. When the + * function returns all state is correct and interrupts are still + * disabled. The subsequent functions can be instrumented. + * + * This is invoked when there is architecture specific functionality to be + * done between establishing state and enabling interrupts. The caller must + * enable interrupts before invoking syscall_enter_from_user_mode_work(). + */ +void enter_from_user_mode(struct pt_regs *regs); + +/** * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts * @regs: Pointer to current's pt_regs * * Invoked from architecture specific syscall entry code with interrupts * disabled. The calling code has to be non-instrumentable. When the * function returns all state is correct, interrupts are enabled and the * subsequent functions can be instrumented. * - * This handles lockdep, RCU (context tracking) and tracing state.
+ * This handles lockdep, RCU (context tracking) and tracing state, i.e. + * the functionality provided by enter_from_user_mode(). * * This is invoked when there is extra architecture specific functionality * to be done between establishing state and handling user mode entry work. @@ -144,8 +158,8 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); * * It handles the following work items: * - * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(), - * __secure_computing(), trace_sys_enter() + * 1) syscall_work flag dependent invocations of + * arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter() * 2) Invocation of audit_syscall_entry() */ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); @@ -259,12 +273,13 @@ static __always_inline void arch_exit_to_user_mode(void) { } #endif /** - * arch_do_signal - Architecture specific signal delivery function + * arch_do_signal_or_restart - Architecture specific signal delivery function * @regs: Pointer to current's pt_regs + * @has_signal: actual signal to handle * * Invoked from exit_to_user_mode_loop(). */ -void arch_do_signal(struct pt_regs *regs); +void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal); /** * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit() @@ -286,6 +301,41 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) #endif /** + * exit_to_user_mode - Fixup state when exiting to user mode + * + * Syscall/interrupt exit enables interrupts, but the kernel state is + * interrupts disabled when this is invoked. Also tell RCU about it. + * + * 1) Trace interrupts on state + * 2) Invoke context tracking if enabled to adjust RCU state + * 3) Invoke architecture specific last minute exit code, e.g. speculation + * mitigations, etc.: arch_exit_to_user_mode() + * 4) Tell lockdep that interrupts are enabled + * + * Invoked from architecture specific code when syscall_exit_to_user_mode() + * is not suitable as the last step before returning to userspace. Must be + * invoked with interrupts disabled and the caller must be + * non-instrumentable. + * The caller has to invoke syscall_exit_to_user_mode_work() before this. + */ +void exit_to_user_mode(void); + +/** + * syscall_exit_to_user_mode_work - Handle work before returning to user mode + * @regs: Pointer to current's pt_regs + * + * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling + * exit_to_user_mode() to perform the final transition to user mode. + * + * Calling convention is the same as for syscall_exit_to_user_mode() and it + * returns with all work handled and interrupts disabled. The caller must + * invoke exit_to_user_mode() before actually switching to user mode to + * make the final state transitions. Interrupts must stay disabled between + * return from this function and the invocation of exit_to_user_mode(). + */ +void syscall_exit_to_user_mode_work(struct pt_regs *regs); + +/** * syscall_exit_to_user_mode - Handle work before returning to user mode * @regs: Pointer to current's pt_regs * @@ -307,8 +357,12 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) * - Architecture specific one time work arch_exit_to_user_mode_prepare() * - Address limit and lockdep checks * - * 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes - * arch_exit_to_user_mode() to handle e.g. speculation mitigations + * 3) Final transition (lockdep, tracing, context tracking, RCU), i.e.
the + * functionality in exit_to_user_mode(). + * + * This is a combination of syscall_exit_to_user_mode_work() (1,2) and + * exit_to_user_mode(). This function is preferred unless there is a + * compelling architectural reason to use the separate functions. */ void syscall_exit_to_user_mode(struct pt_regs *regs); @@ -341,8 +395,26 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs); void irqentry_exit_to_user_mode(struct pt_regs *regs); #ifndef irqentry_state +/** + * struct irqentry_state - Opaque object for exception state storage + * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the + * exit path has to invoke rcu_irq_exit(). + * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that + * lockdep state is restored correctly on exit from nmi. + * + * This opaque object is filled in by the irqentry_*_enter() functions and + * must be passed back into the corresponding irqentry_*_exit() functions + * when the exception is complete. + * + * Callers of irqentry_*_[enter|exit]() must consider this structure opaque + * and all members private. Descriptions of the members are provided to aid in + * the maintenance of the irqentry_*() functions. + */ typedef struct irqentry_state { - bool exit_rcu; + union { + bool exit_rcu; + bool lockdep; + }; } irqentry_state_t; #endif @@ -392,7 +464,7 @@ void irqentry_exit_cond_resched(void); * @state: Return value from matching call to irqentry_enter() * * Depending on the return target (kernel/user) this runs the necessary - * preemption and work checks if possible and reguired and returns to + * preemption and work checks if possible and required and returns to * the caller with interrupts disabled and no further work pending. * * This is the last action before returning to the low level ASM code which * @@ -402,4 +474,23 @@ */ void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state); +/** + * irqentry_nmi_enter - Handle NMI entry + * @regs: Pointer to current's pt_regs + * + * Similar to irqentry_enter() but taking care of the NMI constraints. + */ +irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs); + +/** + * irqentry_nmi_exit - Handle return from NMI handling + * @regs: Pointer to pt_regs (NMI entry regs) + * @irq_state: Return value from matching call to irqentry_nmi_enter() + * + * Last action before returning to the low level assembly code. + * + * Counterpart to irqentry_nmi_enter(). + */ +void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state); + #endif
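The split enter/exit helpers above exist so an architecture can open-code the transitions. A hedged sketch of the intended ordering, pieced together from the kerneldoc contracts in this header (the function name and the syscall-number handling are hypothetical and arch-specific):

	void noinstr arch_do_syscall(struct pt_regs *regs, long nr)
	{
		enter_from_user_mode(regs);	/* state established, IRQs still off */
		local_irq_enable();		/* caller enables IRQs ... */
		nr = syscall_enter_from_user_mode_work(regs, nr); /* ... before entry work */

		/* ... dispatch the system call here ... */

		syscall_exit_to_user_mode_work(regs);	/* returns with IRQs disabled */
		exit_to_user_mode();		/* final transition, IRQs must stay off */
	}

An architecture with no extra work between the steps would simply call syscall_exit_to_user_mode(), which combines the last two calls.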
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 0cef17afb41a..9b93f8584ff7 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -11,8 +11,8 @@ # define ARCH_XFER_TO_GUEST_MODE_WORK (0) #endif -#define XFER_TO_GUEST_MODE_WORK \ - (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +#define XFER_TO_GUEST_MODE_WORK \ + (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) struct kvm_vcpu; 
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 6408b446051f..e3da25b51ae4 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -215,6 +215,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19) #define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20) #define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21) +#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0) #define ETHTOOL_COALESCE_USECS \ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) 
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 8f000fada5a4..0350393465d4 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -22,14 +22,6 @@ struct file; struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); #endif -/* Used to initialize the epoll bits inside the "struct file" */ -static inline void eventpoll_init_file(struct file *file) -{ - INIT_LIST_HEAD(&file->f_ep_links); - INIT_LIST_HEAD(&file->f_tfile_llink); -} - - /* Used to release the epoll bits inside the "struct file" */ void eventpoll_release_file(struct file *file); @@ -50,7 +42,7 @@ static inline void eventpoll_release(struct file *file) * because the file is on the way to be removed and nobody ( but * eventpoll ) has still a reference to this file.
*/ - if (likely(list_empty(&file->f_ep_links))) + if (likely(!file->f_ep)) return; /* @@ -72,7 +64,6 @@ static inline int ep_op_has_event(int op) #else -static inline void eventpoll_init_file(struct file *file) {} static inline void eventpoll_release(struct file *file) {} #endif diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 3ceb72b67a7a..9f4d4bcbf251 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -213,12 +213,25 @@ struct export_operations { bool write, u32 *device_generation); int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, int nr_iomaps, struct iattr *iattr); +#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */ +#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */ +#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */ +#define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */ +#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply + atomic attribute updates + */ + unsigned long flags; }; extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent); extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len, int connectable); +extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt, + struct fid *fid, int fh_len, + int fileid_type, + int (*acceptable)(void *, struct dentry *), + void *context); extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), void *context); diff --git a/include/linux/filter.h b/include/linux/filter.h index 72d62cbc1578..29c27656165b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -21,7 +21,7 @@ #include <linux/if_vlan.h> #include <linux/vmalloc.h> #include <linux/sockptr.h> -#include <crypto/sha.h> +#include <crypto/sha1.h> #include <net/sch_generic.h> @@ -558,21 +558,21 @@ struct sk_filter { DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); #define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \ - u32 ret; \ + u32 __ret; \ cant_migrate(); \ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ - struct bpf_prog_stats *stats; \ - u64 start = sched_clock(); \ - ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - stats = this_cpu_ptr(prog->aux->stats); \ - u64_stats_update_begin(&stats->syncp); \ - stats->cnt++; \ - stats->nsecs += sched_clock() - start; \ - u64_stats_update_end(&stats->syncp); \ + struct bpf_prog_stats *__stats; \ + u64 __start = sched_clock(); \ + __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ + __stats = this_cpu_ptr(prog->aux->stats); \ + u64_stats_update_begin(&__stats->syncp); \ + __stats->cnt++; \ + __stats->nsecs += sched_clock() - __start; \ + u64_stats_update_end(&__stats->syncp); \ } else { \ - ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ + __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ } \ - ret; }) + __ret; }) #define BPF_PROG_RUN(prog, ctx) \ __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func) diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 5968df82b991..41a1bab98b7e 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -50,10 +50,6 @@ #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U #define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U -/* Feature check status */ -#define PM_FEATURE_INVALID -1 -#define PM_FEATURE_UNCHECKED 0 - /* * Firmware FPGA Manager flags * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration diff --git 
a/include/linux/font.h b/include/linux/font.h index b5b312c19e46..abf1442ce719 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -16,7 +16,8 @@ struct font_desc { int idx; const char *name; - int width, height; + unsigned int width, height; + unsigned int charcount; const void *data; int pref; }; 
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index e8ca62b2cb5b..2bc3030a69e5 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -198,6 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr); int fpga_mgr_register(struct fpga_manager *mgr); void fpga_mgr_unregister(struct fpga_manager *mgr); +int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr); + struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name, const struct fpga_manager_ops *mops, void *priv); 
diff --git a/include/linux/freelist.h b/include/linux/freelist.h new file mode 100644 index 000000000000..fc1842b96469 --- /dev/null +++ b/include/linux/freelist.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +#ifndef FREELIST_H +#define FREELIST_H + +#include <linux/atomic.h> + +/* + * Copyright: cameron@moodycamel.com + * + * A simple CAS-based lock-free free list. Not the fastest thing in the world + * under heavy contention, but simple and correct (assuming nodes are never + * freed until after the free list is destroyed), and fairly speedy under low + * contention. + * + * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists + */ + +struct freelist_node { + atomic_t refs; + struct freelist_node *next; +}; + +struct freelist_head { + struct freelist_node *head; +}; + +#define REFS_ON_FREELIST 0x80000000 +#define REFS_MASK 0x7FFFFFFF + +static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list) +{ + /* + * Since the refcount is zero, and nobody can increase it once it's + * zero (except us, and we run only one copy of this method per node at + * a time, i.e. the single thread case), then we know we can safely + * change the next pointer of the node; however, once the refcount is + * back above zero, then other threads could increase it (happens under + * heavy contention, when the refcount goes to zero in between a load + * and a refcount increment of a node in try_get, then back up to + * something non-zero, then the refcount increment is done by the other + * thread) -- so if the CAS to add the node to the actual list fails, + * decrease the refcount and leave the add operation to the next thread + * who puts the refcount back to zero (which could be us, hence the + * loop). + */ + struct freelist_node *head = READ_ONCE(list->head); + + for (;;) { + WRITE_ONCE(node->next, head); + atomic_set_release(&node->refs, 1); + + if (!try_cmpxchg_release(&list->head, &head, node)) { + /* + * Hmm, the add failed, but we can only try again when + * the refcount goes back to zero. + */ + if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1) + continue; + } + return; + } +} + +static inline void freelist_add(struct freelist_node *node, struct freelist_head *list) +{ + /* + * We know that the should-be-on-freelist bit is 0 at this point, so + * it's safe to set it using a fetch_add. + */ + if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) { + /* + * Oh look! We were the last ones referencing this node, and we + * know we want to add it to the free list, so let's do it! + */ + __freelist_add(node, list); + } +} + +static inline struct freelist_node *freelist_try_get(struct freelist_head *list) +{ + struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head); + unsigned int refs; + + while (head) { + prev = head; + refs = atomic_read(&head->refs); + if ((refs & REFS_MASK) == 0 || + !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) { + head = smp_load_acquire(&list->head); + continue; + } + + /* + * Good, reference count has been incremented (it wasn't at + * zero), which means we can read the next and not worry about + * it changing between now and the time we do the CAS. + */ + next = READ_ONCE(head->next); + if (try_cmpxchg_acquire(&list->head, &head, next)) { + /* + * Yay, got the node. This means it was on the list, + * which means should-be-on-freelist must be false no + * matter the refcount (because nobody else knows it's + * been taken off yet, it can't have been put back on). + */ + WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST); + + /* + * Decrease refcount twice, once for our ref, and once + * for the list's ref. + */ + atomic_fetch_add(-2, &head->refs); + + return head; + } + + /* + * OK, the head must have changed on us, but we still need to decrement + * the refcount we increased. + */ + refs = atomic_fetch_add(-1, &prev->refs); + if (refs == REFS_ON_FREELIST + 1) + __freelist_add(prev, list); + } + + return NULL; +} + +#endif /* FREELIST_H */
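A hedged usage sketch for the freelist above (struct my_obj, my_obj_put() and my_obj_get() are hypothetical): objects embed a freelist_node and are recovered with container_of(). Per the comment above, nodes must never be freed while the list is still in use.

	struct my_obj {
		struct freelist_node fnode;
		int payload;
	};

	static struct freelist_head my_pool;	/* zero-initialized: empty list */

	static void my_obj_put(struct my_obj *obj)
	{
		freelist_add(&obj->fnode, &my_pool);
	}

	static struct my_obj *my_obj_get(void)
	{
		struct freelist_node *node = freelist_try_get(&my_pool);

		return node ? container_of(node, struct my_obj, fnode) : NULL;
	}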
diff --git a/include/linux/fs.h b/include/linux/fs.h index 0bd126418bb6..afb42d1bd64d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -923,7 +923,7 @@ struct file { const struct file_operations *f_op; /* - * Protects f_ep_links, f_flags. + * Protects f_ep, f_flags. * Must not be taken from IRQ context. */ spinlock_t f_lock; @@ -946,8 +946,7 @@ struct file { #ifdef CONFIG_EPOLL /* Used by fs/eventpoll.c to link all the hooks to this file */ - struct list_head f_ep_links; - struct list_head f_tfile_llink; + struct hlist_head *f_ep; #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; errseq_t f_wb_err; @@ -1580,8 +1579,24 @@ extern struct timespec64 current_time(struct inode *inode); * Snapshotting support. */ -void __sb_end_write(struct super_block *sb, int level); -int __sb_start_write(struct super_block *sb, int level, bool wait); +/* + * These are internal functions, please use sb_start_{write,pagefault,intwrite} + * instead.
+ */ +static inline void __sb_end_write(struct super_block *sb, int level) +{ + percpu_up_read(sb->s_writers.rw_sem + level-1); +} + +static inline void __sb_start_write(struct super_block *sb, int level) +{ + percpu_down_read(sb->s_writers.rw_sem + level - 1); +} + +static inline bool __sb_start_write_trylock(struct super_block *sb, int level) +{ + return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1); +} #define __sb_writers_acquired(sb, lev) \ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) @@ -1645,12 +1660,12 @@ static inline void sb_end_intwrite(struct super_block *sb) */ static inline void sb_start_write(struct super_block *sb) { - __sb_start_write(sb, SB_FREEZE_WRITE, true); + __sb_start_write(sb, SB_FREEZE_WRITE); } -static inline int sb_start_write_trylock(struct super_block *sb) +static inline bool sb_start_write_trylock(struct super_block *sb) { - return __sb_start_write(sb, SB_FREEZE_WRITE, false); + return __sb_start_write_trylock(sb, SB_FREEZE_WRITE); } /** @@ -1674,7 +1689,7 @@ static inline int sb_start_write_trylock(struct super_block *sb) */ static inline void sb_start_pagefault(struct super_block *sb) { - __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); + __sb_start_write(sb, SB_FREEZE_PAGEFAULT); } /* @@ -1692,12 +1707,12 @@ static inline void sb_start_pagefault(struct super_block *sb) */ static inline void sb_start_intwrite(struct super_block *sb) { - __sb_start_write(sb, SB_FREEZE_FS, true); + __sb_start_write(sb, SB_FREEZE_FS); } -static inline int sb_start_intwrite_trylock(struct super_block *sb) +static inline bool sb_start_intwrite_trylock(struct super_block *sb) { - return __sb_start_write(sb, SB_FREEZE_FS, false); + return __sb_start_write_trylock(sb, SB_FREEZE_FS); } @@ -2756,14 +2771,14 @@ static inline void file_start_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return; - __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); + sb_start_write(file_inode(file)->i_sb); } static inline bool file_start_write_trylock(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return true; - return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false); + return sb_start_write_trylock(file_inode(file)->i_sb); } static inline void file_end_write(struct file *file) @@ -3214,7 +3229,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma) { struct inode *inode; - if (!vma->vm_file) + if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file) return false; if (!vma_is_dax(vma)) return false; @@ -3285,7 +3300,7 @@ static inline ino_t parent_ino(struct dentry *dentry) */ struct simple_transaction_argresp { ssize_t size; - char data[0]; + char data[]; }; #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index a8f7a43f031b..d23156d1ac94 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -75,7 +75,7 @@ struct fscrypt_operations { static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { /* - * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info(). + * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info(). * I.e., another task may publish ->i_crypt_info concurrently, executing * a RELEASE barrier. We need to use smp_load_acquire() here to safely * ACQUIRE the memory the other task published. 
@@ -111,6 +111,35 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) dentry->d_flags &= ~DCACHE_NOKEY_NAME; } +/** + * fscrypt_is_nokey_name() - test whether a dentry is a no-key name + * @dentry: the dentry to check + * + * This returns true if the dentry is a no-key dentry. A no-key dentry is a + * dentry that was created in an encrypted directory that hasn't had its + * encryption key added yet. Such dentries may be either positive or negative. + * + * When a filesystem is asked to create a new filename in an encrypted directory + * and the new filename's dentry is a no-key dentry, it must fail the operation + * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(), + * ->rename(), and ->link(). (However, ->rename() and ->link() are already + * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().) + * + * This is necessary because creating a filename requires the directory's + * encryption key, but just checking for the key on the directory inode during + * the final filesystem operation doesn't guarantee that the key was available + * during the preceding dentry lookup. And the key must have already been + * available during the dentry lookup in order for it to have been checked + * whether the filename already exists in the directory and for the new file's + * dentry not to be invalidated due to it incorrectly having the no-key flag. + * + * Return: %true if the dentry is a no-key name + */ +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_NOKEY_NAME; +} + /* crypto.c */ void fscrypt_enqueue_decrypt_work(struct work_struct *); @@ -171,7 +200,6 @@ int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ -int fscrypt_get_encryption_info(struct inode *inode); int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode, bool *encrypt_ret); void fscrypt_put_encryption_info(struct inode *inode); @@ -213,6 +241,8 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, unsigned int flags); int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname); +int __fscrypt_prepare_readdir(struct inode *dir); +int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr); int fscrypt_prepare_setflags(struct inode *inode, unsigned int oldflags, unsigned int flags); int fscrypt_prepare_symlink(struct inode *dir, const char *target, @@ -244,6 +274,11 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) { } +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) +{ + return false; +} + /* crypto.c */ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) { @@ -372,10 +407,6 @@ static inline int fscrypt_ioctl_get_key_status(struct file *filp, } /* keysetup.c */ -static inline int fscrypt_get_encryption_info(struct inode *inode) -{ - return -EOPNOTSUPP; -} static inline int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode, @@ -503,6 +534,17 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, return -EOPNOTSUPP; } +static inline int __fscrypt_prepare_readdir(struct inode *dir) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_prepare_setattr(struct dentry *dentry, + struct iattr *attr) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_prepare_setflags(struct inode *inode, unsigned int oldflags, unsigned int flags) @@ -642,32 
+684,6 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) } /** - * fscrypt_require_key() - require an inode's encryption key - * @inode: the inode we need the key for - * - * If the inode is encrypted, set up its encryption key if not already done. - * Then require that the key be present and return -ENOKEY otherwise. - * - * No locks are needed, and the key will live as long as the struct inode --- so - * it won't go away from under you. - * - * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code - * if a problem occurred while setting up the encryption key. - */ -static inline int fscrypt_require_key(struct inode *inode) -{ - if (IS_ENCRYPTED(inode)) { - int err = fscrypt_get_encryption_info(inode); - - if (err) - return err; - if (!fscrypt_has_encryption_key(inode)) - return -ENOKEY; - } - return 0; -} - -/** * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted * directory * @old_dentry: an existing dentry for the inode being linked @@ -676,8 +692,7 @@ static inline int fscrypt_require_key(struct inode *inode) * * A new link can only be added to an encrypted directory if the directory's * encryption key is available --- since otherwise we'd have no way to encrypt - * the filename. Therefore, we first set up the directory's encryption key (if - * not already done) and return an error if it's unavailable. + * the filename. * * We also verify that the link will not violate the constraint that all files * in an encrypted directory tree use the same encryption policy. @@ -738,8 +753,9 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, * * Prepare for ->lookup() in a directory which may be encrypted by determining * the name that will actually be used to search the directory on-disk. If the - * directory's encryption key is available, then the lookup is assumed to be by - * plaintext name; otherwise, it is assumed to be by no-key name. + * directory's encryption policy is supported by this kernel and its encryption + * key is available, then the lookup is assumed to be by plaintext name; + * otherwise, it is assumed to be by no-key name. + * + * This also installs a custom ->d_revalidate() method which will invalidate the * dentry if it was created without the key and the key is later added. @@ -763,6 +779,26 @@ static inline int fscrypt_prepare_lookup(struct inode *dir, } /** + * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory + * @dir: the directory inode + * + * If the directory is encrypted and it doesn't already have its encryption key + * set up, try to set it up so that the filenames will be listed in plaintext + * form rather than in no-key form. + * + * Return: 0 on success; -errno on error. Note that the encryption key being + * unavailable is not considered an error. It is also not an error if + * the encryption policy is unsupported by this kernel; that is treated + * like the key being unavailable, so that files can still be deleted. + */ +static inline int fscrypt_prepare_readdir(struct inode *dir) +{ + if (IS_ENCRYPTED(dir)) + return __fscrypt_prepare_readdir(dir); + return 0; +} + +/** * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's * attributes * @dentry: dentry through which the inode is being changed @@ -783,8 +819,8 @@ static inline int fscrypt_prepare_lookup(struct inode *dir, static inline int fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr) { - if (attr->ia_valid & ATTR_SIZE) - return fscrypt_require_key(d_inode(dentry)); + if (IS_ENCRYPTED(d_inode(dentry))) + return __fscrypt_prepare_setattr(dentry, attr); return 0; }
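A hedged sketch of how a filesystem consumes fscrypt_prepare_readdir() and the fscrypt_is_nokey_name() check documented earlier (the myfs_* callbacks are hypothetical; the ->create() prototype matches this kernel era):

	static int myfs_iterate(struct file *file, struct dir_context *ctx)
	{
		int err = fscrypt_prepare_readdir(file_inode(file));

		if (err)
			return err;
		/* ... emit entries; without the key they appear as no-key names ... */
		return 0;
	}

	static int myfs_create(struct inode *dir, struct dentry *dentry,
			       umode_t mode, bool excl)
	{
		/* Creating in an encrypted dir must fail on no-key dentries,
		 * per the fscrypt_is_nokey_name() kerneldoc above. */
		if (fscrypt_is_nokey_name(dentry))
			return -ENOKEY;
		/* ... */
		return 0;
	}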
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 9506f8ec0974..fde4ad97564c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -10,14 +10,32 @@ #define _LINUX_FWNODE_H_ #include <linux/types.h> +#include <linux/list.h> struct fwnode_operations; struct device; +/* + * fwnode link flags + * + * LINKS_ADDED: The fwnode has already been parsed to add fwnode links. + */ +#define FWNODE_FLAG_LINKS_ADDED BIT(0) + struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; }; /** @@ -68,44 +86,8 @@ struct fwnode_reference_args { * endpoint node. * @graph_get_port_parent: Return the parent node of a port node. * @graph_parse_endpoint: Parse endpoint for port and endpoint id. - * @add_links: Called after the device corresponding to the fwnode is added - * using device_add(). The function is expected to create device - * links to all the suppliers of the device that are available at - * the time this function is called. The function must NOT stop - * at the first failed device link if other unlinked supplier - * devices are present in the system. This is necessary for the - * driver/bus sync_state() callbacks to work correctly. - * - * For example, say Device-C depends on suppliers Device-S1 and - * Device-S2 and the dependency is listed in that order in the - * firmware. Say, S1 gets populated from the firmware after - * late_initcall_sync(). Say S2 is populated and probed way - * before that in device_initcall(). When C is populated, if this - * add_links() function doesn't continue past a "failed linking to - * S1" and continue linking C to S2, then S2 will get a - * sync_state() callback before C is probed. This is because from - * the perspective of S2, C was never a consumer when its - * sync_state() evaluation is done. To avoid this, the add_links() - * function has to go through all available suppliers of the - * device (that corresponds to this fwnode) and link to them - * before returning. - * - * If some suppliers are not yet available (indicated by an error - * return value), this function will be called again when other - * devices are added to allow creating device links to any newly - * available suppliers. - * - * Return 0 if device links have been successfully created to all - * the known suppliers of this device or if the supplier - * information is not known. - * - * Return -ENODEV if the suppliers needed for probing this device - * have not been registered yet (because device links can only be - * created to devices registered with the driver core).
- * - * Return -EAGAIN if some of the suppliers of this device have not - * been registered yet, but none of those suppliers are necessary - * for probing the device. + * @add_links: Create fwnode links to all the suppliers of the fwnode. Return + * zero on success, a negative error code otherwise. */ struct fwnode_operations { struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); @@ -145,8 +127,7 @@ struct fwnode_operations { (*graph_get_port_parent)(struct fwnode_handle *fwnode); int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); - int (*add_links)(const struct fwnode_handle *fwnode, - struct device *dev); + int (*add_links)(struct fwnode_handle *fwnode); }; #define fwnode_has_op(fwnode, op) \ @@ -170,8 +151,16 @@ struct fwnode_operations { } while (false) #define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) +static inline void fwnode_init(struct fwnode_handle *fwnode, + const struct fwnode_operations *ops) +{ + fwnode->ops = ops; + INIT_LIST_HEAD(&fwnode->consumers); + INIT_LIST_HEAD(&fwnode->suppliers); +} + extern u32 fw_devlink_get_flags(void); -void fw_devlink_pause(void); -void fw_devlink_resume(void); +int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); +void fwnode_links_purge(struct fwnode_handle *fwnode); #endif diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 38f23d757013..03da3f603d30 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -315,7 +315,7 @@ static inline int get_disk_ro(struct gendisk *disk) extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); -void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size, +bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size, bool update_bdev); /* drivers/char/random.c */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index eeae59d3ceb7..35d21fddaf2d 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -89,7 +89,7 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) nla_get_u64, nla_put_u64_0pad, false) #define __str_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ - nla_strlcpy, nla_put, false) + nla_strscpy, nla_put, false) #define __bin_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \ nla_memcpy, nla_put, false) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c603237e006c..6e479e9c48ce 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -580,8 +580,6 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); -extern void free_unref_page(struct page *page); -extern void free_unref_page_list(struct list_head *list); struct page_frag_cache; extern void __page_frag_cache_drain(struct page *page, unsigned int count); diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h index 265a099cd3b8..12be1601fd84 100644 --- a/include/linux/goldfish.h +++ b/include/linux/goldfish.h @@ -13,9 +13,9 @@ static inline void gf_write_ptr(const void *ptr, void __iomem *portl, { const unsigned long addr = (unsigned long)ptr; - writel(lower_32_bits(addr), portl); + 
__raw_writel(lower_32_bits(addr), portl); #ifdef CONFIG_64BIT - writel(upper_32_bits(addr), porth); + __raw_writel(upper_32_bits(addr), porth); #endif } @@ -23,9 +23,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr, void __iomem *portl, void __iomem *porth) { - writel(lower_32_bits(addr), portl); + __raw_writel(lower_32_bits(addr), portl); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - writel(upper_32_bits(addr), porth); + __raw_writel(upper_32_bits(addr), porth); #endif } diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 754f67ac4326..7c9d6a2d7e90 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -32,9 +32,9 @@ static __always_inline void rcu_irq_enter_check_tick(void) */ #define __irq_enter() \ do { \ - account_irq_enter_time(current); \ preempt_count_add(HARDIRQ_OFFSET); \ lockdep_hardirq_enter(); \ + account_hardirq_enter(current); \ } while (0) /* @@ -62,8 +62,8 @@ void irq_enter_rcu(void); */ #define __irq_exit() \ do { \ + account_hardirq_exit(current); \ lockdep_hardirq_exit(); \ - account_irq_exit_time(current); \ preempt_count_sub(HARDIRQ_OFFSET); \ } while (0) diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h new file mode 100644 index 000000000000..1bbe96dc8be6 --- /dev/null +++ b/include/linux/highmem-internal.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HIGHMEM_INTERNAL_H +#define _LINUX_HIGHMEM_INTERNAL_H + +/* + * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft. + */ +#ifdef CONFIG_KMAP_LOCAL +void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot); +void *__kmap_local_page_prot(struct page *page, pgprot_t prot); +void kunmap_local_indexed(void *vaddr); +void kmap_local_fork(struct task_struct *tsk); +void __kmap_local_sched_out(void); +void __kmap_local_sched_in(void); +static inline void kmap_assert_nomap(void) +{ + DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx); +} +#else +static inline void kmap_local_fork(struct task_struct *tsk) { } +static inline void kmap_assert_nomap(void) { } +#endif + +#ifdef CONFIG_HIGHMEM +#include <asm/highmem.h> + +#ifndef ARCH_HAS_KMAP_FLUSH_TLB +static inline void kmap_flush_tlb(unsigned long addr) { } +#endif + +#ifndef kmap_prot +#define kmap_prot PAGE_KERNEL +#endif + +void *kmap_high(struct page *page); +void kunmap_high(struct page *page); +void __kmap_flush_unused(void); +struct page *__kmap_to_page(void *addr); + +static inline void *kmap(struct page *page) +{ + void *addr; + + might_sleep(); + if (!PageHighMem(page)) + addr = page_address(page); + else + addr = kmap_high(page); + kmap_flush_tlb((unsigned long)addr); + return addr; +} + +static inline void kunmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +static inline struct page *kmap_to_page(void *addr) +{ + return __kmap_to_page(addr); +} + +static inline void kmap_flush_unused(void) +{ + __kmap_flush_unused(); +} + +static inline void *kmap_local_page(struct page *page) +{ + return __kmap_local_page_prot(page, kmap_prot); +} + +static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot) +{ + return __kmap_local_page_prot(page, prot); +} + +static inline void *kmap_local_pfn(unsigned long pfn) +{ + return __kmap_local_pfn_prot(pfn, kmap_prot); +} + +static inline void __kunmap_local(void *vaddr) +{ + kunmap_local_indexed(vaddr); +} + +static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + preempt_disable(); + pagefault_disable(); + return 
__kmap_local_page_prot(page, prot); +} + +static inline void *kmap_atomic(struct page *page) +{ + return kmap_atomic_prot(page, kmap_prot); +} + +static inline void *kmap_atomic_pfn(unsigned long pfn) +{ + preempt_disable(); + pagefault_disable(); + return __kmap_local_pfn_prot(pfn, kmap_prot); +} + +static inline void __kunmap_atomic(void *addr) +{ + kunmap_local_indexed(addr); + pagefault_enable(); + preempt_enable(); +} + +unsigned int __nr_free_highpages(void); +extern atomic_long_t _totalhigh_pages; + +static inline unsigned int nr_free_highpages(void) +{ + return __nr_free_highpages(); +} + +static inline unsigned long totalhigh_pages(void) +{ + return (unsigned long)atomic_long_read(&_totalhigh_pages); +} + +static inline void totalhigh_pages_inc(void) +{ + atomic_long_inc(&_totalhigh_pages); +} + +static inline void totalhigh_pages_add(long count) +{ + atomic_long_add(count, &_totalhigh_pages); +} + +#else /* CONFIG_HIGHMEM */ + +static inline struct page *kmap_to_page(void *addr) +{ + return virt_to_page(addr); +} + +static inline void *kmap(struct page *page) +{ + might_sleep(); + return page_address(page); +} + +static inline void kunmap_high(struct page *page) { } +static inline void kmap_flush_unused(void) { } + +static inline void kunmap(struct page *page) +{ +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(page_address(page)); +#endif +} + +static inline void *kmap_local_page(struct page *page) +{ + return page_address(page); +} + +static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot) +{ + return kmap_local_page(page); +} + +static inline void *kmap_local_pfn(unsigned long pfn) +{ + return kmap_local_page(pfn_to_page(pfn)); +} + +static inline void __kunmap_local(void *addr) +{ +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(addr); +#endif +} + +static inline void *kmap_atomic(struct page *page) +{ + preempt_disable(); + pagefault_disable(); + return page_address(page); +} + +static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + return kmap_atomic(page); +} + +static inline void *kmap_atomic_pfn(unsigned long pfn) +{ + return kmap_atomic(pfn_to_page(pfn)); +} + +static inline void __kunmap_atomic(void *addr) +{ +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(addr); +#endif + pagefault_enable(); + preempt_enable(); +} + +static inline unsigned int nr_free_highpages(void) { return 0; } +static inline unsigned long totalhigh_pages(void) { return 0UL; } + +#endif /* CONFIG_HIGHMEM */ + +/* + * Prevent people trying to call kunmap_atomic() as if it were kunmap() + * kunmap_atomic() should get the return value of kmap_atomic, not the page. 
+ */ +#define kunmap_atomic(__addr) \ +do { \ + BUILD_BUG_ON(__same_type((__addr), struct page *)); \ + __kunmap_atomic(__addr); \ +} while (0) + +#define kunmap_local(__addr) \ +do { \ + BUILD_BUG_ON(__same_type((__addr), struct page *)); \ + __kunmap_local(__addr); \ +} while (0) + +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 14e6202ce47f..d2c70d3772a3 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -11,217 +11,137 @@ #include <asm/cacheflush.h> -#ifndef ARCH_HAS_FLUSH_ANON_PAGE -static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) -{ -} -#endif - -#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} -static inline void flush_kernel_vmap_range(void *vaddr, int size) -{ -} -static inline void invalidate_kernel_vmap_range(void *vaddr, int size) -{ -} -#endif - -#include <asm/kmap_types.h> - -#ifdef CONFIG_HIGHMEM -extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot); -extern void kunmap_atomic_high(void *kvaddr); -#include <asm/highmem.h> - -#ifndef ARCH_HAS_KMAP_FLUSH_TLB -static inline void kmap_flush_tlb(unsigned long addr) { } -#endif - -#ifndef kmap_prot -#define kmap_prot PAGE_KERNEL -#endif - -void *kmap_high(struct page *page); -static inline void *kmap(struct page *page) -{ - void *addr; - - might_sleep(); - if (!PageHighMem(page)) - addr = page_address(page); - else - addr = kmap_high(page); - kmap_flush_tlb((unsigned long)addr); - return addr; -} - -void kunmap_high(struct page *page); - -static inline void kunmap(struct page *page) -{ - might_sleep(); - if (!PageHighMem(page)) - return; - kunmap_high(page); -} +#include "highmem-internal.h" -/* - * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because - * no global lock is needed and because the kmap code must perform a global TLB - * invalidation when the kmap pool wraps. +/** + * kmap - Map a page for long term usage + * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping + * + * Can only be invoked from preemptible task context because on 32bit + * systems with CONFIG_HIGHMEM enabled this function might sleep. * - * However when holding an atomic kmap it is not legal to sleep, so atomic - * kmaps are appropriate for short, tight code paths only. + * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area + * this returns the virtual address of the direct kernel mapping. * - * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap - * gives a more generic (and caching) interface. But kmap_atomic can - * be used in IRQ contexts, so in some (very limited) cases we need - * it. + * The returned virtual address is globally visible and valid up to the + * point where it is unmapped via kunmap(). The pointer can be handed to + * other contexts. + * + * For highmem pages on 32bit systems this can be slow as the mapping space + * is limited and protected by a global lock. In case that there is no + * mapping slot available the function blocks until a slot is released via + * kunmap(). 
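+ *
+ * A minimal usage sketch (illustrative only; buf and len are hypothetical):
+ *
+ *	void *addr = kmap(page);
+ *	memcpy(addr, buf, len);
+ *	kunmap(page);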
*/ -static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) -{ - preempt_disable(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - return kmap_atomic_high_prot(page, prot); -} -#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot) +static inline void *kmap(struct page *page); -/* declarations for linux/mm/highmem.c */ -unsigned int nr_free_highpages(void); -extern atomic_long_t _totalhigh_pages; -static inline unsigned long totalhigh_pages(void) -{ - return (unsigned long)atomic_long_read(&_totalhigh_pages); -} - -static inline void totalhigh_pages_inc(void) -{ - atomic_long_inc(&_totalhigh_pages); -} - -static inline void totalhigh_pages_dec(void) -{ - atomic_long_dec(&_totalhigh_pages); -} - -static inline void totalhigh_pages_add(long count) -{ - atomic_long_add(count, &_totalhigh_pages); -} - -static inline void totalhigh_pages_set(long val) -{ - atomic_long_set(&_totalhigh_pages, val); -} - -void kmap_flush_unused(void); - -struct page *kmap_to_page(void *addr); - -#else /* CONFIG_HIGHMEM */ +/** + * kunmap - Unmap the virtual address mapped by kmap() + * @page: Pointer to the page which was mapped by kmap() + * + * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of + * pages in the low memory area. + */ +static inline void kunmap(struct page *page); -static inline unsigned int nr_free_highpages(void) { return 0; } +/** + * kmap_to_page - Get the page for a kmap'ed address + * @addr: The address to look up + * + * Returns: The page which is mapped to @addr. + */ +static inline struct page *kmap_to_page(void *addr); -static inline struct page *kmap_to_page(void *addr) -{ - return virt_to_page(addr); -} +/** + * kmap_flush_unused - Flush all unused kmap mappings in order to + * remove stray mappings + */ +static inline void kmap_flush_unused(void); -static inline unsigned long totalhigh_pages(void) { return 0UL; } +/** + * kmap_local_page - Map a page for temporary usage + * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping + * + * Can be invoked from any context. + * + * Requires careful handling when nesting multiple mappings because the map + * management is stack based. The unmap has to be in the reverse order of + * the map operation: + * + * addr1 = kmap_local_page(page1); + * addr2 = kmap_local_page(page2); + * ... + * kunmap_local(addr2); + * kunmap_local(addr1); + * + * Unmapping addr1 before addr2 is invalid and causes malfunction. + * + * Contrary to kmap() mappings the mapping is only valid in the context of + * the caller and cannot be handed to other contexts. + * + * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the + * virtual address of the direct mapping. Only real highmem pages are + * temporarily mapped. + * + * While it is significantly faster than kmap() for the highmem case it + * comes with restrictions about the pointer validity. Only use when really + * necessary. + * + * On HIGHMEM enabled systems mapping a highmem page has the side effect of + * disabling migration in order to keep the virtual address stable across + * preemption. No caller of kmap_local_page() can rely on this side effect. + */ +static inline void *kmap_local_page(struct page *page); -static inline void *kmap(struct page *page) -{ - might_sleep(); - return page_address(page); -} +/** + * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped + * + * Returns: The virtual address of the mapping + * + * Effectively a wrapper around kmap_local_page() which disables pagefaults + * and preemption. + * + * Do not use in new code. Use kmap_local_page() instead. + */ +static inline void *kmap_atomic(struct page *page); -static inline void kunmap_high(struct page *page) -{ -} +/** + * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() + * @addr: Virtual address to be unmapped + * + * Counterpart to kmap_atomic(). + * + * Effectively a wrapper around kunmap_local() which additionally undoes + * the side effects of kmap_atomic(), i.e. reenabling pagefaults and + * preemption. + */ -static inline void kunmap(struct page *page) -{ -#ifdef ARCH_HAS_FLUSH_ON_KUNMAP - kunmap_flush_on_unmap(page_address(page)); -#endif -} +/* Highmem related interfaces for management code */ +static inline unsigned int nr_free_highpages(void); +static inline unsigned long totalhigh_pages(void); -static inline void *kmap_atomic(struct page *page) +#ifndef ARCH_HAS_FLUSH_ANON_PAGE +static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { - preempt_disable(); - pagefault_disable(); - return page_address(page); } -#define kmap_atomic_prot(page, prot) kmap_atomic(page) - -static inline void kunmap_atomic_high(void *addr) -{ - /* - * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic() - * handles re-enabling faults + preemption - */ -#ifdef ARCH_HAS_FLUSH_ON_KUNMAP - kunmap_flush_on_unmap(addr); #endif -} - -#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) -#define kmap_flush_unused() do {} while(0) - -#endif /* CONFIG_HIGHMEM */ - -#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -DECLARE_PER_CPU(int, __kmap_atomic_idx); - -static inline int kmap_atomic_idx_push(void) +#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +static inline void flush_kernel_dcache_page(struct page *page) { - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - -#ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= KM_TYPE_NR); -#endif - return idx; } - -static inline int kmap_atomic_idx(void) +static inline void flush_kernel_vmap_range(void *vaddr, int size) { - return __this_cpu_read(__kmap_atomic_idx) - 1; } - -static inline void kmap_atomic_idx_pop(void) +static inline void invalidate_kernel_vmap_range(void *vaddr, int size) { -#ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); -#else - __this_cpu_dec(__kmap_atomic_idx); -#endif } - #endif -/* - * Prevent people trying to call kunmap_atomic() as if it were kunmap() - * kunmap_atomic() should get the return value of kmap_atomic, not the page. - */ -#define kunmap_atomic(addr) \ -do { \ - BUILD_BUG_ON(__same_type((addr), struct page *)); \ - kunmap_atomic_high(addr); \ - pagefault_enable(); \ - preempt_enable(); \ -} while (0) - - /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) @@ -284,13 +204,22 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } +/* + * If we pass in a base or tail page, we can zero up to PAGE_SIZE. + * If we pass in a head page, we can zero up to the size of the compound page. 
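+ *
+ * For example (illustrative), zero_user_segments(page, 0, off, off + len,
+ * page_size(page)) clears the whole page except the bytes in
+ * [off, off + len).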
+ */ +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +void zero_user_segments(struct page *page, unsigned start1, unsigned end1, + unsigned start2, unsigned end2); +#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ static inline void zero_user_segments(struct page *page, - unsigned start1, unsigned end1, - unsigned start2, unsigned end2) + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) { void *kaddr = kmap_atomic(page); + unsigned int i; - BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); + BUG_ON(end1 > page_size(page) || end2 > page_size(page)); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); @@ -299,8 +228,10 @@ static inline void zero_user_segments(struct page *page, memset(kaddr + start2, 0, end2 - start2); kunmap_atomic(kaddr); - flush_dcache_page(page); + for (i = 0; i < compound_nr(page); i++) + flush_dcache_page(page + i); } +#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 107cedd7019a..bb5e7b0a4274 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -447,6 +447,10 @@ static inline void hrtimer_restart(struct hrtimer *timer) /* Query timers: */ extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); +/** + * hrtimer_get_remaining - get remaining time for the timer + * @timer: the timer to read + */ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) { return __hrtimer_get_remaining(timer, false); @@ -458,7 +462,7 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); extern bool hrtimer_active(const struct hrtimer *timer); /** - * hrtimer_is_queued = check, whether the timer is on one of the queues + * hrtimer_is_queued - check, whether the timer is on one of the queues * @timer: Timer to check * * Returns: True if the timer is queued, false otherwise diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 0365aa97f8e7..6a19f35f836b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -7,43 +7,37 @@ #include <linux/fs.h> /* only for vma_is_dax() */ -extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); -extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, - pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, - struct vm_area_struct *vma); -extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); -extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, - pud_t *dst_pud, pud_t *src_pud, unsigned long addr, - struct vm_area_struct *vma); +vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); +int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, + struct vm_area_struct *vma); +void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pud_t *dst_pud, pud_t *src_pud, unsigned long addr, + struct vm_area_struct *vma); #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); +void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); #else static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) { } #endif -extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); -extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, - 
unsigned long addr, - pmd_t *pmd, - unsigned int flags); -extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, - struct vm_area_struct *vma, - pmd_t *pmd, unsigned long addr, unsigned long next); -extern int zap_huge_pmd(struct mmu_gather *tlb, - struct vm_area_struct *vma, - pmd_t *pmd, unsigned long addr); -extern int zap_huge_pud(struct mmu_gather *tlb, - struct vm_area_struct *vma, - pud_t *pud, unsigned long addr); -extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, - pmd_t *old_pmd, pmd_t *new_pmd); -extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long addr, pgprot_t newprot, - unsigned long cp_flags); +vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); +struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmd, + unsigned int flags); +bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr, unsigned long next); +int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr); +int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, + unsigned long addr); +bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); +int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, + pgprot_t newprot, unsigned long cp_flags); vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, pgprot_t pgprot, bool write); @@ -100,13 +94,13 @@ enum transparent_hugepage_flag { struct kobject; struct kobj_attribute; -extern ssize_t single_hugepage_flag_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count, - enum transparent_hugepage_flag flag); -extern ssize_t single_hugepage_flag_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf, - enum transparent_hugepage_flag flag); +ssize_t single_hugepage_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag); +ssize_t single_hugepage_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) @@ -179,12 +173,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) -extern unsigned long thp_get_unmapped_area(struct file *filp, - unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags); +unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); -extern void prep_transhuge_page(struct page *page); -extern void free_transhuge_page(struct page *page); +void prep_transhuge_page(struct page *page); +void free_transhuge_page(struct page *page); bool is_transparent_hugepage(struct page *page); bool can_split_huge_page(struct page *page, int *pextra_pins); @@ -222,16 +215,12 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, __split_huge_pud(__vma, __pud, __address); \ } while (0) -extern int hugepage_madvise(struct vm_area_struct *vma, - unsigned long *vm_flags, int advice); -extern void vma_adjust_trans_huge(struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - long adjust_next); -extern 
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, - struct vm_area_struct *vma); -extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, - struct vm_area_struct *vma); +int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, + int advice); +void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, + unsigned long end, long adjust_next); +spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); +spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); static inline int is_swap_pmd(pmd_t pmd) { @@ -294,7 +283,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap); -extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); +vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 770408b2fdaf..72ff75fb1971 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1261,6 +1261,7 @@ struct ieee80211_mgmt { #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 #define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 +#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -3417,6 +3418,8 @@ struct ieee80211_multiple_bssid_configuration { #define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19) #define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20) +#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2) + #define WLAN_MAX_KEY_LEN 32 #define WLAN_PMK_NAME_LEN 16 @@ -3427,6 +3430,7 @@ struct ieee80211_multiple_bssid_configuration { #define WLAN_OUI_WFA 0x506f9a #define WLAN_OUI_TYPE_WFA_P2P 9 +#define WLAN_OUI_TYPE_WFA_DPP 0x1A #define WLAN_OUI_MICROSOFT 0x0050f2 #define WLAN_OUI_TYPE_MICROSOFT_WPA 1 #define WLAN_OUI_TYPE_MICROSOFT_WMM 2 @@ -3832,15 +3836,15 @@ static inline bool for_each_element_completed(const struct element *element, #define WLAN_RSNX_CAPA_SAE_H2E BIT(5) /* - * reduced neighbor report, based on Draft P802.11ax_D5.0, - * section 9.4.2.170 + * reduced neighbor report, based on Draft P802.11ax_D6.1, + * section 9.4.2.170 and accepted contributions. */ #define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03 #define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04 #define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08 #define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 8 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 12 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13 #define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01 #define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 556caed00258..b979005ea39c 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -25,6 +25,7 @@ struct br_ip { #if IS_ENABLED(CONFIG_IPV6) struct in6_addr ip6; #endif + unsigned char mac_addr[ETH_ALEN]; } dst; __be16 proto; __u16 vid; diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h deleted file mode 100644 index 52224de798aa..000000000000 --- a/include/linux/if_frad.h +++ /dev/null @@ -1,92 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * DLCI/FRAD Definitions for Frame Relay Access Devices. 
DLCI devices are - * created for each DLCI associated with a FRAD. The FRAD driver - * is not truly a network device, but the lower level device - * handler. This allows other FRAD manufacturers to use the DLCI - * code, including its RFC1490 encapsulation alongside the current - * implementation for the Sangoma cards. - * - * Version: @(#)if_ifrad.h 0.15 31 Mar 96 - * - * Author: Mike McLagan <mike.mclagan@linux.org> - * - * Changes: - * 0.15 Mike McLagan changed structure defs (packed) - * re-arranged flags - * added DLCI_RET vars - */ -#ifndef _FRAD_H_ -#define _FRAD_H_ - -#include <uapi/linux/if_frad.h> - - -#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) - -/* these are the fields of an RFC 1490 header */ -struct frhdr -{ - unsigned char control; - - /* for IP packets, this can be the NLPID */ - unsigned char pad; - - unsigned char NLPID; - unsigned char OUI[3]; - __be16 PID; - -#define IP_NLPID pad -} __packed; - -/* see RFC 1490 for the definition of the following */ -#define FRAD_I_UI 0x03 - -#define FRAD_P_PADDING 0x00 -#define FRAD_P_Q933 0x08 -#define FRAD_P_SNAP 0x80 -#define FRAD_P_CLNP 0x81 -#define FRAD_P_IP 0xCC - -struct dlci_local -{ - struct net_device *master; - struct net_device *slave; - struct dlci_conf config; - int configured; - struct list_head list; - - /* callback function */ - void (*receive)(struct sk_buff *skb, struct net_device *); -}; - -struct frad_local -{ - /* devices which this FRAD is slaved to */ - struct net_device *master[CONFIG_DLCI_MAX]; - short dlci[CONFIG_DLCI_MAX]; - - struct frad_conf config; - int configured; /* has this device been configured */ - int initialized; /* mem_start, port, irq set ? */ - - /* callback functions */ - int (*activate)(struct net_device *, struct net_device *); - int (*deactivate)(struct net_device *, struct net_device *); - int (*assoc)(struct net_device *, struct net_device *); - int (*deassoc)(struct net_device *, struct net_device *); - int (*dlci_conf)(struct net_device *, struct net_device *, int get); - - /* fields that are used by the Sangoma SDLA cards */ - struct timer_list timer; - struct net_device *dev; - int type; /* adapter type */ - int state; /* state of the S502/8 control latch */ - int buffer; /* current buffer for S508 firmware */ -}; - -#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */ - -extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)); - -#endif diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index a367ead4bf4b..96556c64c95d 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -30,6 +30,7 @@ struct macvlan_dev { enum macvlan_mode mode; u16 flags; unsigned int macaddr_count; + u32 bc_queue_len_req; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index a3a838dcf8e4..7199280d89ca 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -79,8 +79,12 @@ struct ad_sigma_delta { /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. + * 'tx_buf' is up to 32 bits. + * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp, + * rounded to 16 bytes to take into account padding. 
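+ * (A 4 byte sample padded to the 8 byte timestamp alignment, plus the
+ * 8 byte timestamp itself, gives the 16 bytes.)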
*/ - uint8_t data[4] ____cacheline_aligned; + uint8_t tx_buf[4] ____cacheline_aligned; + uint8_t rx_buf[16] __aligned(8); }; static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd, diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index fbba4093f06c..8febc23f5f26 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -11,9 +11,6 @@ struct iio_buffer; -void iio_buffer_set_attrs(struct iio_buffer *buffer, - const struct attribute **attrs); - int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); /** diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index f2e94196d31f..07c5a8e52ca8 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -11,6 +11,7 @@ * @channel_attr_list: keep track of automatically created channel * attributes * @chan_attr_group: group for all attrs in base directory + * @ioctl_handlers: ioctl handlers registered with the core handler * @debugfs_dentry: device specific debugfs dentry * @cached_reg_addr: cached register address for debugfs reads * @read_buf: read buffer to be used for the initial reg read @@ -22,6 +23,7 @@ struct iio_dev_opaque { struct list_head buffer_list; struct list_head channel_attr_list; struct attribute_group chan_attr_group; + struct list_head ioctl_handlers; #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f015fa185253..e4a9822e6495 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -362,6 +362,8 @@ struct iio_trigger; /* forward declaration */ * and max. For lists, all possible values are enumerated. * @write_raw: function to write a value to the device. * Parameters are the same as for read_raw. + * @read_label: function to request label name for a specified label, + * for better channel identification. * @write_raw_get_fmt: callback function to query the expected * format/precision. If not set by the driver, write_raw * returns IIO_VAL_INT_PLUS_MICRO. @@ -420,6 +422,10 @@ struct iio_info { int val2, long mask); + int (*read_label)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + char *label); + int (*write_raw_get_fmt)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, long mask); diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index cad8325903f9..055890b6ffcf 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -21,7 +21,7 @@ struct iio_trigger; /** * struct iio_trigger_ops - operations structure for an iio_trigger. * @set_trigger_state: switch on/off the trigger on demand - * @try_reenable: function to reenable the trigger when the + * @reenable: function to reenable the trigger when the * use count is zero (may be NULL) * @validate_device: function to validate the device when the * current trigger gets changed. 
@@ -31,7 +31,7 @@ struct iio_trigger; **/ struct iio_trigger_ops { int (*set_trigger_state)(struct iio_trigger *trig, bool state); - int (*try_reenable)(struct iio_trigger *trig); + void (*reenable)(struct iio_trigger *trig); int (*validate_device)(struct iio_trigger *trig, struct iio_dev *indio_dev); }; @@ -97,7 +97,7 @@ static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig) } /** - * iio_device_set_drvdata() - Set trigger driver data + * iio_trigger_set_drvdata() - Set trigger driver data * @trig: IIO trigger structure * @data: Driver specific data * diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h index e99c91799359..7f154d1f8739 100644 --- a/include/linux/iio/triggered_buffer.h +++ b/include/linux/iio/triggered_buffer.h @@ -4,19 +4,28 @@ #include <linux/interrupt.h> +struct attribute; struct iio_dev; struct iio_buffer_setup_ops; -int iio_triggered_buffer_setup(struct iio_dev *indio_dev, +int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), - const struct iio_buffer_setup_ops *setup_ops); + const struct iio_buffer_setup_ops *setup_ops, + const struct attribute **buffer_attrs); void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); -int devm_iio_triggered_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - irqreturn_t (*h)(int irq, void *p), - irqreturn_t (*thread)(int irq, void *p), - const struct iio_buffer_setup_ops *ops); +#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \ + iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), (setup_ops), NULL) + +int devm_iio_triggered_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p), + const struct iio_buffer_setup_ops *ops, + const struct attribute **buffer_attrs); + +#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \ + devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), (setup_ops), NULL) #endif diff --git a/include/linux/ima.h b/include/linux/ima.h index 8fa7bcfb2da2..7233a2751754 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -29,6 +29,7 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); +extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size); extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); #ifdef CONFIG_IMA_KEXEC @@ -115,6 +116,11 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size) return -EOPNOTSUPP; } +static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) +{ + return -EOPNOTSUPP; +} + static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} #endif /* CONFIG_IMA */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 3515ca64e638..53aa0343bf69 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -105,7 +105,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) #define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) -#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN) +#define IN_DEV_PROXY_ARP_PVLAN(in_dev) 
IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN) #define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) #define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) #define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ @@ -126,7 +126,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) #define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ - IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) + IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h deleted file mode 100644 index 14821fd231c0..000000000000 --- a/include/linux/input-polldev.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _INPUT_POLLDEV_H -#define _INPUT_POLLDEV_H - -/* - * Copyright (c) 2007 Dmitry Torokhov - */ - -#include <linux/input.h> -#include <linux/workqueue.h> - -/** - * struct input_polled_dev - simple polled input device - * @private: private driver data. - * @open: driver-supplied method that prepares device for polling - * (enabled the device and maybe flushes device state). - * @close: driver-supplied method that is called when device is no - * longer being polled. Used to put device into low power mode. - * @poll: driver-supplied method that polls the device and posts - * input events (mandatory). - * @poll_interval: specifies how often the poll() method should be called. - * Defaults to 500 msec unless overridden when registering the device. - * @poll_interval_max: specifies upper bound for the poll interval. - * Defaults to the initial value of @poll_interval. - * @poll_interval_min: specifies lower bound for the poll interval. - * Defaults to 0. - * @input: input device structure associated with the polled device. - * Must be properly initialized by the driver (id, name, phys, bits). - * - * Polled input device provides a skeleton for supporting simple input - * devices that do not raise interrupts but have to be periodically - * scanned or polled to detect changes in their state. - */ -struct input_polled_dev { - void *private; - - void (*open)(struct input_polled_dev *dev); - void (*close)(struct input_polled_dev *dev); - void (*poll)(struct input_polled_dev *dev); - unsigned int poll_interval; /* msec */ - unsigned int poll_interval_max; /* msec */ - unsigned int poll_interval_min; /* msec */ - - struct input_dev *input; - -/* private: */ - struct delayed_work work; - - bool devres_managed; -}; - -struct input_polled_dev *input_allocate_polled_device(void); -struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev); -void input_free_polled_device(struct input_polled_dev *dev); -int input_register_polled_device(struct input_polled_dev *dev); -void input_unregister_polled_device(struct input_polled_dev *dev); - -#endif diff --git a/include/linux/input.h b/include/linux/input.h index 56f2fd32e609..0354b298d874 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -90,9 +90,11 @@ enum input_clock_type { * @open: this method is called when the very first user calls * input_open_device(). The driver must prepare the device * to start generating events (start polling thread, - * request an IRQ, submit URB, etc.) + * request an IRQ, submit URB, etc.). The meaning of open() is + * to start providing events to the input core. 
* @close: this method is called when the very last user calls - * input_close_device(). + * input_close_device(). The meaning of close() is to stop + * providing events to the input core. * @flush: purges the device. Most commonly used to get rid of force * feedback effects loaded into the device when disconnecting * from it @@ -127,6 +129,10 @@ enum input_clock_type { * and needs not be explicitly unregistered or freed. * @timestamp: storage for a timestamp set by input_set_timestamp called * by a driver + * @inhibited: indicates that the input device is inhibited. If that is + * the case then input core ignores any events generated by the device. + * Device's close() is called when it is being inhibited and its open() + * is called when it is being uninhibited. */ struct input_dev { const char *name; @@ -201,6 +207,8 @@ struct input_dev { bool devres_managed; ktime_t timestamp[INPUT_CLK_MAX]; + + bool inhibited; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -502,6 +510,8 @@ bool input_match_device_id(const struct input_dev *dev, void input_enable_softrepeat(struct input_dev *dev, int delay, int period); +bool input_device_enabled(struct input_dev *dev); + extern struct class input_class; /** diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h index c0523af96893..0e4742c8c81e 100644 --- a/include/linux/input/adp5589.h +++ b/include/linux/input/adp5589.h @@ -175,13 +175,6 @@ struct i2c_client; /* forward declaration */ struct adp5589_gpio_platform_data { int gpio_start; /* GPIO Chip base # */ - int (*setup)(struct i2c_client *client, - int gpio, unsigned ngpio, - void *context); - int (*teardown)(struct i2c_client *client, - int gpio, unsigned ngpio, - void *context); - void *context; }; #endif diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index fbf5b3e7707e..d956987ed032 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -798,7 +798,6 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); extern int dmar_disabled; extern int intel_iommu_enabled; -extern int intel_iommu_tboot_noforce; extern int intel_iommu_gfx_mapped; #else static inline int iommu_calculate_agaw(struct intel_iommu *iommu) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index ee8299eb1f52..870b3251e174 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -352,6 +352,8 @@ extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); +extern int irq_update_affinity_desc(unsigned int irq, + struct irq_affinity_desc *affinity); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); @@ -387,6 +389,12 @@ static inline int irq_set_affinity_hint(unsigned int irq, return -EINVAL; } +static inline int irq_update_affinity_desc(unsigned int irq, + struct irq_affinity_desc *affinity) +{ + return -EINVAL; +} + static inline int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index c75e4d3d8833..c093e81310a9 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -69,13 +69,32 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; - return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), 
mapping->prot); + preempt_disable(); + pagefault_disable(); + return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); } static inline void io_mapping_unmap_atomic(void __iomem *vaddr) { - iounmap_atomic(vaddr); + kunmap_local_indexed((void __force *)vaddr); + pagefault_enable(); + preempt_enable(); +} + +static inline void __iomem * +io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) +{ + resource_size_t phys_addr; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot); +} + +static inline void io_mapping_unmap_local(void __iomem *vaddr) +{ + kunmap_local_indexed((void __force *)vaddr); } static inline void __iomem * @@ -97,7 +116,7 @@ io_mapping_unmap(void __iomem *vaddr) iounmap(vaddr); } -#else +#else /* HAVE_ATOMIC_IOMAP */ #include <linux/uaccess.h> @@ -162,7 +181,18 @@ io_mapping_unmap_atomic(void __iomem *vaddr) preempt_enable(); } -#endif /* HAVE_ATOMIC_IOMAP */ +static inline void __iomem * +io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset) +{ + return io_mapping_map_wc(mapping, offset, PAGE_SIZE); +} + +static inline void io_mapping_unmap_local(void __iomem *vaddr) +{ + io_mapping_unmap(vaddr); +} + +#endif /* !HAVE_ATOMIC_IOMAP */ static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 4cde111e425b..fb4d5a763e0c 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -86,6 +86,9 @@ struct io_pgtable_cfg { * * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table * for use in the upper half of a split address space. + * + * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability + * attributes set in the TCR for a non-coherent page-table walker. */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) @@ -93,6 +96,7 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) + #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; @@ -208,6 +212,10 @@ struct io_pgtable { #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) +struct io_pgtable_domain_attr { + unsigned long quirks; +}; + static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { iop->cfg.tlb->tlb_flush_all(iop->cookie); diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 868364cea3b7..35b2d845704d 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -30,7 +30,8 @@ struct io_uring_task { struct percpu_counter inflight; struct io_identity __identity; struct io_identity *identity; - bool in_idle; + atomic_t in_idle; + bool sqpoll; }; #if defined(CONFIG_IO_URING) diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 172b3397a1a3..5bd3cac4df9c 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -221,7 +221,7 @@ struct iomap_writeback_ops { * Optional, allows the file system to discard state on a page where * we failed to submit any I/O. 
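+ * The file offset of the page is now passed in as well (the @fileoff
+ * argument added below), presumably so the file system can limit the
+ * discard to the part of the page that failed.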
*/ - void (*discard_page)(struct page *page); + void (*discard_page)(struct page *page, loff_t fileoff); }; struct iomap_writepage_ctx { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b95a6f8db6ff..ffaa389ea128 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -118,6 +118,7 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, + DOMAIN_ATTR_IO_PGTABLE_CFG, DOMAIN_ATTR_MAX, }; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 5135d4b86cd6..fe48b7840665 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -10,9 +10,10 @@ #define _LINUX_IOPORT_H #ifndef __ASSEMBLY__ +#include <linux/bits.h> #include <linux/compiler.h> +#include <linux/minmax.h> #include <linux/types.h> -#include <linux/bits.h> /* * Resources are tree-like, allowing * nesting etc.. @@ -229,6 +230,31 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2) return r1->start <= r2->start && r1->end >= r2->end; } +/* True if any part of r1 overlaps r2 */ +static inline bool resource_overlaps(struct resource *r1, struct resource *r2) +{ + return r1->start <= r2->end && r1->end >= r2->start; +} + +static inline bool +resource_intersection(struct resource *r1, struct resource *r2, struct resource *r) +{ + if (!resource_overlaps(r1, r2)) + return false; + r->start = max(r1->start, r2->start); + r->end = min(r1->end, r2->end); + return true; +} + +static inline bool +resource_union(struct resource *r1, struct resource *r2, struct resource *r) +{ + if (!resource_overlaps(r1, r2)) + return false; + r->start = min(r1->start, r2->start); + r->end = max(r1->end, r2->end); + return true; +} /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) @@ -296,17 +322,18 @@ extern int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)); -/* True if any part of r1 overlaps r2 */ -static inline bool resource_overlaps(struct resource *r1, struct resource *r2) -{ - return (r1->start <= r2->end && r1->end >= r2->start); -} - struct resource *devm_request_free_mem_region(struct device *dev, struct resource *base, unsigned long size); struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); +static inline void irqresource_disabled(struct resource *res, u32 irq) +{ + res->start = irq; + res->end = irq; + res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; +} + #ifdef CONFIG_IO_STRICT_DEVMEM void revoke_devmem(struct resource *res); #else diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index a06a78c67f19..05e22770af51 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -27,7 +27,6 @@ struct ipc_ids { }; struct ipc_namespace { - refcount_t count; struct ipc_ids ids[3]; int sem_ctls[4]; @@ -128,7 +127,7 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags, static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) { if (ns) - refcount_inc(&ns->count); + refcount_inc(&ns->ns.count); return ns; } diff --git a/include/linux/irq.h b/include/linux/irq.h index c54365309e97..c332871d59da 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -647,7 +647,6 @@ static inline int irq_set_parent(int irq, int parent_irq) */ extern void handle_level_irq(struct irq_desc *desc); extern void 
handle_fasteoi_irq(struct irq_desc *desc); -extern void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); @@ -954,21 +953,6 @@ static inline void irq_free_desc(unsigned int irq) irq_free_descs(irq, 1); } -#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ -unsigned int irq_alloc_hwirqs(int cnt, int node); -static inline unsigned int irq_alloc_hwirq(int node) -{ - return irq_alloc_hwirqs(1, node); -} -void irq_free_hwirqs(unsigned int from, int cnt); -static inline void irq_free_hwirq(unsigned int irq) -{ - return irq_free_hwirqs(irq, 1); -} -int arch_setup_hwirq(unsigned int irq, int node); -void arch_teardown_hwirq(unsigned int irq); -#endif - #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq); #endif diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h deleted file mode 100644 index 6e8895cd4d92..000000000000 --- a/include/linux/irq_cpustat.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __irq_cpustat_h -#define __irq_cpustat_h - -/* - * Contains default mappings for irq_cpustat_t, used by almost every - * architecture. Some arch (like s390) have per cpu hardware pages and - * they define their own mappings for irq_stat. - * - * Keith Owens <kaos@ocs.com.au> July 2000. - */ - - -/* - * Simple wrappers reducing source bloat. Define all irq_stat fields - * here, even ones that are arch dependent. That way we get common - * definitions instead of differing sets for each arch. - */ - -#ifndef __ARCH_IRQ_STAT -DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */ -#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu)) -#endif - -/* arch dependent irq_stat fields */ -#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ - -#endif /* __irq_cpustat_h */ diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 30823780c192..ec2a47a81e42 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -14,28 +14,37 @@ */ struct irq_work { - union { - struct __call_single_node node; - struct { - struct llist_node llnode; - atomic_t flags; - }; - }; + struct __call_single_node node; void (*func)(struct irq_work *); }; +#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \ + .node = { .u_flags = (_flags), }, \ + .func = (_func), \ +} + +#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0) +#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY) +#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ) + +#define DEFINE_IRQ_WORK(name, _f) \ + struct irq_work name = IRQ_WORK_INIT(_f) + static inline void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) { - atomic_set(&work->flags, 0); - work->func = func; + *work = IRQ_WORK_INIT(func); } -#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \ - .flags = ATOMIC_INIT(0), \ - .func = (_f) \ +static inline bool irq_work_is_pending(struct irq_work *work) +{ + return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING; } +static inline bool irq_work_is_busy(struct irq_work *work) +{ + return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY; +} bool irq_work_queue(struct irq_work *work); bool irq_work_queue_on(struct irq_work *work, int cpu); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 71535e87109f..42d196805f58 100644 --- a/include/linux/irqdomain.h +++ 
b/include/linux/irqdomain.h @@ -37,8 +37,8 @@ #include <linux/radix-tree.h> struct device_node; +struct fwnode_handle; struct irq_domain; -struct of_device_id; struct irq_chip; struct irq_data; struct cpumask; @@ -271,6 +271,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); +struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + irq_hw_number_t first_hwirq, + const struct irq_domain_ops *ops, + void *host_data); extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern bool irq_domain_check_msi_remap(void); @@ -381,14 +387,20 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, extern void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); -extern void irq_domain_disassociate(struct irq_domain *domain, - unsigned int irq); -extern unsigned int irq_create_mapping(struct irq_domain *host, - irq_hw_number_t hwirq); +extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity); extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); +static inline unsigned int irq_create_mapping(struct irq_domain *host, + irq_hw_number_t hwirq) +{ + return irq_create_mapping_affinity(host, hwirq, NULL); +} + + /** * irq_linear_revmap() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 3ed4e8771b64..8de0e1373de7 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -107,14 +107,14 @@ do { \ current->irq_config = 0; \ } while (0) -# define lockdep_irq_work_enter(__work) \ +# define lockdep_irq_work_enter(_flags) \ do { \ - if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\ + if (!((_flags) & IRQ_WORK_HARD_IRQ)) \ current->irq_config = 1; \ } while (0) -# define lockdep_irq_work_exit(__work) \ +# define lockdep_irq_work_exit(_flags) \ do { \ - if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\ + if (!((_flags) & IRQ_WORK_HARD_IRQ)) \ current->irq_config = 0; \ } while (0) diff --git a/include/linux/iversion.h b/include/linux/iversion.h index 2917ef990d43..3bfebde5a1a6 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -328,6 +328,19 @@ inode_query_iversion(struct inode *inode) return cur >> I_VERSION_QUERIED_SHIFT; } +/* + * For filesystems without any sort of change attribute, the best we can + * do is fake one up from the ctime: + */ +static inline u64 time_to_chattr(struct timespec64 *t) +{ + u64 chattr = t->tv_sec; + + chattr <<= 32; + chattr += t->tv_nsec; + return chattr; +} + /** * inode_eq_iversion_raw - check whether the raw i_version counter has changed * @inode: inode to check diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index fb3d71ad6eea..578ff196b3ce 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -68,6 +68,7 @@ extern void *jbd2_alloc(size_t size, gfp_t flags); extern void jbd2_free(void *ptr, size_t size); #define JBD2_MIN_JOURNAL_BLOCKS 1024 +#define JBD2_MIN_FC_BLOCKS 256 #ifdef __KERNEL__ @@ -263,7 +264,10 @@ typedef struct journal_superblock_s /* 0x0050 */ __u8 s_checksum_type; /* checksum type */ __u8 s_padding2[3]; - __u32 
s_padding[42]; +/* 0x0054 */ + __be32 s_num_fc_blks; /* Number of fast commit blocks */ +/* 0x0058 */ + __u32 s_padding[41]; __be32 s_checksum; /* crc32c(superblock) */ /* 0x0100 */ @@ -397,7 +401,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** - * struct jbd_inode - The jbd_inode type is the structure linking inodes in + * struct jbd2_inode - The jbd_inode type is the structure linking inodes in * ordered mode present in a transaction so that we can sync them during commit. */ struct jbd2_inode { @@ -941,8 +945,9 @@ struct journal_s /** * @j_fc_off: * - * Number of fast commit blocks currently allocated. - * [j_state_lock]. + * Number of fast commit blocks currently allocated. Accessed only + * during fast commit. Currently only one process can do a fast commit, + * so this field is not protected by any lock. */ unsigned long j_fc_off; @@ -985,9 +990,9 @@ struct journal_s struct block_device *j_fs_dev; /** - * @j_maxlen: Total maximum capacity of the journal region on disk. + * @j_total_len: Total maximum capacity of the journal region on disk. */ - unsigned int j_maxlen; + unsigned int j_total_len; /** * @j_reserved_credits: @@ -1105,8 +1110,9 @@ struct journal_s struct buffer_head **j_wbuf; /** - * @j_fc_wbuf: Array of fast commit bhs for - * jbd2_journal_commit_transaction. + * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only + * during a fast commit. Currently only one process can do a fast commit, + * so this field is not protected by any lock. */ struct buffer_head **j_fc_wbuf; @@ -1253,7 +1259,7 @@ struct journal_s */ void (*j_fc_cleanup_callback)(struct journal_s *journal, int); - /* + /** * @j_fc_replay_callback: * * File-system specific function that performs replay of a fast @@ -1611,16 +1617,20 @@ extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); extern int jbd2_cleanup_journal_tail(journal_t *); /* Fast commit related APIs */ -int jbd2_fc_init(journal_t *journal, int num_fc_blks); int jbd2_fc_begin_commit(journal_t *journal, tid_t tid); int jbd2_fc_end_commit(journal_t *journal); -int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid); +int jbd2_fc_end_commit_fallback(journal_t *journal); int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out); int jbd2_submit_inode_data(struct jbd2_inode *jinode); int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); int jbd2_fc_release_bufs(journal_t *journal); +static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal) +{ + return (journal->j_total_len - journal->j_fc_wbufsize) / 4; +} + /* * is_journal_abort * diff --git a/include/linux/jhash.h b/include/linux/jhash.h index cfb62e9f37be..ab7f8c152b89 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -99,6 +99,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) case 2: a += (u32)k[1]<<8; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); + break; case 0: /* Nothing left to add */ break; } @@ -136,6 +137,7 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) case 2: b += k[1]; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); + break; case 0: /* Nothing left to add */ break; } diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index bb2246c8ec13..82f29aa35062 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h @@ -9,9 +9,6 @@ extern struct tasklet_struct keyboard_tasklet; extern char
*func_table[MAX_NR_FUNC]; -extern char func_buf[]; -extern char *funcbufptr; -extern int funcbufsize, funcbufleft; /* * kbd->xxx contains the VC-local things (flag settings etc..) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f05e9128201..dbf6018fc312 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -204,6 +204,7 @@ extern int _cond_resched(void); extern void ___might_sleep(const char *file, int line, int preempt_offset); extern void __might_sleep(const char *file, int line, int preempt_offset); extern void __cant_sleep(const char *file, int line, int preempt_offset); +extern void __cant_migrate(const char *file, int line); /** * might_sleep - annotation for functions that can sleep @@ -227,6 +228,18 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) + +/** + * cant_migrate - annotation for functions that cannot migrate + * + * Will print a stack trace if executed in code which is migratable + */ +# define cant_migrate() \ + do { \ + if (IS_ENABLED(CONFIG_SMP)) \ + __cant_migrate(__FILE__, __LINE__); \ + } while (0) + /** * non_block_start - annotate the start of section where sleeping is prohibited * @@ -251,6 +264,7 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) # define cant_sleep() do { } while (0) +# define cant_migrate() do { } while (0) # define sched_annotate_sleep() do { } while (0) # define non_block_start() do { } while (0) # define non_block_end() do { } while (0) @@ -258,13 +272,6 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) -#ifndef CONFIG_PREEMPT_RT -# define cant_migrate() cant_sleep() -#else - /* Placeholder for now */ -# define cant_migrate() do { } while (0) -#endif - /** * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. @@ -536,6 +543,7 @@ extern int panic_on_warn; extern unsigned long panic_on_taint; extern bool panic_on_taint_nousertaint; extern int sysctl_panic_on_rcu_stall; +extern int sysctl_max_rcu_stall_to_panic; extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 89f6a4214a70..9e8ca8743c26 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -116,7 +116,7 @@ struct kernfs_elem_attr { * kernfs node is represented by single kernfs_node. Most fields are * private to kernfs and shouldn't be accessed directly by kernfs users. * - * As long as s_count reference is held, the kernfs_node itself is + * As long as count reference is held, the kernfs_node itself is * accessible. Dereferencing elem or any other outer entity requires * active reference. */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 2ab2d6d6aeab..7d985a1dfe4a 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -29,6 +29,7 @@ struct kernel_pkey_params; * clear the contents. 
*/ struct key_preparsed_payload { + const char *orig_description; /* Actual or proposed description (maybe NULL) */ char *description; /* Proposed key description (or NULL) */ union key_payload payload; /* Proposed payload */ const void *data; /* Raw data */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 629abaf25681..a79404433812 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -27,6 +27,8 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <linux/refcount.h> +#include <linux/freelist.h> #include <asm/kprobes.h> #ifdef CONFIG_KPROBES @@ -144,6 +146,11 @@ static inline int kprobe_ftrace(struct kprobe *p) * ignored, due to maxactive being too low. * */ +struct kretprobe_holder { + struct kretprobe *rp; + refcount_t ref; +}; + struct kretprobe { struct kprobe kp; kretprobe_handler_t handler; @@ -151,18 +158,18 @@ struct kretprobe { int maxactive; int nmissed; size_t data_size; - struct hlist_head free_instances; - raw_spinlock_t lock; + struct freelist_head freelist; + struct kretprobe_holder *rph; }; struct kretprobe_instance { union { - struct hlist_node hlist; + struct freelist_node freelist; struct rcu_head rcu; }; - struct kretprobe *rp; + struct llist_node llist; + struct kretprobe_holder *rph; kprobe_opcode_t *ret_addr; - struct task_struct *task; void *fp; char data[]; }; @@ -221,6 +228,14 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, return ret; } +static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), + "Kretprobe is accessed from instance under preemptive context"); + + return READ_ONCE(ri->rph->rp); +} + #else /* CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) diff --git a/include/linux/list.h b/include/linux/list.h index a18c87b63376..89bdc92e75c3 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -9,7 +9,7 @@ #include <linux/kernel.h> /* - * Simple doubly linked list implementation. + * Circular doubly linked list implementation. 
* * Some of the internal functions ("__xxx") are useful when * manipulating whole lists rather than single entries, as diff --git a/include/linux/llist.h b/include/linux/llist.h index 2e9c7215882b..24f207b0190b 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -197,6 +197,16 @@ static inline struct llist_node *llist_next(struct llist_node *node) extern bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head); + +static inline bool __llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head) +{ + new_last->next = head->first; + head->first = new_first; + return new_last->next == NULL; +} + /** * llist_add - add a new entry * @new: new entry to be added @@ -209,6 +219,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head) return llist_add_batch(new, new, head); } +static inline bool __llist_add(struct llist_node *new, struct llist_head *head) +{ + return __llist_add_batch(new, new, head); +} + /** * llist_del_all - delete all entries from lock-less list * @head: the head of lock-less list to delete all entries @@ -222,6 +237,14 @@ static inline struct llist_node *llist_del_all(struct llist_head *head) return xchg(&head->first, NULL); } +static inline struct llist_node *__llist_del_all(struct llist_head *head) +{ + struct llist_node *first = head->first; + + head->first = NULL; + return first; +} + extern struct llist_node *llist_del_first(struct llist_head *head); struct llist_node *llist_reverse_order(struct llist_node *head); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index f5594879175a..b9e9adec73e8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -375,6 +375,12 @@ static inline void lockdep_unregister_key(struct lock_class_key *key) #define lockdep_depth(tsk) (0) +/* + * Dummy forward declarations, allow users to write less ifdef-y code + * and depend on dead code elimination. + */ +extern int lock_is_held(const void *); +extern int lockdep_is_held(const void *); #define lockdep_is_held_type(l, r) (1) #define lockdep_assert_held(l) do { (void)(l); } while (0) @@ -594,6 +600,16 @@ do { \ this_cpu_read(hardirqs_enabled))); \ } while (0) +/* + * Acceptable for protecting per-CPU resources accessed from BH. + * Much like in_softirq() - semantics are ambiguous, use carefully. 
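The new double-underscore llist helpers above are non-atomic counterparts of llist_add()/llist_del_all(): they skip the xchg()/cmpxchg() and are therefore only safe when the list is protected externally. A hedged sketch; the lock, item type, and function are invented for illustration:

#include <linux/llist.h>
#include <linux/spinlock.h>

struct my_item {			/* illustrative */
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_list_lock);

static bool my_queue_item(struct my_item *item)
{
	bool was_empty;

	spin_lock(&my_list_lock);
	/* non-atomic variant is fine: my_list only changes under the lock */
	was_empty = __llist_add(&item->node, &my_list);
	spin_unlock(&my_list_lock);

	return was_empty;	/* true if the list was empty before */
}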
+ */ +#define lockdep_assert_in_softirq() \ +do { \ + WARN_ON_ONCE(__lockdep_enabled && \ + (!in_softirq() || in_irq() || in_nmi())); \ +} while (0) + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) @@ -605,6 +621,7 @@ do { \ # define lockdep_assert_preemption_enabled() do { } while (0) # define lockdep_assert_preemption_disabled() do { } while (0) +# define lockdep_assert_in_softirq() do { } while (0) #endif #ifdef CONFIG_PROVE_RAW_LOCK_NESTING diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 28f23b341c1c..cd23355d2271 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -26,7 +26,7 @@ struct lsm_network_audit { int netif; - struct sock *sk; + const struct sock *sk; u16 family; __be16 dport; __be16 sport; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 32a940117e7a..acc0494cceba 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -301,7 +301,7 @@ LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, struct sock *newsk) LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) -LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb, +LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb, struct request_sock *req) LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk, const struct request_sock *req) diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h index 9542b41eacfd..35ce84c8ca02 100644 --- a/include/linux/mailbox/zynqmp-ipi-message.h +++ b/include/linux/mailbox/zynqmp-ipi-message.h @@ -14,7 +14,7 @@ */ struct zynqmp_ipi_message { size_t len; - u8 data[0]; + u8 data[]; }; #endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */ diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index ff7b7607c8cf..52b1610eae68 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -25,6 +25,9 @@ #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 +/* Marvell 88E1111 in Finisar SFP module with modified PHY ID */ +#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 + /* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do * not have a model ID.
So the switch driver traps reads to the ID2 * register and returns the switch family ID diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 52aa4821093a..959ad7d850b4 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -95,6 +95,12 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 vtag); +ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag); +ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, + size_t length, u8 *vtag); int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e391e3c56de5..08ed57e02b73 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -235,11 +235,6 @@ struct mem_cgroup { struct vmpressure vmpressure; /* - * Should the accounting and control be hierarchical, per subtree? - */ - bool use_hierarchy; - - /* * Should the OOM killer kill all belonging tasks, had it kill one? */ bool oom_group; @@ -282,20 +277,6 @@ struct mem_cgroup { MEMCG_PADDING(_pad1_); - /* - * set > 0 if pages under this cgroup are moving to other cgroup. - */ - atomic_t moving_account; - struct task_struct *move_lock_task; - - /* Legacy local VM stats and events */ - struct memcg_vmstats_percpu __percpu *vmstats_local; - - /* Subtree VM stats and events (batched updates) */ - struct memcg_vmstats_percpu __percpu *vmstats_percpu; - - MEMCG_PADDING(_pad2_); - atomic_long_t vmstats[MEMCG_NR_STAT]; atomic_long_t vmevents[NR_VM_EVENT_ITEMS]; @@ -310,13 +291,26 @@ struct mem_cgroup { int tcpmem_pressure; #ifdef CONFIG_MEMCG_KMEM - /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; struct obj_cgroup __rcu *objcg; struct list_head objcg_list; /* list of inherited objcgs */ #endif + MEMCG_PADDING(_pad2_); + + /* + * set > 0 if pages under this cgroup are moving to other cgroup. + */ + atomic_t moving_account; + struct task_struct *move_lock_task; + + /* Legacy local VM stats and events */ + struct memcg_vmstats_percpu __percpu *vmstats_local; + + /* Subtree VM stats and events (batched updates) */ + struct memcg_vmstats_percpu __percpu *vmstats_percpu; + #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; struct wb_domain cgwb_domain; @@ -343,6 +337,175 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +enum page_memcg_data_flags { + /* page->memcg_data is a pointer to an objcgs vector */ + MEMCG_DATA_OBJCGS = (1UL << 0), + /* page has been accounted as a non-slab kernel page */ + MEMCG_DATA_KMEM = (1UL << 1), + /* the next bit after the last actual flag */ + __NR_MEMCG_DATA_FLAGS = (1UL << 2), +}; + +#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) + +/* + * page_memcg - get the memory cgroup associated with a page + * @page: a pointer to the page struct + * + * Returns a pointer to the memory cgroup associated with the page, + * or NULL. This function assumes that the page is known to have a + * proper memory cgroup pointer. It's not safe to call this function + * against some type of pages, e.g. slab pages or ex-slab pages. 
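For the vtag-aware MEI client calls added above, a short round-trip sketch; the tag value, buffer handling, and function name are illustrative, while the mei_cldev_*_vtag() calls are the ones declared in that hunk:

#include <linux/mei_cl_bus.h>

static ssize_t my_vtag_roundtrip(struct mei_cl_device *cldev,
				 u8 *buf, size_t len)
{
	u8 vtag = 1;		/* illustrative virtual tag */
	ssize_t ret;

	ret = mei_cldev_send_vtag(cldev, buf, len, vtag);
	if (ret < 0)
		return ret;

	/* on receive, the tag the peer used is reported back via &vtag */
	return mei_cldev_recv_vtag(cldev, buf, len, &vtag);
}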
+ * + * Any of the following ensures page and memcg binding stability: + * - the page lock + * - LRU isolation + * - lock_page_memcg() + * - exclusive reference + */ +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + VM_BUG_ON_PAGE(PageSlab(page), page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); + + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * page_memcg_rcu - locklessly get the memory cgroup associated with a page + * @page: a pointer to the page struct + * + * Returns a pointer to the memory cgroup associated with the page, + * or NULL. This function assumes that the page is known to have a + * proper memory cgroup pointer. It's not safe to call this function + * against some type of pages, e.g. slab pages or ex-slab pages. + */ +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + VM_BUG_ON_PAGE(PageSlab(page), page); + WARN_ON_ONCE(!rcu_read_lock_held()); + + return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) & + ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * page_memcg_check - get the memory cgroup associated with a page + * @page: a pointer to the page struct + * + * Returns a pointer to the memory cgroup associated with the page, + * or NULL. This function unlike page_memcg() can take any page + * as an argument. It has to be used in cases when it's not known if a page + * has an associated memory cgroup pointer or an object cgroups vector. + * + * Any of the following ensures page and memcg binding stability: + * - the page lock + * - LRU isolation + * - lock_page_memcg() + * - exclusive reference + */ +static inline struct mem_cgroup *page_memcg_check(struct page *page) +{ + /* + * Because page->memcg_data might be changed asynchronously + * for slab pages, READ_ONCE() should be used here. + */ + unsigned long memcg_data = READ_ONCE(page->memcg_data); + + if (memcg_data & MEMCG_DATA_OBJCGS) + return NULL; + + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * PageMemcgKmem - check if the page has MemcgKmem flag set + * @page: a pointer to the page struct + * + * Checks if the page has MemcgKmem flag set. The caller must ensure that + * the page has an associated memory cgroup. It's not safe to call this function + * against some types of pages, e.g. slab pages. + */ +static inline bool PageMemcgKmem(struct page *page) +{ + VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page); + return page->memcg_data & MEMCG_DATA_KMEM; +} + +#ifdef CONFIG_MEMCG_KMEM +/* + * page_objcgs - get the object cgroups vector associated with a page + * @page: a pointer to the page struct + * + * Returns a pointer to the object cgroups vector associated with the page, + * or NULL. This function assumes that the page is known to have an + * associated object cgroups vector. It's not safe to call this function + * against pages, which might have an associated memory cgroup: e.g. + * kernel stack pages. + */ +static inline struct obj_cgroup **page_objcgs(struct page *page) +{ + unsigned long memcg_data = READ_ONCE(page->memcg_data); + + VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); + + return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * page_objcgs_check - get the object cgroups vector associated with a page + * @page: a pointer to the page struct + * + * Returns a pointer to the object cgroups vector associated with the page, + * or NULL. 
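All of the accessors above decode one scheme: page->memcg_data is a pointer whose two low bits carry the flags defined earlier in this hunk. A stand-alone sketch of the layout and the decode step; the helper name is invented:

/*
 *  63                                      2    1    0
 * +----------------------------------------+----+----+
 * |  pointer bits (mem_cgroup or objcgs)   |KMEM|OBJ |
 * +----------------------------------------+----+----+
 */
static inline void *memcg_data_to_ptr(unsigned long memcg_data)
{
	/* strip MEMCG_DATA_OBJCGS and MEMCG_DATA_KMEM, keep the pointer */
	return (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}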
This function is safe to use if the page can be directly associated + * with a memory cgroup. + */ +static inline struct obj_cgroup **page_objcgs_check(struct page *page) +{ + unsigned long memcg_data = READ_ONCE(page->memcg_data); + + if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS)) + return NULL; + + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); + + return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * set_page_objcgs - associate a page with an object cgroups vector + * @page: a pointer to the page struct + * @objcgs: a pointer to the object cgroups vector + * + * Atomically associates a page with a vector of object cgroups. + */ +static inline bool set_page_objcgs(struct page *page, + struct obj_cgroup **objcgs) +{ + return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs | + MEMCG_DATA_OBJCGS); +} +#else +static inline struct obj_cgroup **page_objcgs(struct page *page) +{ + return NULL; +} + +static inline struct obj_cgroup **page_objcgs_check(struct page *page) +{ + return NULL; +} + +static inline bool set_page_objcgs(struct page *page, + struct obj_cgroup **objcgs) +{ + return true; +} +#endif + static __always_inline bool memcg_stat_item_in_bytes(int idx) { if (idx == MEMCG_PERCPU_B) @@ -491,12 +654,41 @@ out: struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); +static inline bool lruvec_holds_page_lru_lock(struct page *page, + struct lruvec *lruvec) +{ + pg_data_t *pgdat = page_pgdat(page); + const struct mem_cgroup *memcg; + struct mem_cgroup_per_node *mz; + + if (mem_cgroup_disabled()) + return lruvec == &pgdat->__lruvec; + + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + memcg = page_memcg(page) ? : root_mem_cgroup; + + return lruvec->pgdat == pgdat && mz->memcg == memcg; +} + struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); struct mem_cgroup *get_mem_cgroup_from_page(struct page *page); +struct lruvec *lock_page_lruvec(struct page *page); +struct lruvec *lock_page_lruvec_irq(struct page *page); +struct lruvec *lock_page_lruvec_irqsave(struct page *page, + unsigned long *flags); + +#ifdef CONFIG_DEBUG_VM +void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page); +#else +static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) +{ +} +#endif + static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ?
container_of(css, struct mem_cgroup, css) : NULL; @@ -589,8 +781,6 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, { if (root == memcg) return true; - if (!root->use_hierarchy) - return false; return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); } @@ -743,15 +933,19 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, static inline void __mod_memcg_page_state(struct page *page, int idx, int val) { - if (page->mem_cgroup) - __mod_memcg_state(page->mem_cgroup, idx, val); + struct mem_cgroup *memcg = page_memcg(page); + + if (memcg) + __mod_memcg_state(memcg, idx, val); } static inline void mod_memcg_page_state(struct page *page, int idx, int val) { - if (page->mem_cgroup) - mod_memcg_state(page->mem_cgroup, idx, val); + struct mem_cgroup *memcg = page_memcg(page); + + if (memcg) + mod_memcg_state(memcg, idx, val); } static inline unsigned long lruvec_page_state(struct lruvec *lruvec, @@ -794,19 +988,15 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); -void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, - int val); -void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); +void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); -void mod_memcg_obj_state(void *p, int idx, int val); - -static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, +static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { unsigned long flags; local_irq_save(flags); - __mod_lruvec_slab_state(p, idx, val); + __mod_lruvec_kmem_state(p, idx, val); local_irq_restore(flags); } @@ -820,43 +1010,6 @@ static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, local_irq_restore(flags); } -static inline void mod_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_lruvec_state(lruvec, idx, val); - local_irq_restore(flags); -} - -static inline void __mod_lruvec_page_state(struct page *page, - enum node_stat_item idx, int val) -{ - struct page *head = compound_head(page); /* rmap on tail pages */ - pg_data_t *pgdat = page_pgdat(page); - struct lruvec *lruvec; - - /* Untracked pages have no memcg, no lruvec. 
Update only the node */ - if (!head->mem_cgroup) { - __mod_node_page_state(pgdat, idx, val); - return; - } - - lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat); - __mod_lruvec_state(lruvec, idx, val); -} - -static inline void mod_lruvec_page_state(struct page *page, - enum node_stat_item idx, int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_lruvec_page_state(page, idx, val); - local_irq_restore(flags); -} - unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); @@ -878,8 +1031,10 @@ static inline void count_memcg_events(struct mem_cgroup *memcg, static inline void count_memcg_page_event(struct page *page, enum vm_event_item idx) { - if (page->mem_cgroup) - count_memcg_events(page->mem_cgroup, idx, 1); + struct mem_cgroup *memcg = page_memcg(page); + + if (memcg) + count_memcg_events(memcg, idx, 1); } static inline void count_memcg_event_mm(struct mm_struct *mm, @@ -900,12 +1055,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { + bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || + event == MEMCG_SWAP_FAIL; + atomic_long_inc(&memcg->memory_events_local[event]); - cgroup_file_notify(&memcg->events_local_file); + if (!swap_event) + cgroup_file_notify(&memcg->events_local_file); do { atomic_long_inc(&memcg->memory_events[event]); - cgroup_file_notify(&memcg->events_file); + if (swap_event) + cgroup_file_notify(&memcg->swap_events_file); + else + cgroup_file_notify(&memcg->events_file); if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) break; @@ -941,6 +1103,27 @@ void mem_cgroup_split_huge_fixup(struct page *head); struct mem_cgroup; +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} + +static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return NULL; +} + +static inline struct mem_cgroup *page_memcg_check(struct page *page) +{ + return NULL; +} + +static inline bool PageMemcgKmem(struct page *page) +{ + return false; +} + static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return true; @@ -1013,6 +1196,14 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, return &pgdat->__lruvec; } +static inline bool lruvec_holds_page_lru_lock(struct page *page, + struct lruvec *lruvec) +{ + pg_data_t *pgdat = page_pgdat(page); + + return lruvec == &pgdat->__lruvec; +} + static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { return NULL; @@ -1038,6 +1229,31 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } +static inline struct lruvec *lock_page_lruvec(struct page *page) +{ + struct pglist_data *pgdat = page_pgdat(page); + + spin_lock(&pgdat->__lruvec.lru_lock); + return &pgdat->__lruvec; +} + +static inline struct lruvec *lock_page_lruvec_irq(struct page *page) +{ + struct pglist_data *pgdat = page_pgdat(page); + + spin_lock_irq(&pgdat->__lruvec.lru_lock); + return &pgdat->__lruvec; +} + +static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page, + unsigned long *flagsp) +{ + struct pglist_data *pgdat = page_pgdat(page); + + spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp); + return &pgdat->__lruvec; +} + static inline struct mem_cgroup * mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, @@ -1208,31 +1424,7 @@ static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, { } -static 
inline void __mod_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); -} - -static inline void mod_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - mod_node_page_state(lruvec_pgdat(lruvec), idx, val); -} - -static inline void __mod_lruvec_page_state(struct page *page, - enum node_stat_item idx, int val) -{ - __mod_node_page_state(page_pgdat(page), idx, val); -} - -static inline void mod_lruvec_page_state(struct page *page, - enum node_stat_item idx, int val) -{ - mod_node_page_state(page_pgdat(page), idx, val); -} - -static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, +static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { struct page *page = virt_to_head_page(p); @@ -1240,7 +1432,7 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, __mod_node_page_state(page_pgdat(page), idx, val); } -static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, +static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { struct page *page = virt_to_head_page(p); @@ -1248,10 +1440,6 @@ static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, mod_node_page_state(page_pgdat(page), idx, val); } -static inline void mod_memcg_obj_state(void *p, int idx, int val) -{ -} - static inline unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1285,6 +1473,10 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } + +static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) +{ +} #endif /* CONFIG_MEMCG */ /* idx can be of type enum memcg_stat_item or node_stat_item */ @@ -1315,38 +1507,14 @@ static inline void __dec_memcg_page_state(struct page *page, __mod_memcg_page_state(page, idx, -1); } -static inline void __inc_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - __mod_lruvec_state(lruvec, idx, 1); -} - -static inline void __dec_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - __mod_lruvec_state(lruvec, idx, -1); -} - -static inline void __inc_lruvec_page_state(struct page *page, - enum node_stat_item idx) -{ - __mod_lruvec_page_state(page, idx, 1); -} - -static inline void __dec_lruvec_page_state(struct page *page, - enum node_stat_item idx) -{ - __mod_lruvec_page_state(page, idx, -1); -} - -static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx) +static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) { - __mod_lruvec_slab_state(p, idx, 1); + __mod_lruvec_kmem_state(p, idx, 1); } -static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx) +static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) { - __mod_lruvec_slab_state(p, idx, -1); + __mod_lruvec_kmem_state(p, idx, -1); } /* idx can be of type enum memcg_stat_item or node_stat_item */ @@ -1377,41 +1545,61 @@ static inline void dec_memcg_page_state(struct page *page, mod_memcg_page_state(page, idx, -1); } -static inline void inc_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) +static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) { - mod_lruvec_state(lruvec, idx, 1); + struct mem_cgroup *memcg; + + memcg = lruvec_memcg(lruvec); + if (!memcg) + return NULL; + memcg = parent_mem_cgroup(memcg); + if (!memcg) + return NULL; + return mem_cgroup_lruvec(memcg, 
lruvec_pgdat(lruvec)); } -static inline void dec_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) +static inline void unlock_page_lruvec(struct lruvec *lruvec) { - mod_lruvec_state(lruvec, idx, -1); + spin_unlock(&lruvec->lru_lock); } -static inline void inc_lruvec_page_state(struct page *page, - enum node_stat_item idx) +static inline void unlock_page_lruvec_irq(struct lruvec *lruvec) { - mod_lruvec_page_state(page, idx, 1); + spin_unlock_irq(&lruvec->lru_lock); } -static inline void dec_lruvec_page_state(struct page *page, - enum node_stat_item idx) +static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, + unsigned long flags) { - mod_lruvec_page_state(page, idx, -1); + spin_unlock_irqrestore(&lruvec->lru_lock, flags); } -static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) +/* Don't lock again iff page's lruvec locked */ +static inline struct lruvec *relock_page_lruvec_irq(struct page *page, + struct lruvec *locked_lruvec) { - struct mem_cgroup *memcg; + if (locked_lruvec) { + if (lruvec_holds_page_lru_lock(page, locked_lruvec)) + return locked_lruvec; - memcg = lruvec_memcg(lruvec); - if (!memcg) - return NULL; - memcg = parent_mem_cgroup(memcg); - if (!memcg) - return NULL; - return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); + unlock_page_lruvec_irq(locked_lruvec); + } + + return lock_page_lruvec_irq(page); +} + +/* Don't lock again iff page's lruvec locked */ +static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page, + struct lruvec *locked_lruvec, unsigned long *flags) +{ + if (locked_lruvec) { + if (lruvec_holds_page_lru_lock(page, locked_lruvec)) + return locked_lruvec; + + unlock_page_lruvec_irqrestore(locked_lruvec, *flags); + } + + return lock_page_lruvec_irqsave(page, flags); } #ifdef CONFIG_CGROUP_WRITEBACK @@ -1430,7 +1618,7 @@ static inline void mem_cgroup_track_foreign_dirty(struct page *page, if (mem_cgroup_disabled()) return; - if (unlikely(&page->mem_cgroup->css != wb->memcg_css)) + if (unlikely(&page_memcg(page)->css != wb->memcg_css)) mem_cgroup_track_foreign_dirty_slowpath(page, wb); } @@ -1561,9 +1749,8 @@ static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg, } /* - * helper for accessing a memcg's index. It will be used as an index in the - * child cache array in kmem_cache, and also to derive its name. This function - * will return -1 when this is not a kmem-limited memcg. + * A helper for accessing memcg's kmem_id, used for getting + * corresponding LRU lists. */ static inline int memcg_cache_id(struct mem_cgroup *memcg) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d65c6fdc5cfc..551093b74596 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -281,20 +281,6 @@ static inline bool movable_node_is_enabled(void) } #endif /* ! CONFIG_MEMORY_HOTPLUG */ -#ifdef CONFIG_NUMA -extern int memory_add_physaddr_to_nid(u64 start); -extern int phys_to_target_node(u64 start); -#else -static inline int memory_add_physaddr_to_nid(u64 start) -{ - return 0; -} -static inline int phys_to_target_node(u64 start) -{ - return 0; -} -#endif - #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * pgdat resizing functions diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 8ba042430d8e..27264fe4b3b9 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -55,7 +55,12 @@ */ #define TMIO_MMC_HAS_IDLE_WAIT BIT(4) -/* BIT(5) is unused */ +/* + * Use the busy timeout feature. 
Probably all TMIO versions support it. Yet, + * we don't have documentation for old variants, so we enable only known good + * variants with this flag. Can be removed once all variants are known good. + */ +#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5) /* * Some controllers have CMD12 automatically diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d4841e5a5f45..562862ff819c 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -214,7 +214,6 @@ enum mhi_db_brst_mode { * @offload_channel: The client manages the channel completely * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition * @auto_queue: Framework will automatically queue buffers for DL traffic - * @auto_start: Automatically start (open) this channel * @wake-capable: Channel capable of waking up the system */ struct mhi_channel_config { @@ -232,7 +231,6 @@ struct mhi_channel_config { bool offload_channel; bool doorbell_mode_switch; bool auto_queue; - bool auto_start; bool wake_capable; }; @@ -337,6 +335,7 @@ struct mhi_controller_config { * @wlock: Lock for protecting device wakeup * @mhi_link_info: Device bandwidth info * @st_worker: State transition worker + * @hiprio_wq: High priority workqueue for MHI work such as state transitions * @state_event: State change event * @status_cb: CB function to notify power states of the device (required) * @wake_get: CB function to assert device wake (optional) @@ -349,6 +348,7 @@ struct mhi_controller_config { * @read_reg: Read a MHI register via the physical link (required) * @write_reg: Write a MHI register via the physical link (required) * @buffer_len: Bounce buffer length + * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) * @pre_init: MHI host needs to do pre-initialization before power up @@ -419,6 +419,7 @@ struct mhi_controller { spinlock_t wlock; struct mhi_link_info mhi_link_info; struct work_struct st_worker; + struct workqueue_struct *hiprio_wq; wait_queue_head_t state_event; void (*status_cb)(struct mhi_controller *mhi_cntrl, @@ -438,6 +439,7 @@ struct mhi_controller { u32 val); size_t buffer_len; + int index; bool bounce_buf; bool fbc_download; bool pre_init; @@ -645,12 +647,12 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); int mhi_pm_resume(struct mhi_controller *mhi_cntrl); /** - * mhi_download_rddm_img - Download ramdump image from device for - * debugging purpose. + * mhi_download_rddm_image - Download ramdump image from device for + * debugging purpose. 
* @mhi_cntrl: MHI controller * @in_panic: Download rddm image during kernel panic */ -int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic); /** * mhi_force_rddm_mode - Force device into rddm mode @@ -659,6 +661,12 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); /** + * mhi_get_exec_env - Get BHI execution environment of the device + * @mhi_cntrl: MHI controller + */ +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); + +/** * mhi_get_mhi_state - Get MHI state of the device * @mhi_cntrl: MHI controller */ @@ -737,4 +745,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, struct sk_buff *skb, size_t len, enum mhi_flags mflags); +/** + * mhi_queue_is_full - Determine whether queueing new elements is possible + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + */ +bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir); + #endif /* _MHI_H_ */ diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h deleted file mode 100644 index e99c789424e0..000000000000 --- a/include/linux/mic_bus.h +++ /dev/null @@ -1,100 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Intel MIC Platform Software Stack (MPSS) - * - * Copyright(c) 2014 Intel Corporation. - * - * Intel MIC Bus driver. - * - * This implementation is very similar to the virtio bus driver - * implementation @ include/linux/virtio.h. - */ -#ifndef _MIC_BUS_H_ -#define _MIC_BUS_H_ -/* - * Everything a mbus driver needs to work with any particular mbus - * implementation. - */ -#include <linux/interrupt.h> -#include <linux/dma-mapping.h> - -struct mbus_device_id { - __u32 device; - __u32 vendor; -}; - -#define MBUS_DEV_DMA_HOST 2 -#define MBUS_DEV_DMA_MIC 3 -#define MBUS_DEV_ANY_ID 0xffffffff - -/** - * mbus_device - representation of a device using mbus - * @mmio_va: virtual address of mmio space - * @hw_ops: the hardware ops supported by this device. - * @id: the device type identification (used to match it with a driver). - * @dev: underlying device. - * be used to communicate with. - * @index: unique position on the mbus bus - */ -struct mbus_device { - void __iomem *mmio_va; - struct mbus_hw_ops *hw_ops; - struct mbus_device_id id; - struct device dev; - int index; -}; - -/** - * mbus_driver - operations for a mbus I/O driver - * @driver: underlying device driver (populate name and owner). - * @id_table: the ids serviced by this driver. - * @probe: the function to call when a device is found. Returns 0 or -errno. - * @remove: the function to call when a device is removed. - */ -struct mbus_driver { - struct device_driver driver; - const struct mbus_device_id *id_table; - int (*probe)(struct mbus_device *dev); - void (*scan)(struct mbus_device *dev); - void (*remove)(struct mbus_device *dev); -}; - -/** - * struct mic_irq - opaque pointer used as cookie - */ -struct mic_irq; - -/** - * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus. 
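Taken together, the MHI changes above shift responsibility to the client driver: with auto_start gone, a channel is opened explicitly (typically from probe), and mhi_queue_is_full() lets the transmit path back off instead of failing a queue attempt. A hedged sketch; only the mhi_*() calls and flags are existing MHI bus API, the my_* functions are illustrative:

#include <linux/mhi.h>
#include <linux/skbuff.h>

static int my_probe(struct mhi_device *mhi_dev,
		    const struct mhi_device_id *id)
{
	/* replaces the removed auto_start behaviour */
	return mhi_prepare_for_transfer(mhi_dev);
}

static int my_xmit(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	if (mhi_queue_is_full(mhi_dev, DMA_TO_DEVICE))
		return -EAGAIN;		/* ring full; retry later */

	return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
}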
- */ -struct mbus_hw_ops { - struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev, - irq_handler_t handler, - irq_handler_t thread_fn, - const char *name, void *data, - int intr_src); - void (*free_irq)(struct mbus_device *mbdev, - struct mic_irq *cookie, void *data); - void (*ack_interrupt)(struct mbus_device *mbdev, int num); -}; - -struct mbus_device * -mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, - struct mbus_hw_ops *hw_ops, int index, - void __iomem *mmio_va); -void mbus_unregister_device(struct mbus_device *mbdev); - -int mbus_register_driver(struct mbus_driver *drv); -void mbus_unregister_driver(struct mbus_driver *drv); - -static inline struct mbus_device *dev_to_mbus(struct device *_dev) -{ - return container_of(_dev, struct mbus_device, dev); -} - -static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv) -{ - return container_of(drv, struct mbus_driver, driver); -} - -#endif /* _MIC_BUS_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 0f8d1583fa8e..4594838a0f7c 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -45,8 +45,8 @@ extern struct page *alloc_migration_target(struct page *page, unsigned long priv extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); -extern int migrate_prep(void); -extern int migrate_prep_local(void); +extern void migrate_prep(void); +extern void migrate_prep_local(void); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 06e066e04a4b..236a7d04f891 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -46,7 +46,6 @@ #define DEFAULT_UAR_PAGE_SHIFT 12 -#define MAX_MSIX_P_PORT 17 #define MAX_MSIX 128 #define MIN_MSIX_P_PORT 5 #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index cf824366a7d1..f1de49d64a98 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -346,6 +346,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, + MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, @@ -717,6 +718,11 @@ struct mlx5_eqe_sync_fw_update { u8 sync_rst_state; }; +struct mlx5_eqe_vhca_state { + __be16 ec_function; + __be16 function_id; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -736,6 +742,7 @@ union ev_data { struct mlx5_eqe_temp_warning temp_warning; struct mlx5_eqe_xrq_err xrq_err; struct mlx5_eqe_sync_fw_update sync_fw_update; + struct mlx5_eqe_vhca_state vhca_state; } __packed; struct mlx5_eqe { @@ -1076,6 +1083,7 @@ enum { MLX5_MATCH_INNER_HEADERS = 1 << 2, MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, + MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5, }; enum { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index add85094f9a5..f93bfe7473aa 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -48,6 +48,7 @@ #include <linux/idr.h> #include <linux/notifier.h> #include <linux/refcount.h> +#include <linux/auxiliary_bus.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> 
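The auxiliary_bus.h include here, together with MLX5_ADEV_NAME and struct mlx5_adev in the hunks that follow (and the removal of mlx5_register_interface() further down), converts mlx5 sub-drivers from the private mlx5_interface registration to the generic auxiliary bus. A hedged sketch of how a sub-driver binds under the new scheme; the ".eth" match string and all my_* names are illustrative, not from this patch:

#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/mlx5/driver.h>

static int my_adev_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);

	/* madev->mdev is the owning mlx5_core_dev; stash it for later */
	dev_set_drvdata(&adev->dev, madev->mdev);
	return 0;
}

static void my_adev_remove(struct auxiliary_device *adev)
{
}

static const struct auxiliary_device_id my_adev_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth" },	/* illustrative match string */
	{},
};
MODULE_DEVICE_TABLE(auxiliary, my_adev_id_table);

static struct auxiliary_driver my_adev_driver = {
	.name = "eth",
	.probe = my_adev_probe,
	.remove = my_adev_remove,
	.id_table = my_adev_id_table,
};
module_auxiliary_driver(my_adev_driver);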
@@ -56,6 +57,8 @@ #include <linux/ptp_clock_kernel.h> #include <net/devlink.h> +#define MLX5_ADEV_NAME "mlx5_core" + enum { MLX5_BOARD_ID_LEN = 64, }; @@ -534,6 +537,17 @@ struct mlx5_core_roce { struct mlx5_flow_handle *allow_rule; }; +enum { + MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, +}; + +struct mlx5_adev { + struct auxiliary_device adev; + struct mlx5_core_dev *mdev; + int idx; +}; + struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ struct mlx5_irq_table *irq_table; @@ -547,7 +561,7 @@ struct mlx5_priv { atomic_t reg_pages; struct list_head free_list; int vfs_pages; - int peer_pf_pages; + int host_pf_pages; struct mlx5_core_health health; @@ -571,6 +585,8 @@ struct mlx5_priv { struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; + struct mlx5_adev **adev; + int adev_idx; struct mlx5_events *events; struct mlx5_flow_steering *steering; @@ -578,6 +594,7 @@ struct mlx5_priv { struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; + u32 flags; struct mlx5_devcom *devcom; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; @@ -888,10 +905,6 @@ enum { CMD_ALLOWED_OPCODE_ALL, }; -int mlx5_cmd_init(struct mlx5_core_dev *dev); -void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); -void mlx5_cmd_set_state(struct mlx5_core_dev *dev, - enum mlx5_cmdif_state cmdif_state); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); @@ -1059,23 +1072,6 @@ enum { MAX_MR_CACHE_ENTRIES }; -enum { - MLX5_INTERFACE_PROTOCOL_IB = 0, - MLX5_INTERFACE_PROTOCOL_ETH = 1, - MLX5_INTERFACE_PROTOCOL_VDPA = 2, -}; - -struct mlx5_interface { - void * (*add)(struct mlx5_core_dev *dev); - void (*remove)(struct mlx5_core_dev *dev, void *context); - int (*attach)(struct mlx5_core_dev *dev, void *context); - void (*detach)(struct mlx5_core_dev *dev, void *context); - int protocol; - struct list_head list; -}; - -int mlx5_register_interface(struct mlx5_interface *intf); -void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); @@ -1137,7 +1133,7 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) return dev->coredev_type == MLX5_COREDEV_VF; } -static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) +static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu; } @@ -1213,4 +1209,22 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) return val.vbool; } +/** + * mlx5_core_net - Provide net namespace of the mlx5_core_dev + * @dev: mlx5 core device + * + * mlx5_core_net() returns the net namespace of the mlx5 core device. + * It can be called only in the limited contexts described below: + * (a) when a devlink instance for mlx5_core is registered and + * devlink reload operation is disabled, or + * (b) from the devlink reload_down() and reload_up() callbacks, where + * the devlink instance's net namespace is guaranteed to be stable.
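A small usage sketch for mlx5_core_net(), whose definition closes just below; the wrapper function is invented and is only valid in the contexts the comment lists:

#include <linux/mlx5/driver.h>
#include <net/net_namespace.h>

static bool my_dev_in_init_net(struct mlx5_core_dev *mdev)
{
	/* resolve the owning namespace, then compare against init_net */
	return net_eq(mlx5_core_net(mdev), &init_net);
}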
+ */ +static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) +{ + return devlink_net(priv_to_devlink(dev)); +} + #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index b0ae8020f13e..29fd832950e0 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -96,10 +96,10 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num); -u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); +u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ -static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) { return MLX5_ESWITCH_NONE; } @@ -136,4 +136,8 @@ mlx5_eswitch_get_vport_metadata_mask(void) } #endif /* CONFIG_MLX5_ESWITCH */ +static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) +{ + return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS; +} #endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 846d94ad04bc..1f51f4c3b1af 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -50,6 +50,7 @@ enum { MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), MLX5_FLOW_TABLE_TERMINATION = BIT(2), MLX5_FLOW_TABLE_UNMANAGED = BIT(3), + MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4), }; #define LEFTOVERS_RULE_NUM 2 @@ -132,6 +133,7 @@ struct mlx5_flow_destination { struct mlx5_pkt_reformat *pkt_reformat; u8 flags; } vport; + u32 sampler_id; }; }; @@ -173,9 +175,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, struct mlx5_flow_table * mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, - int prio, - int num_flow_table_entries, - u32 level, u16 vport); + struct mlx5_flow_table_attr *ft_attr, u16 vport); struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( struct mlx5_flow_namespace *ns, int prio, u32 level); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 651591a2965d..0d6e287d614f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -299,6 +299,8 @@ enum { MLX5_CMD_OP_CREATE_UMEM = 0xa08, MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, MLX5_CMD_OP_SYNC_STEERING = 0xb00, + MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, + MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, MLX5_CMD_OP_MAX }; @@ -623,6 +625,26 @@ struct mlx5_ifc_fte_match_set_misc3_bits { u8 reserved_at_140[0xc0]; }; +struct mlx5_ifc_fte_match_set_misc4_bits { + u8 prog_sample_field_value_0[0x20]; + + u8 prog_sample_field_id_0[0x20]; + + u8 prog_sample_field_value_1[0x20]; + + u8 prog_sample_field_id_1[0x20]; + + u8 prog_sample_field_value_2[0x20]; + + u8 prog_sample_field_id_2[0x20]; + + u8 prog_sample_field_value_3[0x20]; + + u8 prog_sample_field_id_3[0x20]; + + u8 reserved_at_100[0x100]; +}; + struct mlx5_ifc_cmd_pas_bits { u8 pa_h[0x20]; @@ -891,7 +913,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; u8 insert_trailer[0x1]; - u8 reserved_at_2b[0x5]; + u8 reserved_at_2b[0x1]; + u8 tunnel_stateless_ip_over_ip_rx[0x1]; + u8 tunnel_stateless_ip_over_ip_tx[0x1]; + u8 reserved_at_2e[0x2]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -1223,8 +1248,22 @@ enum mlx5_fc_bulk_alloc_bitmask { #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) +enum { + MLX5_STEERING_FORMAT_CONNECTX_5 = 0, + MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, +}; + struct 
mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x30]; + u8 reserved_at_0[0x1f]; + u8 vhca_resource_manager[0x1]; + + u8 reserved_at_20[0x3]; + u8 event_on_vhca_state_teardown_request[0x1]; + u8 event_on_vhca_state_in_use[0x1]; + u8 event_on_vhca_state_active[0x1]; + u8 event_on_vhca_state_allocated[0x1]; + u8 event_on_vhca_state_invalid[0x1]; + u8 reserved_at_28[0x8]; u8 vhca_id[0x10]; u8 reserved_at_40[0x40]; @@ -1241,7 +1280,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ece_support[0x1]; u8 reserved_at_a4[0x7]; u8 log_max_srq[0x5]; - u8 reserved_at_b0[0x10]; + u8 reserved_at_b0[0x2]; + u8 ts_cqe_to_dest_cqn[0x1]; + u8 reserved_at_b3[0xd]; u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; @@ -1512,7 +1553,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_mc[0x1]; u8 log_min_hairpin_wq_data_sz[0x5]; - u8 reserved_at_3e8[0x3]; + u8 reserved_at_3e8[0x2]; + u8 vhca_state[0x1]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; @@ -1521,7 +1563,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 general_obj_types[0x40]; - u8 reserved_at_440[0x20]; + u8 reserved_at_440[0x4]; + u8 steering_format_version[0x4]; + u8 create_qp_start_hint[0x18]; u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; @@ -1580,7 +1624,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_num_of_monitor_counters[0x10]; u8 num_ppcnt_monitor_counters[0x10]; - u8 reserved_at_640[0x10]; + u8 max_num_sf[0x10]; u8 num_q_monitor_counters[0x10]; u8 reserved_at_660[0x20]; @@ -1616,6 +1660,7 @@ enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, @@ -1668,7 +1713,9 @@ struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; - u8 reserved_at_a00[0x600]; + struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4; + + u8 reserved_at_c00[0x400]; }; enum { @@ -3289,8 +3336,12 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_80[0x10]; u8 hairpin_peer_vhca[0x10]; - u8 reserved_at_a0[0x50]; + u8 reserved_at_a0[0x20]; + + u8 reserved_at_c0[0x8]; + u8 ts_cqe_to_dest_cqn[0x18]; + u8 reserved_at_e0[0x10]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; u8 reserved_at_110[0x10]; @@ -4204,7 +4255,11 @@ struct mlx5_ifc_set_hca_cap_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_function[0x1]; + u8 reserved_at_41[0xf]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; union mlx5_ifc_hca_cap_union_bits capability; }; @@ -5461,6 +5516,7 @@ enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4, + MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5, }; struct mlx5_ifc_query_flow_group_out_bits { @@ -5823,7 +5879,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits { u8 reserved_at_68[0x10]; u8 num_of_actions[0x8]; - union mlx5_ifc_set_add_copy_action_in_auto_bits actions[0]; + union mlx5_ifc_set_add_copy_action_in_auto_bits actions[]; }; struct mlx5_ifc_dealloc_modify_header_context_out_bits { @@ -9761,7 +9817,7 @@ struct mlx5_ifc_mcda_reg_bits { u8 reserved_at_60[0x20]; - u8 data[0][0x20]; + u8 data[][0x20]; }; enum { @@ -10657,11 +10713,13 @@ struct 
mlx5_ifc_affiliated_event_header_bits { enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT(0x20), }; enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, + MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20, }; enum { @@ -10736,6 +10794,33 @@ struct mlx5_ifc_create_encryption_key_in_bits { struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; }; +struct mlx5_ifc_sampler_obj_bits { + u8 modify_field_select[0x40]; + + u8 table_type[0x8]; + u8 level[0x8]; + u8 reserved_at_50[0xf]; + u8 ignore_flow_level[0x1]; + + u8 sample_ratio[0x20]; + + u8 reserved_at_80[0x8]; + u8 sample_table_id[0x18]; + + u8 reserved_at_a0[0x8]; + u8 default_table_id[0x18]; + + u8 sw_steering_icm_address_rx[0x40]; + u8 sw_steering_icm_address_tx[0x40]; + + u8 reserved_at_140[0xa0]; +}; + +struct mlx5_ifc_create_sampler_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_sampler_obj_bits sampler_object; +}; + enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h new file mode 100644 index 000000000000..98b56b75c625 --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd. */ + +#ifndef __MLX5_IFC_VDPA_H_ +#define __MLX5_IFC_VDPA_H_ + +enum { + MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0, + MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1, + MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2, +}; + +enum { + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1,
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, +}; + +enum { + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, +}; + +struct mlx5_ifc_virtio_q_bits { + u8 virtio_q_type[0x8]; + u8 reserved_at_8[0x5]; + u8 event_mode[0x3]; + u8 queue_index[0x10]; + + u8 full_emulation[0x1]; + u8 virtio_version_1_0[0x1]; + u8 reserved_at_22[0x2]; + u8 offload_type[0x4]; + u8 event_qpn_or_msix[0x18]; + + u8 doorbell_stride_index[0x10]; + u8 queue_size[0x10]; + + u8 device_emulation_id[0x20]; + + u8 desc_addr[0x40]; + + u8 used_addr[0x40]; + + u8 available_addr[0x40]; + + u8 virtio_q_mkey[0x20]; + + u8 max_tunnel_desc[0x10]; + u8 reserved_at_170[0x8]; + u8 error_type[0x8]; + + u8 umem_1_id[0x20]; + + u8 umem_1_size[0x20]; + + u8 umem_1_offset[0x40]; + + u8 umem_2_id[0x20]; + + u8 umem_2_size[0x20]; + + u8 umem_2_offset[0x40]; + + u8 umem_3_id[0x20]; + + u8 umem_3_size[0x20]; + + u8 umem_3_offset[0x40]; + + u8 counter_set_id[0x20]; + + u8 reserved_at_320[0x8]; + u8 pd[0x18]; + + u8 reserved_at_340[0xc0]; +}; + +struct mlx5_ifc_virtio_net_q_object_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x20]; + + u8 vhca_id[0x10]; + u8 reserved_at_70[0x10]; + + u8 queue_feature_bit_mask_12_3[0xa]; + u8 dirty_bitmap_dump_enable[0x1]; + u8 vhost_log_page[0x5]; + u8 reserved_at_90[0xc]; + u8 state[0x4]; + + u8 reserved_at_a0[0x5]; + u8 queue_feature_bit_mask_2_0[0x3]; + u8 tisn_or_qpn[0x18]; + + u8 dirty_bitmap_mkey[0x20]; + + u8 dirty_bitmap_size[0x20]; + + u8 dirty_bitmap_addr[0x40]; + + u8 hw_available_index[0x10]; + u8 hw_used_index[0x10]; + + u8 reserved_at_160[0xa0]; + + struct mlx5_ifc_virtio_q_bits virtio_q_context; +}; + +struct mlx5_ifc_create_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + + struct mlx5_ifc_virtio_net_q_object_bits obj_context; +}; + +struct mlx5_ifc_create_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +struct mlx5_ifc_destroy_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +struct mlx5_ifc_destroy_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +struct mlx5_ifc_query_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; +}; + +struct mlx5_ifc_query_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + + struct mlx5_ifc_virtio_net_q_object_bits obj_context; +}; + +enum { + MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0, + MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3, + MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4, +}; + +enum { + MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0, + MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1, + MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2, + MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3, +}; + +enum { + MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0, + MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1, +}; + +struct mlx5_ifc_modify_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + + struct mlx5_ifc_virtio_net_q_object_bits obj_context; +}; + +struct mlx5_ifc_modify_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +#endif /* __MLX5_IFC_VDPA_H_ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index ef360fe70aaf..abc7b3154298 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -557,8 +557,16 @@ enum 
page_entry_size { struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); - int (*split)(struct vm_area_struct * area, unsigned long addr); - int (*mremap)(struct vm_area_struct * area); + /* Called any time before splitting to check if it's allowed */ + int (*may_split)(struct vm_area_struct *area, unsigned long addr); + int (*mremap)(struct vm_area_struct *area, unsigned long flags); + /* + * Called by mprotect() to make driver-specific permission + * checks before mprotect() is finalised. The VMA must not + * be modified. Returns 0 if mprotect() can proceed. + */ + int (*mprotect)(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long newflags); vm_fault_t (*fault)(struct vm_fault *vmf); vm_fault_t (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); @@ -1484,28 +1492,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } -#ifdef CONFIG_MEMCG -static inline struct mem_cgroup *page_memcg(struct page *page) -{ - return page->mem_cgroup; -} -static inline struct mem_cgroup *page_memcg_rcu(struct page *page) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - return READ_ONCE(page->mem_cgroup); -} -#else -static inline struct mem_cgroup *page_memcg(struct page *page) -{ - return NULL; -} -static inline struct mem_cgroup *page_memcg_rcu(struct page *page) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - return NULL; -} -#endif - /* * Some inline functions in vmstat.h depend on page_zone() */ @@ -1716,8 +1702,8 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); -extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, - unsigned long addr, void *buf, int len, unsigned int gup_flags); +extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, + void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, @@ -2203,7 +2189,7 @@ static inline bool pgtable_pte_page_ctor(struct page *page) if (!ptlock_init(page)) return false; __SetPageTable(page); - inc_zone_page_state(page, NR_PAGETABLE); + inc_lruvec_page_state(page, NR_PAGETABLE); return true; } @@ -2211,7 +2197,7 @@ static inline void pgtable_pte_page_dtor(struct page *page) { ptlock_free(page); __ClearPageTable(page); - dec_zone_page_state(page, NR_PAGETABLE); + dec_lruvec_page_state(page, NR_PAGETABLE); } #define pte_offset_map_lock(mm, pmd, address, ptlp) \ @@ -2298,7 +2284,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page) if (!pmd_ptlock_init(page)) return false; __SetPageTable(page); - inc_zone_page_state(page, NR_PAGETABLE); + inc_lruvec_page_state(page, NR_PAGETABLE); return true; } @@ -2306,7 +2292,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page) { pmd_ptlock_free(page); __ClearPageTable(page); - dec_zone_page_state(page, NR_PAGETABLE); + dec_lruvec_page_state(page, NR_PAGETABLE); } /* @@ -2433,9 +2419,6 @@ static inline int early_pfn_to_nid(unsigned long pfn) #else /* please see mm/page_alloc.c */ extern int __meminit early_pfn_to_nid(unsigned long pfn); -/* there is a per-arch backend function.
*/ -extern int __meminit __early_pfn_to_nid(unsigned long pfn, - struct mminit_pfnnid_cache *state); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); @@ -2759,6 +2742,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } +#ifndef io_remap_pfn_range +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); +} +#endif + static inline vm_fault_t vmf_error(int err) { if (err == -ENOMEM) @@ -2865,44 +2857,56 @@ extern int apply_to_existing_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); +extern void init_mem_debugging_and_hardening(void); #ifdef CONFIG_PAGE_POISONING -extern bool page_poisoning_enabled(void); -extern void kernel_poison_pages(struct page *page, int numpages, int enable); +extern void __kernel_poison_pages(struct page *page, int numpages); +extern void __kernel_unpoison_pages(struct page *page, int numpages); +extern bool _page_poisoning_enabled_early; +DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled); +static inline bool page_poisoning_enabled(void) +{ + return _page_poisoning_enabled_early; +} +/* + * For use in fast paths after init_mem_debugging_and_hardening() has run, or + * when a false negative result is not harmful when called too early. + */ +static inline bool page_poisoning_enabled_static(void) +{ + return static_branch_unlikely(&_page_poisoning_enabled); +} +static inline void kernel_poison_pages(struct page *page, int numpages) +{ + if (page_poisoning_enabled_static()) + __kernel_poison_pages(page, numpages); +} +static inline void kernel_unpoison_pages(struct page *page, int numpages) +{ + if (page_poisoning_enabled_static()) + __kernel_unpoison_pages(page, numpages); +} #else static inline bool page_poisoning_enabled(void) { return false; } -static inline void kernel_poison_pages(struct page *page, int numpages, - int enable) { } +static inline bool page_poisoning_enabled_static(void) { return false; } +static inline void __kernel_poison_pages(struct page *page, int numpages) { } +static inline void kernel_poison_pages(struct page *page, int numpages) { } +static inline void kernel_unpoison_pages(struct page *page, int numpages) { } #endif -#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON -DECLARE_STATIC_KEY_TRUE(init_on_alloc); -#else DECLARE_STATIC_KEY_FALSE(init_on_alloc); -#endif static inline bool want_init_on_alloc(gfp_t flags) { - if (static_branch_unlikely(&init_on_alloc) && - !page_poisoning_enabled()) + if (static_branch_unlikely(&init_on_alloc)) return true; return flags & __GFP_ZERO; } -#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON -DECLARE_STATIC_KEY_TRUE(init_on_free); -#else DECLARE_STATIC_KEY_FALSE(init_on_free); -#endif static inline bool want_init_on_free(void) { - return static_branch_unlikely(&init_on_free) && - !page_poisoning_enabled(); + return static_branch_unlikely(&init_on_free); } -#ifdef CONFIG_DEBUG_PAGEALLOC -extern void init_debug_pagealloc(void); -#else -static inline void init_debug_pagealloc(void) {} -#endif extern bool _debug_pagealloc_enabled_early; DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); @@ -2924,28 +2928,28 @@ static inline bool debug_pagealloc_enabled_static(void) return static_branch_unlikely(&_debug_pagealloc_enabled); } -#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) -extern void __kernel_map_pages(struct page *page, int numpages, int
enable); - +#ifdef CONFIG_DEBUG_PAGEALLOC /* - * When called in DEBUG_PAGEALLOC context, the call should most likely be - * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static() + * To support DEBUG_PAGEALLOC architecture must ensure that + * __kernel_map_pages() never fails */ -static inline void -kernel_map_pages(struct page *page, int numpages, int enable) -{ - __kernel_map_pages(page, numpages, enable); -} -#ifdef CONFIG_HIBERNATION -extern bool kernel_page_present(struct page *page); -#endif /* CONFIG_HIBERNATION */ -#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */ -static inline void -kernel_map_pages(struct page *page, int numpages, int enable) {} -#ifdef CONFIG_HIBERNATION -static inline bool kernel_page_present(struct page *page) { return true; } -#endif /* CONFIG_HIBERNATION */ -#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */ +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + +static inline void debug_pagealloc_map_pages(struct page *page, int numpages) +{ + if (debug_pagealloc_enabled_static()) + __kernel_map_pages(page, numpages, 1); +} + +static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) +{ + if (debug_pagealloc_enabled_static()) + __kernel_map_pages(page, numpages, 0); +} +#else /* CONFIG_DEBUG_PAGEALLOC */ +static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} +static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} +#endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5a9238f6caad..07d9acb5b19c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -14,6 +14,7 @@ #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> +#include <linux/seqlock.h> #include <asm/mmu.h> @@ -78,7 +79,7 @@ struct page { struct { /* Page cache and anonymous pages */ /** * @lru: Pageout list, eg. active_list protected by - * pgdat->lru_lock. Sometimes used as a generic list + * lruvec->lru_lock. Sometimes used as a generic list * by the page owner. */ struct list_head lru; @@ -199,10 +200,7 @@ struct page { atomic_t _refcount; #ifdef CONFIG_MEMCG - union { - struct mem_cgroup *mem_cgroup; - struct obj_cgroup **obj_cgroups; - }; + unsigned long memcg_data; #endif /* @@ -446,6 +444,13 @@ struct mm_struct { */ atomic_t has_pinned; + /** + * @write_protect_seq: Locked when any thread is write + * protecting pages mapped by this mm to enforce a later COW, + * for instance during page table copying for fork(). 
+ */ + seqcount_t write_protect_seq; + #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 18e7eae9b5ba..0540f0156f58 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -1,11 +1,65 @@ #ifndef _LINUX_MMAP_LOCK_H #define _LINUX_MMAP_LOCK_H +#include <linux/lockdep.h> +#include <linux/mm_types.h> #include <linux/mmdebug.h> +#include <linux/rwsem.h> +#include <linux/tracepoint-defs.h> +#include <linux/types.h> #define MMAP_LOCK_INITIALIZER(name) \ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), +DECLARE_TRACEPOINT(mmap_lock_start_locking); +DECLARE_TRACEPOINT(mmap_lock_acquire_returned); +DECLARE_TRACEPOINT(mmap_lock_released); + +#ifdef CONFIG_TRACING + +void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write); +void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write, + bool success); +void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write); + +static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, + bool write) +{ + if (tracepoint_enabled(mmap_lock_start_locking)) + __mmap_lock_do_trace_start_locking(mm, write); +} + +static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, + bool write, bool success) +{ + if (tracepoint_enabled(mmap_lock_acquire_returned)) + __mmap_lock_do_trace_acquire_returned(mm, write, success); +} + +static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) +{ + if (tracepoint_enabled(mmap_lock_released)) + __mmap_lock_do_trace_released(mm, write); +} + +#else /* !CONFIG_TRACING */ + +static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, + bool write) +{ +} + +static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, + bool write, bool success) +{ +} + +static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) +{ +} + +#endif /* CONFIG_TRACING */ + static inline void mmap_init_lock(struct mm_struct *mm) { init_rwsem(&mm->mmap_lock); @@ -13,57 +67,86 @@ static inline void mmap_init_lock(struct mm_struct *mm) static inline void mmap_write_lock(struct mm_struct *mm) { + __mmap_lock_trace_start_locking(mm, true); down_write(&mm->mmap_lock); + __mmap_lock_trace_acquire_returned(mm, true, true); } static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) { + __mmap_lock_trace_start_locking(mm, true); down_write_nested(&mm->mmap_lock, subclass); + __mmap_lock_trace_acquire_returned(mm, true, true); } static inline int mmap_write_lock_killable(struct mm_struct *mm) { - return down_write_killable(&mm->mmap_lock); + int ret; + + __mmap_lock_trace_start_locking(mm, true); + ret = down_write_killable(&mm->mmap_lock); + __mmap_lock_trace_acquire_returned(mm, true, ret == 0); + return ret; } static inline bool mmap_write_trylock(struct mm_struct *mm) { - return down_write_trylock(&mm->mmap_lock) != 0; + bool ret; + + __mmap_lock_trace_start_locking(mm, true); + ret = down_write_trylock(&mm->mmap_lock) != 0; + __mmap_lock_trace_acquire_returned(mm, true, ret); + return ret; } static inline void mmap_write_unlock(struct mm_struct *mm) { up_write(&mm->mmap_lock); + __mmap_lock_trace_released(mm, true); } static inline void mmap_write_downgrade(struct mm_struct *mm) { downgrade_write(&mm->mmap_lock); + __mmap_lock_trace_acquire_returned(mm, false, true); } static inline void mmap_read_lock(struct mm_struct *mm) { + __mmap_lock_trace_start_locking(mm, false); 
down_read(&mm->mmap_lock); + __mmap_lock_trace_acquire_returned(mm, false, true); } static inline int mmap_read_lock_killable(struct mm_struct *mm) { - return down_read_killable(&mm->mmap_lock); + int ret; + + __mmap_lock_trace_start_locking(mm, false); + ret = down_read_killable(&mm->mmap_lock); + __mmap_lock_trace_acquire_returned(mm, false, ret == 0); + return ret; } static inline bool mmap_read_trylock(struct mm_struct *mm) { - return down_read_trylock(&mm->mmap_lock) != 0; + bool ret; + + __mmap_lock_trace_start_locking(mm, false); + ret = down_read_trylock(&mm->mmap_lock) != 0; + __mmap_lock_trace_acquire_returned(mm, false, ret); + return ret; } static inline void mmap_read_unlock(struct mm_struct *mm) { up_read(&mm->mmap_lock); + __mmap_lock_trace_released(mm, false); } static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) { - if (down_read_trylock(&mm->mmap_lock)) { + if (mmap_read_trylock(mm)) { rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); return true; } @@ -73,6 +156,7 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { up_read_non_owner(&mm->mmap_lock); + __mmap_lock_trace_released(mm, false); } static inline void mmap_assert_locked(struct mm_struct *mm) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c079b932330f..01bba36545c5 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -60,6 +60,8 @@ struct mmc_ios { #define MMC_TIMING_MMC_DDR52 8 #define MMC_TIMING_MMC_HS200 9 #define MMC_TIMING_MMC_HS400 10 +#define MMC_TIMING_SD_EXP 11 +#define MMC_TIMING_SD_EXP_1_2V 12 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ @@ -173,6 +175,9 @@ struct mmc_host_ops { */ int (*multi_io_quirk)(struct mmc_card *card, unsigned int direction, int blk_size); + + /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */ + int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios); }; struct mmc_cqe_ops { @@ -358,6 +363,8 @@ struct mmc_host { #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ MMC_CAP2_HS200_1_2V_SDR) +#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */ +#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */ #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index fb3bf696c05e..b593316bff3d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -113,8 +113,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype) struct pglist_data; /* - * zone->lock and the zone lru_lock are two of the hottest locks in the kernel. - * So add a wild amount of padding here to ensure that they fall into separate + * Add a wild amount of padding here to ensure data falls into separate * cachelines. There are very few zone structures in the machine, so space * consumption is not a concern here.
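The mmap_lock.h hunks above route every acquisition and release through optional tracepoints while leaving the rwsem semantics intact. As a minimal sketch of the unchanged caller side (inspect_vma() is a hypothetical callback, not part of this series):

/* Hedged sketch: a read-side critical section over the traced wrappers. */
static int walk_one_vma(struct mm_struct *mm, unsigned long addr,
			int (*inspect_vma)(struct vm_area_struct *))
{
	struct vm_area_struct *vma;
	int ret = -ENOENT;

	mmap_read_lock(mm);	/* fires mmap_lock_start_locking + acquire_returned */
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)
		ret = inspect_vma(vma);
	mmap_read_unlock(mm);	/* fires mmap_lock_released */
	return ret;
}

Because the tracepoint_enabled() checks compile down to static branches, callers pay essentially nothing while tracing is off.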
*/ @@ -152,7 +151,6 @@ enum zone_stat_item { NR_ZONE_UNEVICTABLE, NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ - NR_PAGETABLE, /* used for pagetables */ /* Second 128 byte cacheline */ NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) @@ -207,6 +205,7 @@ enum node_stat_item { #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) NR_KERNEL_SCS_KB, /* measured in KiB */ #endif + NR_PAGETABLE, /* used for pagetables */ NR_VM_NODE_STAT_ITEMS }; @@ -276,6 +275,8 @@ enum lruvec_flags { struct lruvec { struct list_head lists[NR_LRU_LISTS]; + /* per lruvec lru_lock for memcg */ + spinlock_t lru_lock; /* * These track the cost of reclaiming one LRU - file or anon - * over the other. As the observed cost of reclaiming one LRU @@ -354,26 +355,6 @@ enum zone_type { * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit * platforms may need both zones as they support peripherals with * different DMA addressing limitations. - * - * Some examples: - * - * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the - * rest of the lower 4G. - * - * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on - * the specific device. - * - * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the - * lower 4G. - * - * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary - * depending on the specific device. - * - * - s390 uses ZONE_DMA fixed to the lower 2G. - * - * - ia64 and riscv only use ZONE_DMA32. - * - * - parisc uses neither. */ #ifdef CONFIG_ZONE_DMA ZONE_DMA, @@ -470,6 +451,12 @@ struct zone { #endif struct pglist_data *zone_pgdat; struct per_cpu_pageset __percpu *pageset; + /* + * the high and batch values are copied to individual pagesets for + * faster access + */ + int pageset_high; + int pageset_batch; #ifndef CONFIG_SPARSEMEM /* @@ -796,7 +783,6 @@ typedef struct pglist_data { /* Write-intensive fields used by page reclaim */ ZONE_PADDING(_pad1_) - spinlock_t lru_lock; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* @@ -1429,17 +1415,6 @@ void sparse_init(void); #endif /* CONFIG_SPARSEMEM */ /* - * During memory init memblocks map pfns to nids. The search is expensive and - * this caches recent lookups. The implementation of __early_pfn_to_nid - * may treat start/end as pfns or sections. - */ -struct mminit_pfnnid_cache { - unsigned long last_start; - unsigned long last_end; - int last_nid; -}; - -/* * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we * need to check pfn validity within that MAX_ORDER_NR_PAGES block. * pfn_valid_within() should be used in this case; we optimise this away @@ -1451,37 +1426,6 @@ struct mminit_pfnnid_cache { #define pfn_valid_within(pfn) (1) #endif -#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL -/* - * pfn_valid() is meant to be able to tell if a given PFN has valid memmap - * associated with it or not. This means that a struct page exists for this - * pfn. The caller cannot assume the page is fully initialized in general. - * Hotplugable pages might not have been onlined yet. pfn_to_online_page() - * will ensure the struct page is fully online and initialized. Special pages - * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly. - * - * In FLATMEM, it is expected that holes always have valid memmap as long as - * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed - * that a valid section has a memmap for the entire section. 
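Earlier in this mmzone.h hunk, struct lruvec gains its own lru_lock and pglist_data loses the node-wide one. A hedged sketch of what list manipulation looks like under the per-lruvec lock; the lruvec lookup and page isolation that real callers perform through the memcg-aware helpers in mm/ are elided here:

/* Hedged sketch: move one page to the tail of its LRU under the new lock. */
static void rotate_page_on_lru(struct lruvec *lruvec, struct page *page,
			       enum lru_list lru)
{
	spin_lock_irq(&lruvec->lru_lock);
	if (PageLRU(page))
		list_move_tail(&page->lru, &lruvec->lists[lru]);
	spin_unlock_irq(&lruvec->lru_lock);
}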
- * - * However, an ARM, and maybe other embedded architectures in the future - * free memmap backing holes to save memory on the assumption the memmap is - * never used. The page_zone linkages are then broken even though pfn_valid() - * returns true. A walker of the full memmap must then do this additional - * check to ensure the memmap they are looking at is sane by making sure - * the zone and PFN linkages are still valid. This is expensive, but walkers - * of the full memmap are extremely rare. - */ -bool memmap_valid_within(unsigned long pfn, - struct page *page, struct zone *zone); -#else -static inline bool memmap_valid_within(unsigned long pfn, - struct page *page, struct zone *zone) -{ - return true; -} -#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ - #endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5b08a473cdba..c425290b21e2 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -838,4 +838,12 @@ struct mhi_device_id { kernel_ulong_t driver_data; }; +#define AUXILIARY_NAME_SIZE 32 +#define AUXILIARY_MODULE_PREFIX "auxiliary:" + +struct auxiliary_device_id { + char name[AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 7ccdf87f376f..c4e7a887f469 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -475,6 +475,10 @@ struct module { unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; #endif +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + unsigned int btf_data_size; + void *btf_data; +#endif #ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; @@ -740,7 +744,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod) } /* Get/put a kernel symbol (calls should be symmetric) */ -#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) +#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); }) #define symbol_put(x) do { } while (0) #define symbol_put_addr(x) do { } while (0) diff --git a/include/linux/msi.h b/include/linux/msi.h index 6b584cc4757c..360a0a7e7341 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -4,11 +4,50 @@ #include <linux/kobject.h> #include <linux/list.h> +#include <asm/msi.h> + +/* Dummy shadow structures if an architecture does not define them */ +#ifndef arch_msi_msg_addr_lo +typedef struct arch_msi_msg_addr_lo { + u32 address_lo; +} __attribute__ ((packed)) arch_msi_msg_addr_lo_t; +#endif + +#ifndef arch_msi_msg_addr_hi +typedef struct arch_msi_msg_addr_hi { + u32 address_hi; +} __attribute__ ((packed)) arch_msi_msg_addr_hi_t; +#endif + +#ifndef arch_msi_msg_data +typedef struct arch_msi_msg_data { + u32 data; +} __attribute__ ((packed)) arch_msi_msg_data_t; +#endif +/** + * msi_msg - Representation of a MSI message + * @address_lo: Low 32 bits of msi message address + * @arch_addrlo: Architecture specific shadow of @address_lo + * @address_hi: High 32 bits of msi message address + * (only used when device supports it) + * @arch_addrhi: Architecture specific shadow of @address_hi + * @data: MSI message data (usually 16 bits) + * @arch_data: Architecture specific shadow of @data + */ struct msi_msg { - u32 address_lo; /* low 32 bits of msi message address */ - u32 address_hi; /* high 32 bits of msi message address */ - u32 data; /* 16 bits 
of msi message data */ + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + union { + u32 data; + arch_msi_msg_data_t arch_data; + }; }; extern int pci_msi_ignore_mask; @@ -243,7 +282,6 @@ struct msi_controller { #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN #include <linux/irqhandler.h> -#include <asm/msi.h> struct irq_domain; struct irq_domain_ops; diff --git a/include/linux/net.h b/include/linux/net.h index 0dcd51feef02..9e2324efc26a 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -240,7 +240,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); struct socket *sockfd_lookup(int fd, int *err); -struct socket *sock_from_file(struct file *file, int *err); +struct socket *sock_from_file(struct file *file); #define sockfd_put(sock) fput(sock->file) int net_ratelimit(void); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 0b17c4322b09..934de56644e7 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -207,8 +207,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) NETIF_F_FSO) /* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \ - NETIF_F_GSO_SCTP) +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \ + NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST) /* * If one device supports one of these features, then enable them diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 964b494b0e8d..7bf167993c05 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -34,7 +34,6 @@ #include <linux/workqueue.h> #include <linux/dynamic_queue_limits.h> -#include <linux/ethtool.h> #include <net/net_namespace.h> #ifdef CONFIG_DCB #include <net/dcbnl.h> @@ -51,6 +50,7 @@ struct netpoll_info; struct device; +struct ethtool_ops; struct phy_device; struct dsa_port; struct ip_tunnel_parm; @@ -350,23 +350,25 @@ struct napi_struct { }; enum { - NAPI_STATE_SCHED, /* Poll is scheduled */ - NAPI_STATE_MISSED, /* reschedule a napi */ - NAPI_STATE_DISABLE, /* Disable pending */ - NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ - NAPI_STATE_LISTED, /* NAPI added to system lists */ - NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ - NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ + NAPI_STATE_SCHED, /* Poll is scheduled */ + NAPI_STATE_MISSED, /* reschedule a napi */ + NAPI_STATE_DISABLE, /* Disable pending */ + NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_LISTED, /* NAPI added to system lists */ + NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ + NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ + NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ }; enum { - NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), - NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), - NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), - NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), - NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), - NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), + NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), + NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), + NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), + 
NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), + NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), + NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), + NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), + NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), }; enum gro_result { @@ -437,6 +439,11 @@ static inline bool napi_disable_pending(struct napi_struct *n) return test_bit(NAPI_STATE_DISABLE, &n->state); } +static inline bool napi_prefer_busy_poll(struct napi_struct *n) +{ + return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); +} + bool napi_schedule_prep(struct napi_struct *n); /** @@ -1490,7 +1497,7 @@ struct net_device_ops { }; /** - * enum net_device_priv_flags - &struct net_device priv_flags + * enum netdev_priv_flags - &struct net_device priv_flags * * These are the &struct net_device, they are only set internally * by drivers and used in the kernel. These flags are invisible to @@ -2557,6 +2564,18 @@ static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int l u64_stats_update_end(&tstats->syncp); } +static inline void dev_sw_netstats_tx_add(struct net_device *dev, + unsigned int packets, + unsigned int len) +{ + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); + + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes += len; + tstats->tx_packets += packets; + u64_stats_update_end(&tstats->syncp); +} + static inline void dev_lstats_add(struct net_device *dev, unsigned int len) { struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); @@ -2584,6 +2603,20 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len) #define netdev_alloc_pcpu_stats(type) \ __netdev_alloc_pcpu_stats(type, GFP_KERNEL) +#define devm_netdev_alloc_pcpu_stats(dev, type) \ +({ \ + typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ + if (pcpu_stats) { \ + int __cpu; \ + for_each_possible_cpu(__cpu) { \ + typeof(type) *stat; \ + stat = per_cpu_ptr(pcpu_stats, __cpu); \ + u64_stats_init(&stat->syncp); \ + } \ + } \ + pcpu_stats; \ +}) + enum netdev_lag_tx_type { NETDEV_LAG_TX_TYPE_UNKNOWN, NETDEV_LAG_TX_TYPE_RANDOM, @@ -2789,7 +2822,6 @@ unsigned long netdev_boot_base(const char *prefix, int unit); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); -struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); void dev_add_pack(struct packet_type *pt); void dev_remove_pack(struct packet_type *pt); void __dev_remove_pack(struct packet_type *pt); @@ -2813,9 +2845,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); + int dev_queue_xmit(struct sk_buff *skb); int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); -int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); +int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); + +static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) +{ + int ret; + + ret = __dev_direct_xmit(skb, queue_id); + if (!dev_xmit_complete(ret)) + kfree_skb(skb); + return ret; +} + int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); void unregister_netdevice_many(struct list_head *head); @@ -3137,6 +3181,11 @@ static inline bool dev_validate_header(const struct net_device *dev, return false; } +static inline 
bool dev_has_header(const struct net_device *dev) +{ + return dev->header_ops && dev->header_ops->create; +} + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len, int size); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); @@ -3576,7 +3625,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) } /** - * netif_subqueue_stopped - test status of subqueue + * __netif_subqueue_stopped - test status of subqueue * @dev: network device * @queue_index: sub queue index * @@ -3590,6 +3639,13 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev, return netif_tx_queue_stopped(txq); } +/** + * netif_subqueue_stopped - test status of subqueue + * @dev: network device + * @skb: sub queue buffer pointer + * + * Check individual transmit queue of a device with multiple transmit queues. + */ static inline bool netif_subqueue_stopped(const struct net_device *dev, struct sk_buff *skb) { @@ -4501,6 +4557,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats); void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, const struct pcpu_sw_netstats __percpu *netstats); +void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index ab192720e2d6..46d9a0c26c67 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -198,6 +198,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* The max revision number supported by any set type + 1 */ +#define IPSET_REVISION_MAX 9 + /* The core set type structure */ struct ip_set_type { struct list_head list; @@ -215,6 +218,8 @@ struct ip_set_type { u8 family; /* Type revisions */ u8 revision_min, revision_max; + /* Revision-specific supported (create) flags */ + u8 create_flags[IPSET_REVISION_MAX+1]; /* Set features to control swapping */ u16 features; diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 89016d08f6a2..f6267e2883f2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -24,6 +24,12 @@ struct nfnl_callback { const u_int16_t attr_count; /* number of nlattr's */ }; +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD, + NFNL_ABORT_VALIDATE, +}; + struct nfnetlink_subsystem { const char *name; __u8 subsys_id; /* nfnetlink subsystem ID */ @@ -31,7 +37,8 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); - int (*abort)(struct net *net, struct sk_buff *skb, bool autoload); + int (*abort)(struct net *net, struct sk_buff *skb, + enum nfnl_abort_action action); void (*cleanup)(struct net *net); bool (*valid_genid)(struct net *net, u32 genid); }; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5deb099d156d..8ebb64193757 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -227,7 +227,7 @@ struct xt_table { unsigned int valid_hooks; /* Man behind the curtain... 
*/ - struct xt_table_info *private; + struct xt_table_info __rcu *private; /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; @@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); +struct xt_table_info +*xt_table_get_private_protected(const struct xt_table *table); + #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 082e2c41b7ff..5b70ca868bb1 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -16,7 +16,7 @@ struct ip_rt_info { u_int32_t mark; }; -int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); struct nf_queue_entry; diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 9b67394471e1..48314ade1506 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -42,7 +42,7 @@ struct nf_ipv6_ops { #if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); - int (*route_me_harder)(struct net *net, struct sk_buff *skb); + int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); @@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, #endif } -int ip6_route_me_harder(struct net *net, struct sk_buff *skb); +int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); -static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) +static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); @@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) if (!v6_ops) return -EHOSTUNREACH; - return v6_ops->route_me_harder(net, skb); + return v6_ops->route_me_harder(net, sk, skb); #elif IS_BUILTIN(CONFIG_IPV6) - return ip6_route_me_harder(net, skb); + return ip6_route_me_harder(net, sk, skb); #else return -EHOSTUNREACH; #endif diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 9dc7eeac924f..5b4c67c91f56 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -385,13 +385,6 @@ enum lock_type4 { NFS4_WRITEW_LT = 4 }; -enum change_attr_type4 { - NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, - NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, - NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, - NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, - NFS4_CHANGE_TYPE_IS_UNDEFINED = 4 -}; /* Mandatory Attributes */ #define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0) @@ -459,7 +452,6 @@ enum change_attr_type4 { #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) -#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) #define FATTR4_WORD2_MODE_UMASK (1UL << 17) #define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index c32c15216da3..f0373a6cb5fb 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -55,6 +55,7 @@ struct nfs_page { 
unsigned short wb_nio; /* Number of I/O attempts */ }; +struct nfs_pgio_mirror; struct nfs_pageio_descriptor; struct nfs_pageio_ops { void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); @@ -64,6 +65,9 @@ struct nfs_pageio_ops { unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, struct nfs_page *); void (*pg_cleanup)(struct nfs_pageio_descriptor *); + struct nfs_pgio_mirror * + (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32); + u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32); }; struct nfs_rw_ops { diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h index 5fbc4000358f..0f1d024bd958 100644 --- a/include/linux/ns_common.h +++ b/include/linux/ns_common.h @@ -2,12 +2,15 @@ #ifndef _LINUX_NS_COMMON_H #define _LINUX_NS_COMMON_H +#include <linux/refcount.h> + struct proc_ns_operations; struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; + refcount_t count; }; #endif diff --git a/include/linux/numa.h b/include/linux/numa.h index 8cb33ccfb671..cb44cfe2b725 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -21,13 +21,41 @@ #endif #ifdef CONFIG_NUMA +#include <linux/printk.h> +#include <asm/sparsemem.h> + /* Generic implementation available */ int numa_map_to_online_node(int node); -#else + +#ifndef memory_add_physaddr_to_nid +static inline int memory_add_physaddr_to_nid(u64 start) +{ + pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n", + start); + return 0; +} +#endif +#ifndef phys_to_target_node +static inline int phys_to_target_node(u64 start) +{ + pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n", + start); + return 0; +} +#endif +#else /* !CONFIG_NUMA */ static inline int numa_map_to_online_node(int node) { return NUMA_NO_NODE; } +static inline int memory_add_physaddr_to_nid(u64 start) +{ + return 0; +} +static inline int phys_to_target_node(u64 start) +{ + return 0; +} #endif #endif /* _LINUX_NUMA_H */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 06409a6c40bc..e162b757b6d5 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -31,6 +31,19 @@ enum nvmem_type { #define NVMEM_DEVID_AUTO (-2) /** + * struct nvmem_keepout - NVMEM register keepout range. + * + * @start: The first byte offset to avoid. + * @end: One beyond the last byte offset to avoid. + * @value: The byte to fill reads with for this region. + */ +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + +/** + * struct nvmem_config - NVMEM device configuration * * @dev: Parent device. @@ -39,6 +52,8 @@ enum nvmem_type { * @owner: Pointer to exporter module. Used for refcounting. * @cells: Optional array of pre-defined NVMEM cells. * @ncells: Number of elements in cells. + * @keepout: Optional array of keepout ranges (sorted ascending by start). + * @nkeepout: Number of elements in the keepout array. * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessible to root only.
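The keepout mechanism above lets a provider mask byte ranges that the core must never access, filling reads of the range with @value. A hedged sketch of a provider configuration using it; the device name, offsets, and fill byte are invented for illustration:

/* Hypothetical EEPROM whose bytes 0x20-0x2f hold factory data that must
 * stay opaque: reads of that window return 0xff. Ranges must be sorted
 * ascending by start, per the field documentation above. */
static const struct nvmem_keepout demo_keepout[] = {
	{ .start = 0x20, .end = 0x30, .value = 0xff },
};

static const struct nvmem_config demo_nvmem_config = {
	.name		= "demo-eeprom",	/* invented name */
	.keepout	= demo_keepout,
	.nkeepout	= ARRAY_SIZE(demo_keepout),
};

The rest of the config (parent device, reg_read/reg_write callbacks) is filled in as before handing the struct to nvmem_register() or its devm variant.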
@@ -66,6 +81,8 @@ struct nvmem_config { struct gpio_desc *wp_gpio; const struct nvmem_cell_info *cells; int ncells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; enum nvmem_type type; bool read_only; bool root_only; diff --git a/include/linux/of.h b/include/linux/of.h index 5d51891cbf1a..4b27c9a27df3 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -108,7 +108,7 @@ static inline void of_node_init(struct device_node *node) #if defined(CONFIG_OF_KOBJ) kobject_init(&node->kobj, &of_node_ktype); #endif - node->fwnode.ops = &of_fwnode_ops; + fwnode_init(&node->fwnode, &of_fwnode_ops); } #if defined(CONFIG_OF_KOBJ) @@ -558,6 +558,8 @@ int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); +phys_addr_t of_dma_get_max_cpu_address(struct device_node *np); + #else /* CONFIG_OF */ static inline void of_core_init(void) @@ -995,6 +997,11 @@ static inline int of_map_id(struct device_node *np, u32 id, return -EINVAL; } +static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np) +{ + return PHYS_ADDR_MAX; +} + #define of_match_ptr(_ptr) NULL #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ @@ -1300,6 +1307,7 @@ static inline int of_get_available_child_count(const struct device_node *np) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __used __section("__" #table "_of_table") \ + __aligned(__alignof__(struct of_device_id)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 4f6ba9379112..ec5d0290e0ee 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -86,8 +86,7 @@ */ /* - * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break - * locked- and dirty-page accounting. + * Don't use the pageflags directly. Use the PageFoo macros. * * The page flags field is split into two parts, the main flags area * which extends from the low bits upwards, and the fields area which @@ -335,6 +334,7 @@ PAGEFLAG(Referenced, referenced, PF_HEAD) PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) + TESTCLEARFLAG(LRU, lru, PF_HEAD) PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD) PAGEFLAG(Workingset, workingset, PF_HEAD) @@ -363,8 +363,7 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) * for its own purposes. 
* - PG_private and PG_private_2 cause releasepage() and co to be invoked */ -PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY) - __CLEARPAGEFLAG(Private, private, PF_ANY) +PAGEFLAG(Private, private, PF_ANY) PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) @@ -715,9 +714,8 @@ PAGEFLAG_FALSE(DoubleMap) #define PAGE_MAPCOUNT_RESERVE -128 #define PG_buddy 0x00000080 #define PG_offline 0x00000100 -#define PG_kmemcg 0x00000200 -#define PG_table 0x00000400 -#define PG_guard 0x00000800 +#define PG_table 0x00000200 +#define PG_guard 0x00000400 #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) @@ -769,12 +767,6 @@ PAGE_TYPE_OPS(Buddy, buddy) PAGE_TYPE_OPS(Offline, offline) /* - * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on - * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. - */ -PAGE_TYPE_OPS(Kmemcg, kmemcg) - -/* * Marks pages in use as page tables. */ PAGE_TYPE_OPS(Table, table) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index cfce186f0c4e..aff81ba31bd8 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -44,8 +44,12 @@ static inline void page_ext_init_flatmem(void) { } extern void page_ext_init(void); +static inline void page_ext_init_flatmem_late(void) +{ +} #else extern void page_ext_init_flatmem(void); +extern void page_ext_init_flatmem_late(void); static inline void page_ext_init(void) { } @@ -76,6 +80,10 @@ static inline void page_ext_init(void) { } +static inline void page_ext_init_flatmem_late(void) +{ +} + static inline void page_ext_init_flatmem(void) { } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c77b7c31b2e4..d5570deff400 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -344,9 +344,9 @@ static inline struct page *find_get_page_flags(struct address_space *mapping, /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search - * @offset: the page index + * @index: the page index * - * Looks up the page cache entry at @mapping & @offset. If there is a + * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, it is returned locked and with an increased * refcount. * @@ -363,9 +363,9 @@ static inline struct page *find_lock_page(struct address_space *mapping, /** * find_lock_head - Locate, pin and lock a pagecache page. * @mapping: The address_space to search. - * @offset: The page index. + * @index: The page index. * - * Looks up the page cache entry at @mapping & @offset. If there is a + * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, its head page is returned locked and with an increased * refcount. 
* @@ -906,6 +906,8 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac, xas_set(&xas, rac->_index); rcu_read_lock(); xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { + if (xas_retry(&xas, page)) + continue; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); array[i++] = page; diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 081d934eda64..ad4ddc17d403 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -43,9 +43,6 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec, unsigned pagevec_lookup_range_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, xa_mark_t tag); -unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, pgoff_t end, - xa_mark_t tag, unsigned max_pages); static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, xa_mark_t tag) { diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 033ce74f02e8..65d3d83015c3 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -10,6 +10,33 @@ #include <linux/platform_device.h> /* + * Memory address shift values for the byte-level address that + * can be used when accessing the PCI Express Configuration Space. + */ + +/* + * Enhanced Configuration Access Mechanism (ECAM) + * + * See PCI Express Base Specification, Revision 5.0, Version 1.0, + * Section 7.2.2, Table 7-1, p. 677. + */ +#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */ +#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */ + +#define PCIE_ECAM_BUS_MASK 0xff +#define PCIE_ECAM_DEVFN_MASK 0xff +#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */ + +#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT) +#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT) +#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK) + +#define PCIE_ECAM_OFFSET(bus, devfn, where) \ + (PCIE_ECAM_BUS(bus) | \ + PCIE_ECAM_DEVFN(devfn) | \ + PCIE_ECAM_REG(where)) + +/* * struct to hold pci ops and bus shift of the config window * for a PCI controller. 
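The new ECAM macros centralise the bus/devfn/register arithmetic that host controller drivers used to open-code. A hedged sketch of a config read through an already-ioremapped ECAM window; the bus, device, function, and offset values are arbitrary examples:

/* Hedged sketch: PCIE_ECAM_OFFSET(3, PCI_DEVFN(0, 1), 0x70) expands to
 * (3 << 20) | (1 << 12) | 0x70 = 0x301070. */
static u32 demo_read_cfg_dword(void __iomem *ecam_base)
{
	return readl(ecam_base + PCIE_ECAM_OFFSET(3, PCI_DEVFN(0, 1), 0x70));
}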
*/ diff --git a/include/linux/pci.h b/include/linux/pci.h index 22207a79762c..b32126d26997 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -281,6 +281,7 @@ enum pci_bus_speed { PCIE_SPEED_8_0GT = 0x16, PCIE_SPEED_16_0GT = 0x17, PCIE_SPEED_32_0GT = 0x18, + PCIE_SPEED_64_0GT = 0x19, PCI_SPEED_UNKNOWN = 0xff, }; @@ -304,6 +305,7 @@ struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_p2pdma; +struct rcec_ea; /* The pci_dev structure describes PCI devices */ struct pci_dev { @@ -327,6 +329,10 @@ struct pci_dev { u16 aer_cap; /* AER capability offset */ struct aer_stats *aer_stats; /* AER stats for this device */ #endif +#ifdef CONFIG_PCIEPORTBUS + struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ + struct pci_dev *rcec; /* Associated RCEC device */ +#endif u8 pcie_cap; /* PCIe capability offset */ u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ @@ -380,7 +386,7 @@ struct pci_dev { struct pcie_link_state *link_state; /* ASPM link state */ unsigned int ltr_path:1; /* Latency Tolerance Reporting supported from root to here */ - int l1ss; /* L1SS Capability pointer */ + u16 l1ss; /* L1SS Capability pointer */ #endif unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ @@ -1063,12 +1069,13 @@ void pci_sort_breadthfirst(void); /* Generic PCI functions exported to card drivers */ -int pci_find_capability(struct pci_dev *dev, int cap); -int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); -int pci_find_ext_capability(struct pci_dev *dev, int cap); -int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap); -int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); -int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); +u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); +u8 pci_find_capability(struct pci_dev *dev, int cap); +u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); +u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap); +u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap); +u16 pci_find_ext_capability(struct pci_dev *dev, int cap); +u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); u64 pci_get_dsn(struct pci_dev *dev); @@ -1190,7 +1197,6 @@ void pci_clear_master(struct pci_dev *dev); int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pci_set_cacheline_size(struct pci_dev *dev); -#define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); @@ -1271,7 +1277,7 @@ bool pci_dev_run_wake(struct pci_dev *dev); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); -void pci_wakeup_bus(struct pci_bus *bus); +void pci_resume_bus(struct pci_bus *bus); void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); /* For use by arch with custom probe code */ @@ -1279,7 +1285,6 @@ void set_pcie_port_type(struct pci_dev *pdev); void set_pcie_hotplug_bridge(struct pci_dev *pdev); /* Functions for PCI Hotplug drivers to use */ -int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge); unsigned int pci_rescan_bus(struct pci_bus *bus); void pci_lock_rescan_remove(void); @@ -1719,7 +1724,7 @@ 
static inline int __pci_register_driver(struct pci_driver *drv, static inline int pci_register_driver(struct pci_driver *drv) { return 0; } static inline void pci_unregister_driver(struct pci_driver *drv) { } -static inline int pci_find_capability(struct pci_dev *dev, int cap) +static inline u8 pci_find_capability(struct pci_dev *dev, int cap) { return 0; } static inline int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1ab1e24bcbce..d8156a5dbee8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -81,6 +81,7 @@ #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 #define PCI_CLASS_SYSTEM_SDHCI 0x0805 +#define PCI_CLASS_SYSTEM_RCEC 0x0807 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 505480217cf1..bf7966776c55 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -163,6 +163,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn); static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } #endif +bool arm_pmu_irq_is_nmi(void); + /* Internal functions only for core arm_pmu code */ struct arm_pmu *armpmu_alloc(void); struct arm_pmu *armpmu_alloc_atomic(void); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 0c19d279b97f..9a38f579bc76 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1022,18 +1022,14 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; u64 aux_size; - /* - * regs_user may point to task_pt_regs or to regs_user_copy, depending - * on arch details. - */ struct perf_regs regs_user; - struct pt_regs regs_user_copy; - struct perf_regs regs_intr; u64 stack_user_size; u64 phys_addr; u64 cgroup; + u64 data_page_size; + u64 code_page_size; } ____cacheline_aligned; /* default value for data source */ @@ -1400,11 +1396,14 @@ perf_event_addr_filters(struct perf_event *event) extern void perf_event_addr_filters_sync(struct perf_event *event); extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern int perf_output_begin_forward(struct perf_output_handle *handle, - struct perf_event *event, - unsigned int size); + struct perf_sample_data *data, + struct perf_event *event, + unsigned int size); extern int perf_output_begin_backward(struct perf_output_handle *handle, + struct perf_sample_data *data, struct perf_event *event, unsigned int size); @@ -1588,4 +1587,8 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now); +#ifdef CONFIG_MMU +extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr); +#endif + #endif /* _LINUX_PERF_EVENT_H */ diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index 2d12e97d5e7b..f632c5725f16 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -20,8 +20,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx); int perf_reg_validate(u64 mask); u64 perf_reg_abi(struct task_struct *task); void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs, - struct pt_regs *regs_user_copy); + struct pt_regs *regs); #else #define PERF_REG_EXTENDED_MASK 0 @@ -42,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task) } static inline void perf_get_regs_user(struct perf_regs *regs_user, - struct
pt_regs *regs, - struct pt_regs *regs_user_copy) + struct pt_regs *regs) { regs_user->regs = task_pt_regs(current); regs_user->abi = perf_reg_abi(current); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 38c33eabea89..8fcdfa52eb4b 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -258,6 +258,61 @@ static inline pte_t ptep_get(pte_t *ptep) } #endif +#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH +/* + * WARNING: only to be used in the get_user_pages_fast() implementation. + * + * With get_user_pages_fast(), we walk down the pagetables without taking any + * locks. For this we would like to load the pointers atomically, but sometimes + * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What + * we do have is the guarantee that a PTE will only either go from not present + * to present, or present to not present or both -- it will not switch to a + * completely different present page without a TLB flush in between; something + * that we are blocking by holding interrupts off. + * + * Setting ptes from not present to present goes: + * + * ptep->pte_high = h; + * smp_wmb(); + * ptep->pte_low = l; + * + * And present to not present goes: + * + * ptep->pte_low = 0; + * smp_wmb(); + * ptep->pte_high = 0; + * + * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'. + * We load pte_high *after* loading pte_low, which ensures we don't see an older + * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't + * picked up a changed pte high. We might have gotten rubbish values from + * pte_low and pte_high, but we are guaranteed that pte_low will not have the + * present bit set *unless* it is 'l'. Because get_user_pages_fast() only + * operates on present ptes we're safe. + */ +static inline pte_t ptep_get_lockless(pte_t *ptep) +{ + pte_t pte; + + do { + pte.pte_low = ptep->pte_low; + smp_rmb(); + pte.pte_high = ptep->pte_high; + smp_rmb(); + } while (unlikely(pte.pte_low != ptep->pte_low)); + + return pte; +} +#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */ +/* + * We require that the PTE can be read atomically. + */ +static inline pte_t ptep_get_lockless(pte_t *ptep) +{ + return ptep_get(ptep); +} +#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, @@ -1427,8 +1482,17 @@ typedef unsigned int pgtbl_mod_mask; #endif /* !__ASSEMBLY__ */ -#ifndef io_remap_pfn_range -#define io_remap_pfn_range remap_pfn_range +#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) +#ifdef CONFIG_PHYS_ADDR_T_64BIT +/* + * ZSMALLOC needs to know the highest PFN on 32-bit architectures + * with physical address space extension, but falls back to + * BITS_PER_LONG otherwise. 
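ptep_get_lockless() above is only safe because the caller prevents the PTE page from being freed or remotely flushed underneath it. A hedged sketch of the intended calling pattern, modelled on the get_user_pages_fast() style the comment describes; this is a pattern sketch, not the actual gup code:

/* Hedged sketch: disabling interrupts is what holds off the remote TLB
 * flush that the ordering argument above depends on. */
static bool sample_pte(pte_t *ptep, pte_t *out)
{
	unsigned long flags;
	pte_t pte;

	local_irq_save(flags);
	pte = ptep_get_lockless(ptep);
	local_irq_restore(flags);

	if (!pte_present(pte))
		return false;
	*out = pte;
	return true;
}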
+ */ +#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition +#else +#define MAX_POSSIBLE_PHYSMEM_BITS 32 +#endif #endif #ifndef has_transparent_hugepage @@ -1485,4 +1549,20 @@ typedef unsigned int pgtbl_mod_mask; #define pmd_leaf(x) 0 #endif +#ifndef pgd_leaf_size +#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT) +#endif +#ifndef p4d_leaf_size +#define p4d_leaf_size(x) P4D_SIZE +#endif +#ifndef pud_leaf_size +#define pud_leaf_size(x) PUD_SIZE +#endif +#ifndef pmd_leaf_size +#define pmd_leaf_size(x) PMD_SIZE +#endif +#ifndef pte_leaf_size +#define pte_leaf_size(x) PAGE_SIZE +#endif + #endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index eb3cb1a98b45..381a95732b6a 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -147,16 +147,8 @@ typedef enum { PHY_INTERFACE_MODE_MAX, } phy_interface_t; -/** +/* * phy_supported_speeds - return all speeds currently supported by a PHY device - * @phy: The PHY device to return supported speeds of. - * @speeds: buffer to store supported speeds in. - * @size: size of speeds buffer. - * - * Description: Returns the number of supported speeds, and fills - * the speeds buffer with the supported speeds. If speeds buffer is - * too small to contain all currently supported speeds, will return as - * many speeds as can fit. */ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, @@ -751,18 +743,11 @@ struct phy_driver { /** @read_status: Determines the negotiated speed and duplex */ int (*read_status)(struct phy_device *phydev); - /** @ack_interrupt: Clears any pending interrupts */ - int (*ack_interrupt)(struct phy_device *phydev); - - /** @config_intr: Enables or disables interrupts */ - int (*config_intr)(struct phy_device *phydev); - - /** - * @did_interrupt: Checks if the PHY generated an interrupt. - * For multi-PHY devices with shared PHY interrupt pin - * Set interrupt bits have to be cleared. + /** @config_intr: Enables or disables interrupts. + * It should also clear any pending interrupts prior to enabling the + * IRQs and after disabling them. */ - int (*did_interrupt)(struct phy_device *phydev); + int (*config_intr)(struct phy_device *phydev); /** @handle_interrupt: Override default interrupt handling */ irqreturn_t (*handle_interrupt)(struct phy_device *phydev); @@ -1022,14 +1007,9 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum, regnum, mask, set); } -/** +/* * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Same rules as for phy_read(); */ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); @@ -1064,38 +1044,21 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); __ret; \ }) -/** +/* * __phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Same rules as for __phy_read(); */ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); -/** +/* * phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. 
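The trimmed kernel-doc above defers to the full comments on the definitions in drivers/net/phy/; the MMD accessors themselves are unchanged. A minimal sketch of a read-modify-write pair; the register number and bit are invented, while MDIO_MMD_PMAPMD is the standard PMA/PMD device address:

/* Hedged sketch: toggle an invented vendor bit in MMD 1. */
static int demo_toggle_vendor_bit(struct phy_device *phydev)
{
	int val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, 0x8000);

	if (val < 0)
		return val;
	return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8000, val | BIT(0));
}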
- * @phydev: The phy_device struct - * @devad: The MMD to write to - * @regnum: The register on the MMD to read - * @val: value to write to @regnum - * - * Same rules as for phy_write(); */ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); -/** +/* * __phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to write to - * @regnum: The register on the MMD to read - * @val: value to write to @regnum - * - * Same rules as for __phy_write(); */ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); @@ -1510,16 +1473,13 @@ int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable); int genphy_soft_reset(struct phy_device *phydev); +irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev); static inline int genphy_config_aneg(struct phy_device *phydev) { return __genphy_config_aneg(phydev, false); } -static inline int genphy_no_ack_interrupt(struct phy_device *phydev) -{ - return 0; -} static inline int genphy_no_config_intr(struct phy_device *phydev) { return 0; @@ -1570,8 +1530,10 @@ void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); +void phy_error(struct phy_device *phydev); void phy_state_machine(struct work_struct *work); void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies); +void phy_trigger_machine(struct phy_device *phydev); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 5a5cb45ac57e..7c7e627503d2 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -8,7 +8,6 @@ #include <linux/workqueue.h> #include <linux/threads.h> #include <linux/nsproxy.h> -#include <linux/kref.h> #include <linux/ns_common.h> #include <linux/idr.h> @@ -18,7 +17,6 @@ struct fs_pin; struct pid_namespace { - struct kref kref; struct idr idr; struct rcu_head rcu; unsigned int pid_allocated; @@ -43,7 +41,7 @@ extern struct pid_namespace init_pid_ns; static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) { if (ns != &init_pid_ns) - kref_get(&ns->kref); + refcount_inc(&ns->ns.count); return ns; } diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h deleted file mode 100644 index 3e0ffe2d5d3d..000000000000 --- a/include/linux/platform_data/ad7298.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * AD7298 SPI ADC driver - * - * Copyright 2011 Analog Devices Inc. - */ - -#ifndef __LINUX_PLATFORM_DATA_AD7298_H__ -#define __LINUX_PLATFORM_DATA_AD7298_H__ - -/** - * struct ad7298_platform_data - Platform data for the ad7298 ADC driver - * @ext_ref: Whether to use an external reference voltage. 
- **/ -struct ad7298_platform_data { - bool ext_ref; -}; - -#endif /* IIO_ADC_AD7298_H_ */ diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h deleted file mode 100644 index c2bd0a13bea1..000000000000 --- a/include/linux/platform_data/ad7303.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Analog Devices AD7303 DAC driver - * - * Copyright 2013 Analog Devices Inc. - */ - -#ifndef __IIO_ADC_AD7303_H__ -#define __IIO_ADC_AD7303_H__ - -/** - * struct ad7303_platform_data - AD7303 platform data - * @use_external_ref: If set to true use an external voltage reference connected - * to the REF pin, otherwise use the internal reference derived from Vdd. - */ -struct ad7303_platform_data { - bool use_external_ref; -}; - -#endif diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h index 732af46b2d16..9b4dca6ae70b 100644 --- a/include/linux/platform_data/ad7887.h +++ b/include/linux/platform_data/ad7887.h @@ -13,13 +13,9 @@ * second input channel, and Vref is internally connected to Vdd. If set to * false the device is used in single channel mode and AIN1/Vref is used as * VREF input. - * @use_onchip_ref: Whether to use the onchip reference. If set to true the - * internal 2.5V reference is used. If set to false a external reference is - * used. */ struct ad7887_platform_data { bool en_dual; - bool use_onchip_ref; }; #endif /* IIO_ADC_AD7887_H_ */ diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h deleted file mode 100644 index 86667235077a..000000000000 --- a/include/linux/platform_data/adau1977.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * ADAU1977/ADAU1978/ADAU1979 driver - * - * Copyright 2014 Analog Devices Inc. 
- * Author: Lars-Peter Clausen <lars@metafoo.de> - */ - -#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__ -#define __LINUX_PLATFORM_DATA_ADAU1977_H__ - -/** - * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting - * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V - * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V - * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V - * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V - * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V - * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V - * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V - * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V - * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V - */ -enum adau1977_micbias { - ADAU1977_MICBIAS_5V0 = 0x0, - ADAU1977_MICBIAS_5V5 = 0x1, - ADAU1977_MICBIAS_6V0 = 0x2, - ADAU1977_MICBIAS_6V5 = 0x3, - ADAU1977_MICBIAS_7V0 = 0x4, - ADAU1977_MICBIAS_7V5 = 0x5, - ADAU1977_MICBIAS_8V0 = 0x6, - ADAU1977_MICBIAS_8V5 = 0x7, - ADAU1977_MICBIAS_9V0 = 0x8, -}; - -/** - * struct adau1977_platform_data - Platform configuration data for the ADAU1977 - * @micbias: Specifies the voltage for the MICBIAS pin - */ -struct adau1977_platform_data { - enum adau1977_micbias micbias; -}; - -#endif diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h deleted file mode 100644 index f20eaeb827ce..000000000000 --- a/include/linux/platform_data/at91_adc.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2011 Free Electrons - */ - -#ifndef _AT91_ADC_H_ -#define _AT91_ADC_H_ - -enum atmel_adc_ts_type { - ATMEL_ADC_TOUCHSCREEN_NONE = 0, - ATMEL_ADC_TOUCHSCREEN_4WIRE = 4, - ATMEL_ADC_TOUCHSCREEN_5WIRE = 5, -}; - -/** - * struct at91_adc_trigger - description of triggers - * @name: name of the trigger advertised to the user - * @value: value to set in the ADC's trigger setup register - to enable the trigger - * @is_external: Does the trigger rely on an external pin? - */ -struct at91_adc_trigger { - const char *name; - u8 value; - bool is_external; -}; - -/** - * struct at91_adc_data - platform data for ADC driver - * @channels_used: channels in use on the board as a bitmask - * @startup_time: startup time of the ADC in microseconds - * @trigger_list: Triggers available in the ADC - * @trigger_number: Number of triggers available in the ADC - * @use_external_triggers: does the board has external triggers availables - * @vref: Reference voltage for the ADC in millivolts - * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires) - */ -struct at91_adc_data { - unsigned long channels_used; - u8 startup_time; - struct at91_adc_trigger *trigger_list; - u8 trigger_number; - bool use_external_triggers; - u16 vref; - enum atmel_adc_ts_type touchscreen_type; -}; - -extern void __init at91_add_device_adc(struct at91_adc_data *data); -#endif diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 1fcfe9e63cb9..a3a9a878415f 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -1419,7 +1419,7 @@ struct ec_response_flash_info_2 { uint16_t num_banks_total; /* Number of banks described in banks array. 
*/ uint16_t num_banks_desc; - struct ec_flash_bank banks[0]; + struct ec_flash_bank banks[]; } __ec_align4; /* @@ -2420,12 +2420,12 @@ struct ec_response_motion_sense_fifo_info { /* Total amount of vector lost */ uint16_t total_lost; /* Lost events since the last fifo_info, per sensors */ - uint16_t lost[0]; + uint16_t lost[]; } __ec_todo_packed; struct ec_response_motion_sense_fifo_data { uint32_t number_data; - struct ec_response_motion_sensor_data data[0]; + struct ec_response_motion_sensor_data data[]; } __ec_todo_packed; /* List supported activity recognition */ @@ -3093,7 +3093,7 @@ struct ec_response_tmp006_get_calibration_v1 { uint8_t algorithm; uint8_t num_params; uint8_t reserved[2]; - float val[0]; + float val[]; } __ec_align4; struct ec_params_tmp006_set_calibration_v1 { @@ -3101,7 +3101,7 @@ struct ec_params_tmp006_set_calibration_v1 { uint8_t algorithm; uint8_t num_params; uint8_t reserved; - float val[0]; + float val[]; } __ec_align4; @@ -5076,7 +5076,7 @@ struct ec_response_pd_log { uint8_t type; /* event type : see PD_EVENT_xx below */ uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */ uint16_t data; /* type-defined data payload */ - uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */ + uint8_t payload[]; /* optional additional data payload: 0..16 bytes */ } __ec_align4; /* The timestamp is the microsecond counter shifted to get about a ms. */ @@ -5789,7 +5789,7 @@ struct ec_response_fp_encryption_status { struct ec_response_tp_frame_info { uint32_t n_frames; - uint32_t frame_sizes[0]; + uint32_t frame_sizes[]; } __ec_align4; /* Create a snapshot of current frame readings */ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 4a415ae851ef..02599687770c 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -69,7 +69,7 @@ struct cros_ec_command { uint32_t outsize; uint32_t insize; uint32_t result; - uint8_t data[0]; + uint8_t data[]; }; /** diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h new file mode 100644 index 000000000000..388846766bb2 --- /dev/null +++ b/include/linux/platform_data/hirschmann-hellcreek.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0 or MIT) */ +/* + * Hirschmann Hellcreek TSN switch platform data. 
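The banks[0] -> banks[] conversions above replace GNU zero-length arrays with C99 flexible array members, which lets the compiler and helpers like struct_size() reason about the trailing storage. A sketch of allocating such a structure (alloc_flash_info is a hypothetical helper):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Room for 'n' trailing bank entries: struct_size() computes
 * sizeof(*r) + n * sizeof(r->banks[0]) with overflow checking,
 * which only works once 'banks' is a true flexible array member. */
static struct ec_response_flash_info_2 *alloc_flash_info(size_t n)
{
	struct ec_response_flash_info_2 *r;

	r = kzalloc(struct_size(r, banks, n), GFP_KERNEL);
	if (r)
		r->num_banks_desc = n;
	return r;
}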
+ * + * Copyright (C) 2020 Linutronix GmbH + * Author Kurt Kanzenbach <kurt@linutronix.de> + */ + +#ifndef _HIRSCHMANN_HELLCREEK_H_ +#define _HIRSCHMANN_HELLCREEK_H_ + +#include <linux/types.h> + +struct hellcreek_platform_data { + int num_ports; /* Number of switch ports */ + int is_100_mbits; /* Is it configured to 100 or 1000 mbit/s */ + int qbv_support; /* Qbv support on front TSN ports */ + int qbv_on_cpu_port; /* Qbv support on the CPU port */ + int qbu_support; /* Qbu support on front TSN ports */ + u16 module_id; /* Module identification */ +}; + +#endif /* _HIRSCHMANN_HELLCREEK_H_ */ diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h deleted file mode 100644 index 293b61b60c9d..000000000000 --- a/include/linux/platform_data/media/coda.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2013 Philipp Zabel, Pengutronix - */ -#ifndef PLATFORM_CODA_H -#define PLATFORM_CODA_H - -struct device; - -struct coda_platform_data { - struct device *iram_dev; -}; - -#endif diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h deleted file mode 100644 index 0844b21372c7..000000000000 --- a/include/linux/platform_data/serial-imx.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de> - */ - -#ifndef ASMARM_ARCH_UART_H -#define ASMARM_ARCH_UART_H - -#define IMXUART_HAVE_RTSCTS (1<<0) - -struct imxuart_platform_data { - unsigned int flags; -}; - -#endif diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index fe815d7d9f58..d661399b217d 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -10,8 +10,6 @@ #ifndef __SHMOB_DRM_H__ #define __SHMOB_DRM_H__ -#include <linux/kernel.h> - #include <drm/drm_mode.h> enum shmob_drm_clk_source { diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index c59999ce044e..240dce553a0b 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -50,6 +50,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25) #define SYSC_MODULE_QUIRK_PRUSS BIT(24) #define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h deleted file mode 100644 index ad9794d09bc8..000000000000 --- a/include/linux/platform_data/usb-ehci-mxc.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H -#define __INCLUDE_ASM_ARCH_MXC_EHCI_H - -struct mxc_usbh_platform_data { - int (*init)(struct platform_device *pdev); - int (*exit)(struct platform_device *pdev); - - unsigned int portsc; - struct usb_phy *otg; -}; - -#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */ - diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 897b8332a39f..2f274cf52805 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -62,6 +62,7 @@ /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 +#define ASUS_WMI_DEVID_LID_FLIP 0x00060062 /* Storage */ #define ASUS_WMI_DEVID_CARDREADER 0x00080013 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 77a2aada106d..3f23f6e430bf 100644 ---
a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -15,6 +15,7 @@ #define PLATFORM_DEVID_NONE (-1) #define PLATFORM_DEVID_AUTO (-2) +struct irq_affinity; struct mfd_cell; struct property_entry; struct platform_device_id; @@ -52,6 +53,9 @@ extern struct device platform_bus; extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); +extern struct resource *platform_get_mem_or_io(struct platform_device *, + unsigned int); + extern struct device * platform_find_device_by_driver(struct device *start, const struct device_driver *drv); @@ -70,6 +74,11 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev, extern int platform_get_irq(struct platform_device *, unsigned int); extern int platform_get_irq_optional(struct platform_device *, unsigned int); extern int platform_irq_count(struct platform_device *); +extern int devm_platform_get_irqs_affinity(struct platform_device *dev, + struct irq_affinity *affd, + unsigned int minvec, + unsigned int maxvec, + int **irqs); extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 1ad0ec481416..2ca919ae8d36 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -255,24 +255,24 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd, } static inline int pm_genpd_remove(struct generic_pm_domain *genpd) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_genpd_remove_notifier(struct device *dev) { - return -ENOTSUPP; + return -EOPNOTSUPP; } #define simple_qos_governor (*(struct dev_power_governor *)(NULL)) @@ -280,11 +280,11 @@ static inline int dev_pm_genpd_remove_notifier(struct device *dev) #endif #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP -void pm_genpd_syscore_poweroff(struct device *dev); -void pm_genpd_syscore_poweron(struct device *dev); +void dev_pm_genpd_suspend(struct device *dev); +void dev_pm_genpd_resume(struct device *dev); #else -static inline void pm_genpd_syscore_poweroff(struct device *dev) {} -static inline void pm_genpd_syscore_poweron(struct device *dev) {} +static inline void dev_pm_genpd_suspend(struct device *dev) {} +static inline void dev_pm_genpd_resume(struct device *dev) {} #endif /* OF PM domain providers */ @@ -325,13 +325,13 @@ struct device *genpd_dev_pm_attach_by_name(struct device *dev, static inline int of_genpd_add_provider_simple(struct device_node *np, struct generic_pm_domain *genpd) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int of_genpd_add_provider_onecell(struct device_node *np, struct genpd_onecell_data *data) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void of_genpd_del_provider(struct device_node *np) {} @@ -387,7 +387,7 @@ static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev, static inline struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index dbb484524f82..1435c054016a 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -90,7 +90,6 @@ 
struct dev_pm_set_opp_data { #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); -struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index); void dev_pm_opp_put_opp_table(struct opp_table *opp_table); unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 18b02dcc168e..b492ae00cc90 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); -extern void pm_runtime_clean_up_links(struct device *dev); extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); -extern void pm_runtime_drop_link(struct device *dev); +extern void pm_runtime_drop_link(struct device_link *link); /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. @@ -276,11 +275,10 @@ static inline u64 pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -static inline void pm_runtime_clean_up_links(struct device *dev) {} static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} -static inline void pm_runtime_drop_link(struct device *dev) {} +static inline void pm_runtime_drop_link(struct device_link *link) {} #endif /* !CONFIG_PM */ @@ -389,6 +387,27 @@ static inline int pm_runtime_get_sync(struct device *dev) } /** + * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it. + * @dev: Target device. + * + * Resume @dev synchronously and if that is successful, increment its runtime + * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been + * incremented or a negative error code otherwise. + */ +static inline int pm_runtime_resume_and_get(struct device *dev) +{ + int ret; + + ret = __pm_runtime_resume(dev, RPM_GET_PUT); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + return 0; +} + +/** * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0. * @dev: Target device. 
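The new pm_runtime_resume_and_get() avoids the classic pm_runtime_get_sync() pitfall of leaving the usage counter raised even when the resume fails. A minimal sketch of the intended call pattern (foo_do_io is hypothetical):

static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage counter already dropped for us */

	/* ... access the now-resumed hardware ... */

	pm_runtime_put(dev);
	return 0;
}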
* diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index aa3da6611533..196a157456aa 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -84,6 +84,11 @@ static inline bool device_may_wakeup(struct device *dev) return dev->power.can_wakeup && !!dev->power.wakeup; } +static inline bool device_wakeup_path(struct device *dev) +{ + return dev->power.wakeup_path; +} + static inline void device_set_wakeup_path(struct device *dev) { dev->power.wakeup_path = true; @@ -174,6 +179,11 @@ static inline bool device_may_wakeup(struct device *dev) return dev->power.can_wakeup && dev->power.should_wakeup; } +static inline bool device_wakeup_path(struct device *dev) +{ + return false; +} + static inline void device_set_wakeup_path(struct device *dev) {} static inline void __pm_stay_awake(struct wakeup_source *ws) {} diff --git a/include/linux/poison.h b/include/linux/poison.h index dc8ae5d8db03..aff1c9250c82 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -27,11 +27,7 @@ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ -#ifdef CONFIG_PAGE_POISONING_ZERO -#define PAGE_POISON 0x00 -#else #define PAGE_POISON 0xaa -#endif /********** mm/page_alloc.c ************/ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7d9c1c0e149c..69cc8b64aa3a 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -77,31 +77,33 @@ /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include <asm/preempt.h> +#define nmi_count() (preempt_count() & NMI_MASK) #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) -#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ - | NMI_MASK)) +#define irq_count() (nmi_count() | hardirq_count() | softirq_count()) /* - * Are we doing bottom half or hardware interrupt processing? + * Macros to retrieve the current execution context: * - * in_irq() - We're in (hard) IRQ context + * in_nmi() - We're in NMI context + * in_hardirq() - We're in hard IRQ context + * in_serving_softirq() - We're in softirq context + * in_task() - We're in task context + */ +#define in_nmi() (nmi_count()) +#define in_hardirq() (hardirq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq())) + +/* + * The following macros are deprecated and should not be used in new code: + * in_irq() - Obsolete version of in_hardirq() * in_softirq() - We have BH disabled, or are processing softirqs * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled - * in_serving_softirq() - We're in softirq context - * in_nmi() - We're in NMI context - * in_task() - We're in task context - * - * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really - * should not be used in new code. */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -#define in_nmi() (preempt_count() & NMI_MASK) -#define in_task() (!(preempt_count() & \ - (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) /* * The preempt_count offset after preempt_disable(); @@ -322,34 +324,71 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, #endif -/** - * migrate_disable - Prevent migration of the current task +#ifdef CONFIG_SMP + +/* + * Migrate-Disable and why it is undesired. 
* - * Maps to preempt_disable() which also disables preemption. Use - * migrate_disable() to annotate that the intent is to prevent migration, - * but not necessarily preemption. + * When a preempted task becomes eligible to run under the ideal model (IOW it + * becomes one of the M highest priority tasks), it might still have to wait + * for the preemptee's migrate_disable() section to complete. Thereby suffering + * a reduction in bandwidth in the exact duration of the migrate_disable() + * section. * - * Can be invoked nested like preempt_disable() and needs the corresponding - * number of migrate_enable() invocations. - */ -static __always_inline void migrate_disable(void) -{ - preempt_disable(); -} - -/** - * migrate_enable - Allow migration of the current task + * Per this argument, the change from preempt_disable() to migrate_disable() + * gets us: + * + * - a higher priority task gains reduced wake-up latency; with preempt_disable() + * it would have had to wait for the lower priority task. + * + * - a lower priority task, which under preempt_disable() could've instantly + * migrated away when another CPU becomes available, is now constrained + * by the ability to push the higher priority task away, which might itself be + * in a migrate_disable() section, reducing its available bandwidth. * - * Counterpart to migrate_disable(). + * IOW it trades latency / moves the interference term, but it stays in the + * system, and as long as it remains unbounded, the system is not fully + * deterministic. * - * As migrate_disable() can be invoked nested, only the outermost invocation - * reenables migration. * + * The reason we have it anyway. + * + * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a + * number of primitives into becoming preemptible, they would also allow + * migration. This turns out to break a bunch of per-cpu usage. To this end, + * all these primitives employ migrate_disable() to restore this implicit + * assumption. + * + * This is a 'temporary' work-around at best. The correct solution is getting + * rid of the above assumptions and reworking the code to employ explicit + * per-cpu locking or short preempt-disable regions. + * + * The end goal must be to get rid of migrate_disable(), alternatively we need + * a schedulability theory that does not depend on arbitrary migration. + * + * + * Notes on the implementation. + * + * The implementation is particularly tricky since existing code patterns + * dictate neither migrate_disable() nor migrate_enable() is allowed to block. + * This means that it cannot use cpus_read_lock() to serialize against hotplug, + * nor can it easily migrate itself into a pending affinity mask change on + * migrate_enable(). + * + * + * Note: even non-work-conserving schedulers like semi-partitioned depend on + * migration, so migrate_disable() is not only a problem for + * work-conserving schedulers.
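For illustration, the per-cpu usage the comment alludes to might look like the hedged sketch below; foo_pcpu/foo_stats are invented (lock initialization elided). The point is that migrate_disable() keeps the this_cpu pointer stable across preemption, while a spinlock_t, which sleeps on PREEMPT_RT, can still be taken inside the section:

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct foo_pcpu {
	spinlock_t lock;	/* a sleeping lock on PREEMPT_RT */
	u64 events;
	u64 bytes;
};

static DEFINE_PER_CPU(struct foo_pcpu, foo_stats);

static void foo_account(u64 bytes)
{
	struct foo_pcpu *s;

	/* preempt_disable() would forbid the (sleeping, on RT) lock
	 * below; migrate_disable() only pins the task to this CPU. */
	migrate_disable();
	s = this_cpu_ptr(&foo_stats);
	spin_lock(&s->lock);
	s->events++;
	s->bytes += bytes;
	spin_unlock(&s->lock);
	migrate_enable();
}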
+ * */ -static __always_inline void migrate_enable(void) -{ - preempt_enable(); -} +extern void migrate_disable(void); +extern void migrate_enable(void); + +#else + +static inline void migrate_disable(void) { } +static inline void migrate_enable(void) { } + +#endif /* CONFIG_SMP */ #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/property.h b/include/linux/property.h index 2d4542629d80..0a9001fe7aea 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -85,9 +85,12 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_parent( struct fwnode_handle *fwnode); +struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode); unsigned int fwnode_count_parents(const struct fwnode_handle *fwn); struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn, unsigned int depth); +bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor, + struct fwnode_handle *test_child); struct fwnode_handle *fwnode_get_next_child_node( const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct fwnode_handle *fwnode_get_next_available_child_node( diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index c6487b7ab026..ae04968a3a47 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -31,6 +31,11 @@ #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) #define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6) +#define PTP_MSGTYPE_SYNC 0x0 +#define PTP_MSGTYPE_DELAY_REQ 0x1 +#define PTP_MSGTYPE_PDELAY_REQ 0x2 +#define PTP_MSGTYPE_PDELAY_RESP 0x3 + #define PTP_EV_PORT 319 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ @@ -140,7 +145,7 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, /* The return is meaningless. The stub function would not be * executed since no available header from ptp_parse_header. */ - return 0; + return PTP_MSGTYPE_SYNC; } #endif #endif /* _PTP_CLASSIFY_H_ */ diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index d3e8ba5c7125..0d47fd33b228 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -12,6 +12,19 @@ #include <linux/pps_kernel.h> #include <linux/ptp_clock.h> +/** + * struct ptp_clock_request - request PTP clock event + * + * @type: The type of the request. + * EXTTS: Configure external trigger timestamping + * PEROUT: Configure periodic output signal (e.g. PPS) + * PPS: trigger internal PPS event for input + * into kernel PPS subsystem + * @extts: describes configuration for external trigger timestamping. + * This is only valid when event == PTP_CLK_REQ_EXTTS. + * @perout: describes configuration for periodic output. + * This is only valid when event == PTP_CLK_REQ_PEROUT. 
+ */ struct ptp_clock_request { enum { diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h index b950e961cfa8..d7dc1559427f 100644 --- a/include/linux/purgatory.h +++ b/include/linux/purgatory.h @@ -3,7 +3,7 @@ #define _LINUX_PURGATORY_H #include <linux/types.h> -#include <crypto/sha.h> +#include <crypto/sha2.h> #include <uapi/linux/kexec.h> struct kexec_sha_region { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 57fb295ea41a..68d17a4fbf20 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -7,6 +7,7 @@ #ifndef _QED_IF_H #define _QED_IF_H +#include <linux/ethtool.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/netdevice.h> diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6cdd0152c253..de0826411311 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -241,6 +241,11 @@ bool rcu_lockdep_current_cpu_online(void); static inline bool rcu_lockdep_current_cpu_online(void) { return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ +extern struct lockdep_map rcu_lock_map; +extern struct lockdep_map rcu_bh_lock_map; +extern struct lockdep_map rcu_sched_lock_map; +extern struct lockdep_map rcu_callback_map; + #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) @@ -253,10 +258,6 @@ static inline void rcu_lock_release(struct lockdep_map *map) lock_release(map, _THIS_IP_); } -extern struct lockdep_map rcu_lock_map; -extern struct lockdep_map rcu_bh_lock_map; -extern struct lockdep_map rcu_sched_lock_map; -extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); @@ -327,7 +328,7 @@ static inline void rcu_preempt_sleep_check(void) { } #else /* #ifdef CONFIG_PROVE_RCU */ -#define RCU_LOCKDEP_WARN(c, s) do { } while (0) +#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) #define rcu_sleep_check() do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index 3e7919fc5f34..86c8f6c98412 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -11,10 +11,10 @@ #include <linux/sched.h> #include <linux/rcupdate.h> -#ifdef CONFIG_DEBUG_LOCK_ALLOC - extern struct lockdep_map rcu_trace_lock_map; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + static inline int rcu_read_lock_trace_held(void) { return lock_is_held(&rcu_trace_lock_map); diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 7c1ecdb356d8..2a97334eb786 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -89,6 +89,8 @@ static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } static inline void rcu_irq_exit_preempt(void) { } static inline void rcu_irq_exit_check_preempt(void) { } +#define rcu_is_idle_cpu(cpu) \ + (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq()) static inline void exit_rcu(void) { } static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) { diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 59eb5cd567d7..df578b73960f 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -50,6 +50,7 @@ void rcu_irq_exit(void); void rcu_irq_exit_preempt(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); +bool rcu_is_idle_cpu(int cpu); #ifdef CONFIG_PROVE_RCU void rcu_irq_exit_check_preempt(void); diff --git 
a/include/linux/refcount.h b/include/linux/refcount.h index 7fabb1af18e0..b8a6e387f8f9 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -101,7 +101,7 @@ struct mutex; /** - * struct refcount_t - variant of atomic_t specialized for reference counts + * typedef refcount_t - variant of atomic_t specialized for reference counts * @refs: atomic_t counter field * * The counter saturates at REFCOUNT_SATURATED and will not move once @@ -147,24 +147,6 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } -/** - * refcount_add_not_zero - add a value to a refcount unless it is 0 - * @i: the value to add to the refcount - * @r: the refcount - * - * Will saturate at REFCOUNT_SATURATED and WARN. - * - * Provides no memory ordering, it is assumed the caller has guaranteed the - * object memory to be stable (RCU, etc.). It does provide a control dependency - * and thereby orders future stores. See the comment on top. - * - * Use of this function is not recommended for the normal reference counting - * use case in which references are taken and released one at a time. In these - * cases, refcount_inc(), or one of its variants, should instead be used to - * increment a reference count. - * - * Return: false if the passed refcount is 0, true otherwise - */ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) { int old = refcount_read(r); @@ -183,17 +165,12 @@ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, in return old; } -static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) -{ - return __refcount_add_not_zero(i, r, NULL); -} - /** - * refcount_add - add a value to a refcount + * refcount_add_not_zero - add a value to a refcount unless it is 0 * @i: the value to add to the refcount * @r: the refcount * - * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN. + * Will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency @@ -203,7 +180,14 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. + * + * Return: false if the passed refcount is 0, true otherwise */ +static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) +{ + return __refcount_add_not_zero(i, r, NULL); +} + static inline void __refcount_add(int i, refcount_t *r, int *oldp) { int old = atomic_fetch_add_relaxed(i, &r->refs); @@ -217,11 +201,32 @@ static inline void __refcount_add(int i, refcount_t *r, int *oldp) refcount_warn_saturate(r, REFCOUNT_ADD_OVF); } +/** + * refcount_add - add a value to a refcount + * @i: the value to add to the refcount + * @r: the refcount + * + * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN. + * + * Provides no memory ordering, it is assumed the caller has guaranteed the + * object memory to be stable (RCU, etc.). It does provide a control dependency + * and thereby orders future stores. See the comment on top. + * + * Use of this function is not recommended for the normal reference counting + * use case in which references are taken and released one at a time. 
In these + * cases, refcount_inc(), or one of its variants, should instead be used to + * increment a reference count. + */ static inline void refcount_add(int i, refcount_t *r) { __refcount_add(i, r, NULL); } +static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero(1, r, oldp); +} + /** * refcount_inc_not_zero - increment a refcount unless it is 0 * @r: the refcount to increment @@ -235,14 +240,14 @@ static inline void refcount_add(int i, refcount_t *r) * * Return: true if the increment was successful, false otherwise */ -static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp) +static inline __must_check bool refcount_inc_not_zero(refcount_t *r) { - return __refcount_add_not_zero(1, r, oldp); + return __refcount_inc_not_zero(r, NULL); } -static inline __must_check bool refcount_inc_not_zero(refcount_t *r) +static inline void __refcount_inc(refcount_t *r, int *oldp) { - return __refcount_inc_not_zero(r, NULL); + __refcount_add(1, r, oldp); } /** @@ -257,14 +262,27 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r) * Will WARN if the refcount is 0, as this represents a possible use-after-free * condition. */ -static inline void __refcount_inc(refcount_t *r, int *oldp) +static inline void refcount_inc(refcount_t *r) { - __refcount_add(1, r, oldp); + __refcount_inc(r, NULL); } -static inline void refcount_inc(refcount_t *r) +static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp) { - __refcount_inc(r, NULL); + int old = atomic_fetch_sub_release(i, &r->refs); + + if (oldp) + *oldp = old; + + if (old == i) { + smp_acquire__after_ctrl_dep(); + return true; + } + + if (unlikely(old < 0 || old - i < 0)) + refcount_warn_saturate(r, REFCOUNT_SUB_UAF); + + return false; } /** @@ -287,27 +305,14 @@ static inline void refcount_inc(refcount_t *r) * * Return: true if the resulting refcount is 0, false otherwise */ -static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp) +static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) { - int old = atomic_fetch_sub_release(i, &r->refs); - - if (oldp) - *oldp = old; - - if (old == i) { - smp_acquire__after_ctrl_dep(); - return true; - } - - if (unlikely(old < 0 || old - i < 0)) - refcount_warn_saturate(r, REFCOUNT_SUB_UAF); - - return false; + return __refcount_sub_and_test(i, r, NULL); } -static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) +static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp) { - return __refcount_sub_and_test(i, r, NULL); + return __refcount_sub_and_test(1, r, oldp); } /** @@ -323,26 +328,11 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) * * Return: true if the resulting refcount is 0, false otherwise */ -static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp) -{ - return __refcount_sub_and_test(1, r, oldp); -} - static inline __must_check bool refcount_dec_and_test(refcount_t *r) { return __refcount_dec_and_test(r, NULL); } -/** - * refcount_dec - decrement a refcount - * @r: the refcount - * - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement - * when saturated at REFCOUNT_SATURATED. - * - * Provides release memory ordering, such that prior loads and stores are done - * before. 
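Taken together, the reshuffled refcount kernel-doc above describes the usual lockless lookup/release discipline: the lookup side must use an inc-not-zero variant because the count may be concurrently dropping to zero, and the release side relies on dec-and-test's release/acquire pairing. A hedged sketch with a hypothetical struct foo:

struct foo {
	refcount_t ref;
	/* ... */
};

static struct foo *foo_get(struct foo *f)
{
	/* E.g. under RCU: the object may already be dying, so a plain
	 * refcount_inc() would be a use-after-free; take the reference
	 * only if the count is still non-zero. */
	if (f && !refcount_inc_not_zero(&f->ref))
		return NULL;
	return f;
}

static void foo_put(struct foo *f)
{
	/* Release ordering on the decrement, acquire on hitting zero,
	 * so the free sees all prior stores to *f. */
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}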
- */ static inline void __refcount_dec(refcount_t *r, int *oldp) { int old = atomic_fetch_sub_release(1, &r->refs); @@ -354,6 +344,16 @@ static inline void __refcount_dec(refcount_t *r, int *oldp) refcount_warn_saturate(r, REFCOUNT_DEC_LEAK); } +/** + * refcount_dec - decrement a refcount + * @r: the refcount + * + * Similar to atomic_dec(), it will WARN on underflow and fail to decrement + * when saturated at REFCOUNT_SATURATED. + * + * Provides release memory ordering, such that prior loads and stores are done + * before. + */ static inline void refcount_dec(refcount_t *r) { __refcount_dec(r, NULL); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index e7834d98207f..2cc4ecd36298 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -315,6 +315,10 @@ typedef void (*regmap_unlock)(void *); * masks are used. * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even * if they are both empty. + * @use_relaxed_mmio: If set, MMIO R/W operations will not use memory barriers. + * This can avoid load on devices which don't require strict + * orderings, but drivers should carefully add any explicit + * memory barriers when they may require them. * @use_single_read: If set, converts the bulk read operation into a series of * single read operations. This is useful for a device that * does not support bulk read. @@ -388,6 +392,7 @@ struct regmap_config { bool use_single_read; bool use_single_write; + bool use_relaxed_mmio; bool can_multi_write; enum regmap_endian reg_format_endian; @@ -570,6 +575,10 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__regmap_init_spi_avmm(struct spi_device *spi, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -619,6 +628,10 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -818,6 +831,19 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); sdw, config) /** + * regmap_init_sdw_mbq() - Initialise register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_sdw_mbq(sdw, config) \ + __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \ + sdw, config) + +/** * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave * to AVMM Bus Bridge * @@ -990,6 +1016,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); sdw, config) /** + * devm_regmap_init_sdw_mbq() - Initialise managed register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
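A sketch of how the new SoundWire MBQ regmap initializer might be used from a codec probe; the foo_ names and register geometry are illustrative, not taken from the source:

static const struct regmap_config foo_mbq_regmap = {
	.reg_bits = 32,
	.val_bits = 16,		/* MBQ registers are 16 bit */
	.max_register = 0x40f0,
};

static int foo_sdw_init(struct sdw_slave *slave)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_sdw_mbq(slave, &foo_mbq_regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Device-managed: no explicit regmap_exit() on the error/remove
	 * paths. */
	return 0;
}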
+ */ +#define devm_regmap_init_sdw_mbq(sdw, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \ + sdw, config) + +/** * devm_regmap_init_slimbus() - Initialise managed register map * * @slimbus: Device that will be interacted with diff --git a/include/linux/regulator/da9121.h b/include/linux/regulator/da9121.h new file mode 100644 index 000000000000..62d9d257dc25 --- /dev/null +++ b/include/linux/regulator/da9121.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * DA9121 Single-channel dual-phase 10A buck converter + * DA9130 Single-channel dual-phase 10A buck converter (Automotive) + * DA9217 Single-channel dual-phase 6A buck converter + * DA9122 Dual-channel single-phase 5A buck converter + * DA9131 Dual-channel single-phase 5A buck converter (Automotive) + * DA9220 Dual-channel single-phase 3A buck converter + * DA9132 Dual-channel single-phase 3A buck converter (Automotive) + * + * Copyright (C) 2020 Dialog Semiconductor + * + * Authors: Adam Ward, Dialog Semiconductor + */ + +#ifndef __LINUX_REGULATOR_DA9121_H +#define __LINUX_REGULATOR_DA9121_H + +#include <linux/regulator/machine.h> + +struct gpio_desc; + +enum { + DA9121_IDX_BUCK1, + DA9121_IDX_BUCK2, + DA9121_IDX_MAX +}; + +struct da9121_pdata { + int num_buck; + struct gpio_desc *gpiod_ren[DA9121_IDX_MAX]; + struct device_node *reg_node[DA9121_IDX_MAX]; + struct regulator_init_data *init_data[DA9121_IDX_MAX]; +}; + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 11cade73726c..d7c77ee370f3 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -223,6 +223,8 @@ enum regulator_type { * @name: Identifying name for the regulator. * @supply_name: Identifying the regulator supply * @of_match: Name used to identify regulator in DT. + * @of_match_full_name: A flag to indicate that the of_match string, if + * present, should be matched against the node full_name. * @regulators_node: Name of node containing regulator definitions in DT. * @of_parse_cb: Optional callback called only if of_match is present. 
* Will be called for each regulator parsed from DT, during @@ -314,6 +316,7 @@ struct regulator_desc { const char *name; const char *supply_name; const char *of_match; + bool of_match_full_name; const char *regulators_node; int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h index d47e668d9ca8..c964fe8ab698 100644 --- a/include/linux/regulator/pfuze100.h +++ b/include/linux/regulator/pfuze100.h @@ -63,10 +63,4 @@ #define PFUZE3001_VLDO3 8 #define PFUZE3001_VLDO4 9 -struct regulator_init_data; - -struct pfuze_regulator_platform_data { - struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR]; -}; - #endif /* __LINUX_REG_PFUZE100_H */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 3fa3ba6498e8..f28ee75d1005 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -375,6 +375,7 @@ enum rsc_handling_status { * @get_boot_addr: get boot address to entry point specified in firmware * @panic: optional callback to react to system panic, core will delay * panic at least the returned number of milliseconds + * @coredump: collect firmware dump after the subsystem is shutdown */ struct rproc_ops { int (*prepare)(struct rproc *rproc); @@ -393,6 +394,7 @@ struct rproc_ops { int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); unsigned long (*panic)(struct rproc *rproc); + void (*coredump)(struct rproc *rproc); }; /** @@ -653,7 +655,9 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); +int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); +void rproc_coredump_using_sections(struct rproc *rproc); int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 8ad2487a86d5..231e06b74b50 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -138,6 +138,17 @@ void rfkill_unregister(struct rfkill *rfkill); void rfkill_destroy(struct rfkill *rfkill); /** + * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state + * with a reason + * @rfkill: pointer to the rfkill class to modify. + * @blocked: the current hardware block state to set + * @reason: one of &enum rfkill_hard_block_reasons + * + * Prefer to use rfkill_set_hw_state if you don't need any special reason. + */ +bool rfkill_set_hw_state_reason(struct rfkill *rfkill, + bool blocked, unsigned long reason); +/** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. * @blocked: the current hardware block state to set @@ -156,7 +167,11 @@ void rfkill_destroy(struct rfkill *rfkill); * should be blocked) so that drivers need not keep track of the soft * block state -- which they might not be able to. 
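A hedged sketch of reporting a hardware block with a reason; foo_notify_hw_state is hypothetical, and RFKILL_HARD_BLOCK_SIGNAL is the default reason that the plain setter now passes through:

static void foo_notify_hw_state(struct rfkill *rfkill, bool hw_blocked)
{
	/* Report the kill-switch state together with why it happened.
	 * Callers with no special reason should keep using
	 * rfkill_set_hw_state(), which now wraps exactly this call. */
	rfkill_set_hw_state_reason(rfkill, hw_blocked,
				   RFKILL_HARD_BLOCK_SIGNAL);
}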
*/ -bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); +static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + return rfkill_set_hw_state_reason(rfkill, blocked, + RFKILL_HARD_BLOCK_SIGNAL); +} /** * rfkill_set_sw_state - Set the internal rfkill software block state @@ -256,6 +271,13 @@ static inline void rfkill_destroy(struct rfkill *rfkill) { } +static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill, + bool blocked, + unsigned long reason) +{ + return blocked; +} + static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) { return blocked; diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 3a6adfa70fb0..70085ca1a3fc 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -91,7 +91,6 @@ enum ttu_flags { TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ - TTU_IGNORE_ACCESS = 0x10, /* don't age */ TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */ TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible * and caller guarantees they will diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 9fe156d1c018..a5db828b2420 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -17,6 +17,7 @@ #include <linux/kref.h> #include <linux/mutex.h> #include <linux/poll.h> +#include <linux/rpmsg/byteorder.h> #define RPMSG_ADDR_ANY 0xFFFFFFFF @@ -46,6 +47,7 @@ struct rpmsg_channel_info { * @dst: destination address * @ept: the rpmsg endpoint of this channel * @announce: if set, rpmsg will announce the creation/removal of this channel + * @little_endian: True if transport is using little endian byte representation */ struct rpmsg_device { struct device dev; @@ -55,6 +57,7 @@ struct rpmsg_device { u32 dst; struct rpmsg_endpoint *ept; bool announce; + bool little_endian; const struct rpmsg_device_ops *ops; }; @@ -111,10 +114,59 @@ struct rpmsg_driver { int (*callback)(struct rpmsg_device *, void *, int, void *, u32); }; +static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val) +{ + if (!rpdev) + return __rpmsg16_to_cpu(rpmsg_is_little_endian(), val); + else + return __rpmsg16_to_cpu(rpdev->little_endian, val); +} + +static inline __rpmsg16 cpu_to_rpmsg16(struct rpmsg_device *rpdev, u16 val) +{ + if (!rpdev) + return __cpu_to_rpmsg16(rpmsg_is_little_endian(), val); + else + return __cpu_to_rpmsg16(rpdev->little_endian, val); +} + +static inline u32 rpmsg32_to_cpu(struct rpmsg_device *rpdev, __rpmsg32 val) +{ + if (!rpdev) + return __rpmsg32_to_cpu(rpmsg_is_little_endian(), val); + else + return __rpmsg32_to_cpu(rpdev->little_endian, val); +} + +static inline __rpmsg32 cpu_to_rpmsg32(struct rpmsg_device *rpdev, u32 val) +{ + if (!rpdev) + return __cpu_to_rpmsg32(rpmsg_is_little_endian(), val); + else + return __cpu_to_rpmsg32(rpdev->little_endian, val); +} + +static inline u64 rpmsg64_to_cpu(struct rpmsg_device *rpdev, __rpmsg64 val) +{ + if (!rpdev) + return __rpmsg64_to_cpu(rpmsg_is_little_endian(), val); + else + return __rpmsg64_to_cpu(rpdev->little_endian, val); +} + +static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val) +{ + if (!rpdev) + return __cpu_to_rpmsg64(rpmsg_is_little_endian(), val); + else + return __cpu_to_rpmsg64(rpdev->little_endian, val); +} + #if IS_ENABLED(CONFIG_RPMSG) -int register_rpmsg_device(struct rpmsg_device *dev); -void unregister_rpmsg_device(struct rpmsg_device *dev); +int rpmsg_register_device(struct rpmsg_device *rpdev); +int rpmsg_unregister_device(struct device 
*parent, + struct rpmsg_channel_info *chinfo); int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); void unregister_rpmsg_driver(struct rpmsg_driver *drv); void rpmsg_destroy_ept(struct rpmsg_endpoint *); @@ -137,15 +189,18 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, #else -static inline int register_rpmsg_device(struct rpmsg_device *dev) +static inline int rpmsg_register_device(struct rpmsg_device *rpdev) { return -ENXIO; } -static inline void unregister_rpmsg_device(struct rpmsg_device *dev) +static inline int rpmsg_unregister_device(struct device *parent, + struct rpmsg_channel_info *chinfo) { /* This shouldn't be possible */ WARN_ON(1); + + return -ENXIO; } static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, diff --git a/include/linux/rpmsg/byteorder.h b/include/linux/rpmsg/byteorder.h new file mode 100644 index 000000000000..c0f565dbad6d --- /dev/null +++ b/include/linux/rpmsg/byteorder.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Follows implementation found in linux/virtio_byteorder.h + */ +#ifndef _LINUX_RPMSG_BYTEORDER_H +#define _LINUX_RPMSG_BYTEORDER_H +#include <linux/types.h> +#include <uapi/linux/rpmsg_types.h> + +static inline bool rpmsg_is_little_endian(void) +{ +#ifdef __LITTLE_ENDIAN + return true; +#else + return false; +#endif +} + +static inline u16 __rpmsg16_to_cpu(bool little_endian, __rpmsg16 val) +{ + if (little_endian) + return le16_to_cpu((__force __le16)val); + else + return be16_to_cpu((__force __be16)val); +} + +static inline __rpmsg16 __cpu_to_rpmsg16(bool little_endian, u16 val) +{ + if (little_endian) + return (__force __rpmsg16)cpu_to_le16(val); + else + return (__force __rpmsg16)cpu_to_be16(val); +} + +static inline u32 __rpmsg32_to_cpu(bool little_endian, __rpmsg32 val) +{ + if (little_endian) + return le32_to_cpu((__force __le32)val); + else + return be32_to_cpu((__force __be32)val); +} + +static inline __rpmsg32 __cpu_to_rpmsg32(bool little_endian, u32 val) +{ + if (little_endian) + return (__force __rpmsg32)cpu_to_le32(val); + else + return (__force __rpmsg32)cpu_to_be32(val); +} + +static inline u64 __rpmsg64_to_cpu(bool little_endian, __rpmsg64 val) +{ + if (little_endian) + return le64_to_cpu((__force __le64)val); + else + return be64_to_cpu((__force __be64)val); +} + +static inline __rpmsg64 __cpu_to_rpmsg64(bool little_endian, u64 val) +{ + if (little_endian) + return (__force __rpmsg64)cpu_to_le64(val); + else + return (__force __rpmsg64)cpu_to_be64(val); +} + +#endif /* _LINUX_RPMSG_BYTEORDER_H */ diff --git a/include/linux/rpmsg/ns.h b/include/linux/rpmsg/ns.h new file mode 100644 index 000000000000..a7804edd6d58 --- /dev/null +++ b/include/linux/rpmsg/ns.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_RPMSG_NS_H +#define _LINUX_RPMSG_NS_H + +#include <linux/mod_devicetable.h> +#include <linux/rpmsg.h> +#include <linux/rpmsg/byteorder.h> +#include <linux/types.h> + +/** + * struct rpmsg_ns_msg - dynamic name service announcement message + * @name: name of remote service that is published + * @addr: address of remote service that is published + * @flags: indicates whether service is created or destroyed + * + * This message is sent across to publish a new service, or announce + * about its removal. When we receive these messages, an appropriate + * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe() + * or ->remove() handler of the appropriate rpmsg driver will be invoked + * (if/as-soon-as one is registered). 
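Combining the byteorder helpers above with the name-service message defined just below, a receive callback can decode wire fields without hard-coding the transport's endianness; foo_ns_cb is a hypothetical rpmsg callback:

static int foo_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
		     void *priv, u32 src)
{
	struct rpmsg_ns_msg *msg = data;
	u32 addr, flags;

	if (len != sizeof(*msg))
		return -EINVAL;

	/* __rpmsg32 fields are in the transport's byte order; the helpers
	 * pick LE or BE based on rpdev->little_endian. */
	addr = rpmsg32_to_cpu(rpdev, msg->addr);
	flags = rpmsg32_to_cpu(rpdev, msg->flags);

	pr_info("NS announce: %s addr %u flags %u\n", msg->name, addr, flags);
	return 0;
}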
+ */ +struct rpmsg_ns_msg { + char name[RPMSG_NAME_SIZE]; + __rpmsg32 addr; + __rpmsg32 flags; +} __packed; + +/** + * enum rpmsg_ns_flags - dynamic name service announcement flags + * + * @RPMSG_NS_CREATE: a new remote service was just created + * @RPMSG_NS_DESTROY: a known remote service was just destroyed + */ +enum rpmsg_ns_flags { + RPMSG_NS_CREATE = 0, + RPMSG_NS_DESTROY = 1, +}; + +/* Address 53 is reserved for advertising remote services */ +#define RPMSG_NS_ADDR (53) + +int rpmsg_ns_register_device(struct rpmsg_device *rpdev); + +#endif diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 22d1575e4991..b829382de6c3 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -110,13 +110,36 @@ struct rtc_device { /* Some hardware can't support UIE mode */ int uie_unsupported; - /* Number of nsec it takes to set the RTC clock. This influences when - * the set ops are called. An offset: - * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s - * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s - * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s + /* + * This offset specifies the update timing of the RTC. + * + * tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds + * + * The offset defines how tsched is computed so that the write to + * the RTC (t2.tv_sec - 1sec) is correct versus the time required + * for the transport of the write and the time which the RTC needs + * to increment seconds the first time after the write (t2). + * + * For direct accessible RTCs tsched ~= t1 because the write time + * is negligible. For RTCs behind slow busses the transport time is + * significant and has to be taken into account. + * + * The time between the write (t1) and the first increment after + * the write (t2) is RTC specific. For a MC146818 RTC it's 500ms, + * for many others it's exactly 1 second. Consult the datasheet. + * + * The value of this offset is also used to calculate the to be + * written value (t2.tv_sec - 1sec) at tsched. + * + * The default value for this is NSEC_PER_SEC + 10 msec default + * transport time. The offset can be adjusted by drivers so the + * calculation for the to be written value at tsched becomes + * correct: + * + * newval = tsched + set_offset_nsec - NSEC_PER_SEC + * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0 */ - long set_offset_nsec; + unsigned long set_offset_nsec; bool registered; @@ -165,7 +188,6 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); -extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); @@ -205,39 +227,6 @@ static inline bool is_leap_year(unsigned int year) return (!(year % 4) && (year % 100)) || !(year % 400); } -/* Determine if we can call to driver to set the time. Drivers can only be - * called to set a second aligned time value, and the field set_offset_nsec - * specifies how far away from the second aligned time to call the driver. 
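The update-timing arithmetic in the set_offset_nsec comment above, made concrete as a hedged user-space sketch (rtc_set_plan is invented): schedule the write at tsched = t2 - set_offset_nsec and write t2 minus one second, so that (tsched + set_offset_nsec) % NSEC_PER_SEC == 0 and the RTC increments to t2 exactly on the boundary.

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* t2_ns: target second boundary (ns); set_offset_nsec: transport time
 * plus the RTC's first-increment latency, per the comment above. */
static void rtc_set_plan(uint64_t t2_ns, uint64_t set_offset_nsec,
			 uint64_t *tsched_ns, uint64_t *write_sec)
{
	*tsched_ns = t2_ns - set_offset_nsec;
	/* newval = tsched + set_offset_nsec - NSEC_PER_SEC, in seconds. */
	*write_sec = (*tsched_ns + set_offset_nsec - NSEC_PER_SEC) / NSEC_PER_SEC;
}

With the default offset of NSEC_PER_SEC + 10 msec, the write is scheduled 1.01 s before t2 and the value written is t2's second count minus one.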
- * - * This also computes 'to_set' which is the time we are trying to set, and has - * a zero in tv_nsecs, such that: - * to_set - set_delay_nsec == now +/- FUZZ - * - */ -static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec, - struct timespec64 *to_set, - const struct timespec64 *now) -{ - /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */ - const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; - struct timespec64 delay = {.tv_sec = 0, - .tv_nsec = set_offset_nsec}; - - *to_set = timespec64_add(*now, delay); - - if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) { - to_set->tv_nsec = 0; - return true; - } - - if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) { - to_set->tv_sec++; - to_set->tv_nsec = 0; - return true; - } - return false; -} - #define rtc_register_device(device) \ __rtc_register_device(THIS_MODULE, device) diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 745f5e73f99a..6f155f99aa16 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -82,6 +82,7 @@ #define MS_OC_INT_EN (1 << 23) #define SD_OC_INT_EN (1 << 22) +#define RTSX_DUM_REG 0x1C /* * macros for easy use @@ -658,6 +659,23 @@ #define PM_WAKE_EN 0x01 #define PM_CTRL4 0xFF47 +/* FW config info register */ +#define RTS5261_FW_CFG_INFO0 0xFF50 +#define RTS5261_FW_EXPRESS_TEST_MASK (0x01 << 0) +#define RTS5261_FW_EA_MODE_MASK (0x01 << 5) +#define RTS5261_FW_CFG0 0xFF54 +#define RTS5261_FW_ENTER_EXPRESS (0x01 << 0) + +#define RTS5261_FW_CFG1 0xFF55 +#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01 << 7) +#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01 << 6) +#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01 << 5) +#define RTS5261_MCU_BUS_SEL_MASK (0x01 << 4) +#define RTS5261_MCU_CLOCK_SEL_MASK (0x03 << 2) +#define RTS5261_MCU_CLOCK_SEL_16M (0x01 << 2) +#define RTS5261_MCU_CLOCK_GATING (0x01 << 1) +#define RTS5261_DRIVER_ENABLE_FW (0x01 << 0) + #define REG_CFG_OOBS_OFF_TIMER 0xFEA6 #define REG_CFG_OOBS_ON_TIMER 0xFEA7 #define REG_CFG_VCM_ON_TIMER 0xFEA8 @@ -701,6 +719,13 @@ #define RTS5260_DVCC_TUNE_MASK 0x70 #define RTS5260_DVCC_33 0x70 +/*RTS5261*/ +#define RTS5261_LDO1_CFG0 0xFF72 +#define RTS5261_LDO1_OCP_THD_MASK (0x07 << 5) +#define RTS5261_LDO1_OCP_EN (0x01 << 4) +#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03 << 2) +#define RTS5261_LDO1_OCP_LMT_EN (0x01 << 1) + #define LDO_VCC_CFG1 0xFF73 #define LDO_VCC_REF_TUNE_MASK 0x30 #define LDO_VCC_REF_1V2 0x20 @@ -741,6 +766,8 @@ #define RTS5260_AUTOLOAD_CFG4 0xFF7F #define RTS5260_MIMO_DISABLE 0x8A +/*RTS5261*/ +#define RTS5261_AUX_CLK_16M_EN (1 << 5) #define RTS5260_REG_GPIO_CTL0 0xFC1A #define RTS5260_REG_GPIO_MASK 0x01 @@ -1174,6 +1201,7 @@ struct rtsx_pcr { struct delayed_work carddet_work; struct delayed_work idle_work; + struct delayed_work rtd3_work; spinlock_t lock; struct mutex pcr_mutex; @@ -1183,6 +1211,7 @@ struct rtsx_pcr { unsigned int cur_clock; bool remove_pci; bool msi_en; + bool is_runtime_suspended; #define EXTRA_CAPS_SD_SDR50 (1 << 0) #define EXTRA_CAPS_SD_SDR104 (1 << 1) @@ -1191,6 +1220,7 @@ struct rtsx_pcr { #define EXTRA_CAPS_MMC_HS200 (1 << 4) #define EXTRA_CAPS_MMC_8BIT (1 << 5) #define EXTRA_CAPS_NO_MMC (1 << 7) +#define EXTRA_CAPS_SD_EXPRESS (1 << 8) u32 extra_caps; #define IC_VER_A 0 @@ -1245,6 +1275,8 @@ struct rtsx_pcr { #define PCI_PID(pcr) ((pcr)->pci->device) #define is_version(pcr, pid, ver) \ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver)) +#define is_version_higher_than(pcr, pid, ver) \ + (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version > (ver)) #define pcr_dbg(pcr, fmt, arg...) 
\ dev_dbg(&(pcr)->pci->dev, fmt, ##arg) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 25e3fde85617..4c715be48717 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -123,6 +123,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) * lock for reading */ extern void down_read(struct rw_semaphore *sem); +extern int __must_check down_read_interruptible(struct rw_semaphore *sem); extern int __must_check down_read_killable(struct rw_semaphore *sem); /* @@ -171,6 +172,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * See Documentation/locking/lockdep-design.rst for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); @@ -191,6 +193,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem); extern void up_read_non_owner(struct rw_semaphore *sem); #else # define down_read_nested(sem, subclass) down_read(sem) +# define down_read_killable_nested(sem, subclass) down_read_killable(sem) # define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) # define down_write_killable_nested(sem, subclass) down_write_killable(sem) diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 36c47e7e66a2..6f70572b2938 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -19,12 +19,6 @@ struct scatterlist { }; /* - * Since the above length field is an unsigned int, below we define the maximum - * length in bytes that can be stored in one scatterlist entry. - */ -#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK) - -/* * These macros should be used after a dma_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. * You should only work with the number of sg entries dma_map_sg diff --git a/include/linux/sched.h b/include/linux/sched.h index 063cd120b459..51d535b69bd6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -28,12 +28,14 @@ #include <linux/sched/prio.h> #include <linux/sched/types.h> #include <linux/signal_types.h> +#include <linux/syscall_user_dispatch.h> #include <linux/mm_types_task.h> #include <linux/task_io_accounting.h> #include <linux/posix-timers.h> #include <linux/rseq.h> #include <linux/seqlock.h> #include <linux/kcsan.h> +#include <asm/kmap_size.h> /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -552,7 +554,6 @@ struct sched_dl_entity { * overruns. */ unsigned int dl_throttled : 1; - unsigned int dl_boosted : 1; unsigned int dl_yielded : 1; unsigned int dl_non_contending : 1; unsigned int dl_overrun : 1; @@ -571,6 +572,15 @@ struct sched_dl_entity { * time. */ struct hrtimer inactive_timer; + +#ifdef CONFIG_RT_MUTEXES + /* + * Priority Inheritance. When a DEADLINE scheduling entity is boosted, + * pi_se points to the donor; otherwise it points to the dl_se it + * belongs to (the original one/itself).
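+ * + * Scheduler code is then expected to read the effective DEADLINE parameters through this pointer rather than from the entity itself. A minimal sketch of such an accessor (the pi_of() name is illustrative; the helper would live in the scheduler core, not in this header): + * + *	static struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se) + *	{ + *		return dl_se->pi_se; + *	}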
+ */ + struct sched_dl_entity *pi_se; +#endif }; #ifdef CONFIG_UCLAMP_TASK @@ -629,6 +639,13 @@ struct wake_q_node { struct wake_q_node *next; }; +struct kmap_ctrl { +#ifdef CONFIG_KMAP_LOCAL + int idx; + pte_t pteval[KM_MAX_IDX]; +#endif +}; + struct task_struct { #ifdef CONFIG_THREAD_INFO_IN_TASK /* @@ -714,6 +731,11 @@ struct task_struct { int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t cpus_mask; + void *migration_pending; +#ifdef CONFIG_SMP + unsigned short migration_disabled; +#endif + unsigned short migration_flags; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; @@ -770,7 +792,6 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; - unsigned sched_remote_wakeup:1; #ifdef CONFIG_PSI unsigned sched_psi_wake_requeue:1; #endif @@ -780,6 +801,21 @@ struct task_struct { /* Unserialized, strictly 'current' */ + /* + * This field must not be in the scheduler word above due to wakelist + * queueing no longer being serialized by p->on_cpu. However: + * + * p->XXX = X; ttwu() + * schedule() if (p->on_rq && ..) // false + * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true + * deactivate_task() ttwu_queue_wakelist()) + * p->on_rq = 0; p->sched_remote_wakeup = Y; + * + * guarantees all stores of 'current' are visible before + * ->sched_remote_wakeup gets used, so it can be in this word. + */ + unsigned sched_remote_wakeup:1; + /* Bit to tell LSMs we're in execve(): */ unsigned in_execve:1; unsigned in_iowait:1; @@ -965,6 +1001,7 @@ struct task_struct { unsigned int sessionid; #endif struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; /* Thread group tracking: */ u64 parent_exec_id; @@ -1289,6 +1326,7 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif + struct kmap_ctrl kmap_ctrl; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif @@ -1326,6 +1364,10 @@ struct task_struct { struct callback_head mce_kill_me; #endif +#ifdef CONFIG_KRETPROBES + struct llist_head kretprobe_instances; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h index 9a62ffdd296f..412cdaba33eb 100644 --- a/include/linux/sched/hotplug.h +++ b/include/linux/sched/hotplug.h @@ -11,8 +11,10 @@ extern int sched_cpu_activate(unsigned int cpu); extern int sched_cpu_deactivate(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_wait_empty(unsigned int cpu); extern int sched_cpu_dying(unsigned int cpu); #else +# define sched_cpu_wait_empty NULL # define sched_cpu_dying NULL #endif diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index d5ece7a9a403..1ae08b8462a4 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -181,6 +181,22 @@ static inline void fs_reclaim_release(gfp_t gfp_mask) { } #endif /** + * might_alloc - Mark possible allocation sites + * @gfp_mask: gfp_t flags that would be used to allocate + * + * Similar to might_sleep() and other annotations, this can be used in functions + * that might allocate, but often don't. Compiles to nothing without + * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows blocking.
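+ * + * A hypothetical caller, for illustration only (cache_lookup() is a stand-in, not a real API): a helper that usually hits a cache but may allocate on a miss can annotate the allocation possibility up front, so the lockdep checks fire even when the fast path is taken: + * + *	might_alloc(gfp); + *	entry = cache_lookup(key); + *	if (!entry) + *		entry = kmalloc(size, gfp);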
+ */ +static inline void might_alloc(gfp_t gfp_mask) +{ + fs_reclaim_acquire(gfp_mask); + fs_reclaim_release(gfp_mask); + + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); +} + +/** * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope. * * This functions marks the beginning of the GFP_NOIO allocation scope. @@ -347,6 +363,8 @@ static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) extern void membarrier_exec_mmap(struct mm_struct *mm); +extern void membarrier_update_current_mm(struct mm_struct *next_mm); + #else #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS static inline void membarrier_arch_switch_mm(struct mm_struct *prev, @@ -361,6 +379,9 @@ static inline void membarrier_exec_mmap(struct mm_struct *mm) static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { } +static inline void membarrier_update_current_mm(struct mm_struct *next_mm) +{ +} #endif #endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 1bad18a1d8ba..bd5afa076189 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -353,11 +353,25 @@ static inline int restart_syscall(void) return -ERESTARTNOINTR; } -static inline int signal_pending(struct task_struct *p) +static inline int task_sigpending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } +static inline int signal_pending(struct task_struct *p) +{ +#if defined(TIF_NOTIFY_SIGNAL) + /* + * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same + * behavior in terms of ensuring that we break out of wait loops + * so that notify signal callbacks can be processed. + */ + if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL))) + return 1; +#endif + return task_sigpending(p); +} + static inline int __fatal_signal_pending(struct task_struct *p) { return unlikely(sigismember(&p->pending.signal, SIGKILL)); @@ -365,7 +379,7 @@ static inline int __fatal_signal_pending(struct task_struct *p) static inline int fatal_signal_pending(struct task_struct *p) { - return signal_pending(p) && __fatal_signal_pending(p); + return task_sigpending(p) && __fatal_signal_pending(p); } static inline int signal_pending_state(long state, struct task_struct *p) @@ -502,7 +516,7 @@ extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); static inline void restore_saved_sigmask_unless(bool interrupted) { if (interrupted) - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + WARN_ON(!signal_pending(current)); else restore_saved_sigmask(); } diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 85fb2f34c59b..c0f71f2e7160 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -47,9 +47,7 @@ extern spinlock_t mmlist_lock; extern union thread_union init_thread_union; extern struct task_struct init_task; -#ifdef CONFIG_PROVE_RCU extern int lockdep_tasklist_lock_is_held(void); -#endif /* #ifdef CONFIG_PROVE_RCU */ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 9ef7bf686a9f..8f0f778b7c91 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -225,6 +225,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) #endif /* !CONFIG_SMP */ +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) +extern void rebuild_sched_domains_energy(void); +#else +static 
inline void rebuild_sched_domains_energy(void) +{ +} +#endif + #ifndef arch_scale_cpu_capacity /** * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU. diff --git a/include/linux/scif.h b/include/linux/scif.h deleted file mode 100644 index 329e695b8fe5..000000000000 --- a/include/linux/scif.h +++ /dev/null @@ -1,1339 +0,0 @@ -/* - * Intel MIC Platform Software Stack (MPSS) - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Intel SCIF driver. 
- * - */ -#ifndef __SCIF_H__ -#define __SCIF_H__ - -#include <linux/types.h> -#include <linux/poll.h> -#include <linux/device.h> -#include <linux/scif_ioctl.h> - -#define SCIF_ACCEPT_SYNC 1 -#define SCIF_SEND_BLOCK 1 -#define SCIF_RECV_BLOCK 1 - -enum { - SCIF_PROT_READ = (1 << 0), - SCIF_PROT_WRITE = (1 << 1) -}; - -enum { - SCIF_MAP_FIXED = 0x10, - SCIF_MAP_KERNEL = 0x20, -}; - -enum { - SCIF_FENCE_INIT_SELF = (1 << 0), - SCIF_FENCE_INIT_PEER = (1 << 1), - SCIF_SIGNAL_LOCAL = (1 << 4), - SCIF_SIGNAL_REMOTE = (1 << 5) -}; - -enum { - SCIF_RMA_USECPU = (1 << 0), - SCIF_RMA_USECACHE = (1 << 1), - SCIF_RMA_SYNC = (1 << 2), - SCIF_RMA_ORDERED = (1 << 3) -}; - -/* End of SCIF Admin Reserved Ports */ -#define SCIF_ADMIN_PORT_END 1024 - -/* End of SCIF Reserved Ports */ -#define SCIF_PORT_RSVD 1088 - -typedef struct scif_endpt *scif_epd_t; -typedef struct scif_pinned_pages *scif_pinned_pages_t; - -/** - * struct scif_range - SCIF registered range used in kernel mode - * @cookie: cookie used internally by SCIF - * @nr_pages: number of pages of PAGE_SIZE - * @prot_flags: R/W protection - * @phys_addr: Array of bus addresses - * @va: Array of kernel virtual addresses backed by the pages in the phys_addr - * array. The va is populated only when called on the host for a remote - * SCIF connection on MIC. This is required to support the use case of DMA - * between MIC and another device which is not a SCIF node e.g., an IB or - * ethernet NIC. - */ -struct scif_range { - void *cookie; - int nr_pages; - int prot_flags; - dma_addr_t *phys_addr; - void __iomem **va; -}; - -/** - * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll - * @epd: SCIF endpoint - * @events: requested events - * @revents: returned events - */ -struct scif_pollepd { - scif_epd_t epd; - __poll_t events; - __poll_t revents; -}; - -/** - * scif_peer_dev - representation of a peer SCIF device - * - * Peer devices show up as PCIe devices for the mgmt node but not the cards. - * The mgmt node discovers all the cards on the PCIe bus and informs the other - * cards about their peers. Upon notification of a peer a node adds a peer - * device to the peer bus to maintain symmetry in the way devices are - * discovered across all nodes in the SCIF network. - * - * @dev: underlying device - * @dnode - The destination node which this device will communicate with. - */ -struct scif_peer_dev { - struct device dev; - u8 dnode; -}; - -/** - * scif_client - representation of a SCIF client - * @name: client name - * @probe - client method called when a peer device is registered - * @remove - client method called when a peer device is unregistered - * @si - subsys_interface used internally for implementing SCIF clients - */ -struct scif_client { - const char *name; - void (*probe)(struct scif_peer_dev *spdev); - void (*remove)(struct scif_peer_dev *spdev); - struct subsys_interface si; -}; - -#define SCIF_OPEN_FAILED ((scif_epd_t)-1) -#define SCIF_REGISTER_FAILED ((off_t)-1) -#define SCIF_MMAP_FAILED ((void *)-1) - -/** - * scif_open() - Create an endpoint - * - * Return: - * Upon successful completion, scif_open() returns an endpoint descriptor to - * be used in subsequent SCIF functions calls to refer to that endpoint; - * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is - * returned and errno is set to indicate the error; in kernel mode a NULL - * scif_epd_t is returned. 
- * - * Errors: - * ENOMEM - Insufficient kernel memory was available - */ -scif_epd_t scif_open(void); - -/** - * scif_bind() - Bind an endpoint to a port - * @epd: endpoint descriptor - * @pn: port number - * - * scif_bind() binds endpoint epd to port pn, where pn is a port number on the - * local node. If pn is zero, a port number greater than or equal to - * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to - * exactly one local port. Ports less than 1024 when requested can only be bound - * by system (or root) processes or by processes executed by privileged users. - * - * Return: - * Upon successful completion, scif_bind() returns the port number to which epd - * is bound; otherwise in user mode -1 is returned and errno is set to - * indicate the error; in kernel mode the negative of one of the following - * errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * EINVAL - the endpoint or the port is already bound - * EISCONN - The endpoint is already connected - * ENOSPC - No port number available for assignment - * EACCES - The port requested is protected and the user is not the superuser - */ -int scif_bind(scif_epd_t epd, u16 pn); - -/** - * scif_listen() - Listen for connections on an endpoint - * @epd: endpoint descriptor - * @backlog: maximum pending connection requests - * - * scif_listen() marks the endpoint epd as a listening endpoint - that is, as - * an endpoint that will be used to accept incoming connection requests. Once - * so marked, the endpoint is said to be in the listening state and may not be - * used as the endpoint of a connection. - * - * The endpoint, epd, must have been bound to a port. - * - * The backlog argument defines the maximum length to which the queue of - * pending connections for epd may grow. If a connection request arrives when - * the queue is full, the client may receive an error with an indication that - * the connection was refused. - * - * Return: - * Upon successful completion, scif_listen() returns 0; otherwise in user mode - * -1 is returned and errno is set to indicate the error; in kernel mode the - * negative of one of the following errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * EINVAL - the endpoint is not bound to a port - * EISCONN - The endpoint is already connected or listening - */ -int scif_listen(scif_epd_t epd, int backlog); - -/** - * scif_connect() - Initiate a connection on a port - * @epd: endpoint descriptor - * @dst: global id of port to which to connect - * - * The scif_connect() function requests the connection of endpoint epd to remote - * port dst. If the connection is successful, a peer endpoint, bound to dst, is - * created on node dst.node. On successful return, the connection is complete. - * - * If the endpoint epd has not already been bound to a port, scif_connect() - * will bind it to an unused local port. - * - * A connection is terminated when an endpoint of the connection is closed, - * either explicitly by scif_close(), or when a process that owns one of the - * endpoints of the connection is terminated. - * - * In user space, scif_connect() supports an asynchronous connection mode - * if the application has set the O_NONBLOCK flag on the endpoint via the - * fcntl() system call. Setting this flag will result in the calling process - * not to wait during scif_connect(). 
- * - * Return: - * Upon successful completion, scif_connect() returns the port ID to which the - * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is - * set to indicate the error; in kernel mode the negative of one of the - * following errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNREFUSED - The destination was not listening for connections or refused - * the connection request - * EINVAL - dst.port is not a valid port ID - * EISCONN - The endpoint is already connected - * ENOMEM - No buffer space is available - * ENODEV - The destination node does not exist, or the node is lost or existed, - * but is not currently in the network since it may have crashed - * ENOSPC - No port number available for assignment - * EOPNOTSUPP - The endpoint is listening and cannot be connected - */ -int scif_connect(scif_epd_t epd, struct scif_port_id *dst); - -/** - * scif_accept() - Accept a connection on an endpoint - * @epd: endpoint descriptor - * @peer: global id of port to which connected - * @newepd: new connected endpoint descriptor - * @flags: flags - * - * The scif_accept() call extracts the first connection request from the queue - * of pending connections for the port on which epd is listening. scif_accept() - * creates a new endpoint, bound to the same port as epd, and allocates a new - * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new - * endpoint is connected to the endpoint through which the connection was - * requested. epd is unaffected by this call, and remains in the listening - * state. - * - * On successful return, peer holds the global port identifier (node id and - * local port number) of the port which requested the connection. - * - * A connection is terminated when an endpoint of the connection is closed, - * either explicitly by scif_close(), or when a process that owns one of the - * endpoints of the connection is terminated. - * - * The number of connections that can (subsequently) be accepted on epd is only - * limited by system resources (memory). - * - * The flags argument is formed by OR'ing together zero or more of the - * following values. - * SCIF_ACCEPT_SYNC - block until a connection request is presented. If - * SCIF_ACCEPT_SYNC is not in flags, and no pending - * connections are present on the queue, scif_accept() - * fails with an EAGAIN error - * - * In user mode, the select() and poll() functions can be used to determine - * when there is a connection request. In kernel mode, the scif_poll() - * function may be used for this purpose. A readable event will be delivered - * when a connection is requested. - * - * Return: - * Upon successful completion, scif_accept() returns 0; otherwise in user mode - * -1 is returned and errno is set to indicate the error; in kernel mode the - * negative of one of the following errors is returned. 
- * - * Errors: - * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be - * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete - * its connection request - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * EINTR - Interrupted function - * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is - * NULL, or newepd is NULL - * ENODEV - The requesting node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOMEM - Not enough space - * ENOENT - Secondary part of epd registration failed - */ -int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t - *newepd, int flags); - -/** - * scif_close() - Close an endpoint - * @epd: endpoint descriptor - * - * scif_close() closes an endpoint and performs necessary teardown of - * facilities associated with that endpoint. - * - * If epd is a listening endpoint then it will no longer accept connection - * requests on the port to which it is bound. Any pending connection requests - * are rejected. - * - * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs - * which are in-process through epd or its peer endpoint will complete before - * scif_close() returns. Registered windows of the local and peer endpoints are - * released as if scif_unregister() was called against each window. - * - * Closing a SCIF endpoint does not affect local registered memory mapped by - * a SCIF endpoint on a remote node. The local memory remains mapped by the peer - * SCIF endpoint explicitly removed by calling munmap(..) by the peer. - * - * If the peer endpoint's receive queue is not empty at the time that epd is - * closed, then the peer endpoint can be passed as the endpoint parameter to - * scif_recv() until the receive queue is empty. - * - * epd is freed and may no longer be accessed. - * - * Return: - * Upon successful completion, scif_close() returns 0; otherwise in user mode - * -1 is returned and errno is set to indicate the error; in kernel mode the - * negative of one of the following errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - */ -int scif_close(scif_epd_t epd); - -/** - * scif_send() - Send a message - * @epd: endpoint descriptor - * @msg: message buffer address - * @len: message length - * @flags: blocking mode flags - * - * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data - * are copied from memory starting at address msg. On successful execution the - * return value of scif_send() is the number of bytes that were sent, and is - * zero if no bytes were sent because len was zero. scif_send() may be called - * only when the endpoint is in a connected state. - * - * If a scif_send() call is non-blocking, then it sends only those bytes which - * can be sent without waiting, up to a maximum of len bytes. - * - * If a scif_send() call is blocking, then it normally returns after sending - * all len bytes. If a blocking call is interrupted or the connection is - * reset, the call is considered successful if some bytes were sent or len is - * zero, otherwise the call is considered unsuccessful. - * - * In user mode, the select() and poll() functions can be used to determine - * when the send queue is not full. In kernel mode, the scif_poll() function - * may be used for this purpose. - * - * It is recommended that scif_send()/scif_recv() only be used for short - * control-type message communication between SCIF endpoints. 
The SCIF RMA - * APIs are expected to provide better performance for transfer sizes of - * 1024 bytes or longer for the current MIC hardware and software - * implementation. - * - * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK - * is passed as the flags argument. - * - * Return: - * Upon successful completion, scif_send() returns the number of bytes sent; - * otherwise in user mode -1 is returned and errno is set to indicate the - * error; in kernel mode the negative of one of the following errors is - * returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - flags is invalid, or len is negative - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOMEM - Not enough space - * ENOTCONN - The endpoint is not connected - */ -int scif_send(scif_epd_t epd, void *msg, int len, int flags); - -/** - * scif_recv() - Receive a message - * @epd: endpoint descriptor - * @msg: message buffer address - * @len: message buffer length - * @flags: blocking mode flags - * - * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of - * data are copied to memory starting at address msg. On successful execution - * the return value of scif_recv() is the number of bytes that were received, - * and is zero if no bytes were received because len was zero. scif_recv() may - * be called only when the endpoint is in a connected state. - * - * If a scif_recv() call is non-blocking, then it receives only those bytes - * which can be received without waiting, up to a maximum of len bytes. - * - * If a scif_recv() call is blocking, then it normally returns after receiving - * all len bytes. If the blocking call was interrupted due to a disconnection, - * subsequent calls to scif_recv() will copy all bytes received upto the point - * of disconnection. - * - * In user mode, the select() and poll() functions can be used to determine - * when data is available to be received. In kernel mode, the scif_poll() - * function may be used for this purpose. - * - * It is recommended that scif_send()/scif_recv() only be used for short - * control-type message communication between SCIF endpoints. The SCIF RMA - * APIs are expected to provide better performance for transfer sizes of - * 1024 bytes or longer for the current MIC hardware and software - * implementation. - * - * scif_recv() will block until the entire message is received if - * SCIF_RECV_BLOCK is passed as the flags argument. - * - * Return: - * Upon successful completion, scif_recv() returns the number of bytes - * received; otherwise in user mode -1 is returned and errno is set to - * indicate the error; in kernel mode the negative of one of the following - * errors is returned. - * - * Errors: - * EAGAIN - The destination node is returning from a low power state - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - flags is invalid, or len is negative - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOMEM - Not enough space - * ENOTCONN - The endpoint is not connected - */ -int scif_recv(scif_epd_t epd, void *msg, int len, int flags); - -/** - * scif_register() - Mark a memory region for remote access. 
- * @epd: endpoint descriptor - * @addr: starting virtual address - * @len: length of range - * @offset: offset of window - * @prot_flags: read/write protection flags - * @map_flags: mapping flags - * - * The scif_register() function opens a window, a range of whole pages of the - * registered address space of the endpoint epd, starting at offset po and - * continuing for len bytes. The value of po, further described below, is a - * function of the parameters offset and len, and the value of map_flags. Each - * page of the window represents the physical memory page which backs the - * corresponding page of the range of virtual address pages starting at addr - * and continuing for len bytes. addr and len are constrained to be multiples - * of the page size. A successful scif_register() call returns po. - * - * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset - * exactly, and offset is constrained to be a multiple of the page size. The - * mapping established by scif_register() will not replace any existing - * registration; an error is returned if any page within the range [offset, - * offset + len - 1] intersects an existing window. - * - * When SCIF_MAP_FIXED is not set, the implementation uses offset in an - * implementation-defined manner to arrive at po. The po value so chosen will - * be an area of the registered address space that the implementation deems - * suitable for a mapping of len bytes. An offset value of 0 is interpreted as - * granting the implementation complete freedom in selecting po, subject to - * constraints described below. A non-zero value of offset is taken to be a - * suggestion of an offset near which the mapping should be placed. When the - * implementation selects a value for po, it does not replace any extant - * window. In all cases, po will be a multiple of the page size. - * - * The physical pages which are so represented by a window are available for - * access in calls to mmap(), scif_readfrom(), scif_writeto(), - * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the - * physical pages represented by the window will not be reused by the memory - * subsystem for any other purpose. Note that the same physical page may be - * represented by multiple windows. - * - * Subsequent operations which change the memory pages to which virtual - * addresses are mapped (such as mmap(), munmap()) have no effect on - * existing window. - * - * If the process will fork(), it is recommended that the registered - * virtual address range be marked with MADV_DONTFORK. Doing so will prevent - * problems due to copy-on-write semantics. - * - * The prot_flags argument is formed by OR'ing together one or more of the - * following values. - * SCIF_PROT_READ - allow read operations from the window - * SCIF_PROT_WRITE - allow write operations to the window - * - * Return: - * Upon successful completion, scif_register() returns the offset at which the - * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that - * is (off_t *)-1) is returned and errno is set to indicate the error; in - * kernel mode the negative of one of the following errors is returned. 
- * - * Errors: - * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range - * [offset, offset + len -1] are already registered - * EAGAIN - The mapping could not be performed due to lack of resources - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is - * set in flags, and offset is not a multiple of the page size, or addr is not a - * multiple of the page size, or len is not a multiple of the page size, or is - * 0, or offset is negative - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOMEM - Not enough space - * ENOTCONN -The endpoint is not connected - */ -off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset, - int prot_flags, int map_flags); - -/** - * scif_unregister() - Mark a memory region for remote access. - * @epd: endpoint descriptor - * @offset: start of range to unregister - * @len: length of range to unregister - * - * The scif_unregister() function closes those previously registered windows - * which are entirely within the range [offset, offset + len - 1]. It is an - * error to specify a range which intersects only a subrange of a window. - * - * On a successful return, pages within the window may no longer be specified - * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(), - * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window, - * however, continues to exist until all previous references against it are - * removed. A window is referenced if there is a mapping to it created by - * mmap(), or if scif_get_pages() was called against the window - * (and the pages have not been returned via scif_put_pages()). A window is - * also referenced while an RMA, in which some range of the window is a source - * or destination, is in progress. Finally a window is referenced while some - * offset in that window was specified to scif_fence_signal(), and the RMAs - * marked by that call to scif_fence_signal() have not completed. While a - * window is in this state, its registered address space pages are not - * available for use in a new registered window. - * - * When all such references to the window have been removed, its references to - * all the physical pages which it represents are removed. Similarly, the - * registered address space pages of the window become available for - * registration in a new window. - * - * Return: - * Upon successful completion, scif_unregister() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. In the event of an - * error, no windows are unregistered. 
- * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a - * window, or offset is negative - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the - * registered address space of epd - */ -int scif_unregister(scif_epd_t epd, off_t offset, size_t len); - -/** - * scif_readfrom() - Copy from a remote address space - * @epd: endpoint descriptor - * @loffset: offset in local registered address space to - * which to copy - * @len: length of range to copy - * @roffset: offset in remote registered address space - * from which to copy - * @rma_flags: transfer mode flags - * - * scif_readfrom() copies len bytes from the remote registered address space of - * the peer of endpoint epd, starting at the offset roffset to the local - * registered address space of epd, starting at the offset loffset. - * - * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, - * roffset + len - 1] must be within some registered window or windows of the - * local and remote nodes. A range may intersect multiple registered windows, - * but only if those windows are contiguous in the registered address space. - * - * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using - * programmed read/writes. Otherwise the data is copied using DMA. If rma_- - * flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after the - * transfer is complete. Otherwise, the transfer may be performed asynchron- - * ously. The order in which any two asynchronous RMA operations complete - * is non-deterministic. The synchronization functions, scif_fence_mark()/ - * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to - * the completion of asynchronous RMA operations on the same endpoint. - * - * The DMA transfer of individual bytes is not guaranteed to complete in - * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last - * cacheline or partial cacheline of the source range will become visible on - * the destination node after all other transferred data in the source - * range has become visible on the destination node. - * - * The optimal DMA performance will likely be realized if both - * loffset and roffset are cacheline aligned (are a multiple of 64). Lower - * performance will likely be realized if loffset and roffset are not - * cacheline aligned but are separated by some multiple of 64. The lowest level - * of performance is likely if loffset and roffset are not separated by a - * multiple of 64. - * - * The rma_flags argument is formed by ORing together zero or more of the - * following values. - * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA - * engine. - * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the - * transfer has completed. Passing this flag results in the - * current implementation busy waiting and consuming CPU cycles - * while the DMA transfer is in progress for best performance by - * avoiding the interrupt latency. 
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of - * the source range becomes visible on the destination node - * after all other transferred data in the source range has - * become visible on the destination - * - * Return: - * Upon successful completion, scif_readfrom() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. - * - * Errors: - * EACCES - Attempt to write to a read-only range - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - rma_flags is invalid - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered - * address space of epd, or, The range [roffset, roffset + len - 1] is invalid - * for the registered address space of the peer of epd, or loffset or roffset - * is negative - */ -int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t - roffset, int rma_flags); - -/** - * scif_writeto() - Copy to a remote address space - * @epd: endpoint descriptor - * @loffset: offset in local registered address space - * from which to copy - * @len: length of range to copy - * @roffset: offset in remote registered address space to - * which to copy - * @rma_flags: transfer mode flags - * - * scif_writeto() copies len bytes from the local registered address space of - * epd, starting at the offset loffset to the remote registered address space - * of the peer of endpoint epd, starting at the offset roffset. - * - * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, - * roffset + len - 1] must be within some registered window or windows of the - * local and remote nodes. A range may intersect multiple registered windows, - * but only if those windows are contiguous in the registered address space. - * - * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using - * programmed read/writes. Otherwise the data is copied using DMA. If rma_- - * flags includes SCIF_RMA_SYNC, then scif_writeto() will return after the - * transfer is complete. Otherwise, the transfer may be performed asynchron- - * ously. The order in which any two asynchronous RMA operations complete - * is non-deterministic. The synchronization functions, scif_fence_mark()/ - * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to - * the completion of asynchronous RMA operations on the same endpoint. - * - * The DMA transfer of individual bytes is not guaranteed to complete in - * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last - * cacheline or partial cacheline of the source range will become visible on - * the destination node after all other transferred data in the source - * range has become visible on the destination node. - * - * The optimal DMA performance will likely be realized if both - * loffset and roffset are cacheline aligned (are a multiple of 64). Lower - * performance will likely be realized if loffset and roffset are not cacheline - * aligned but are separated by some multiple of 64. The lowest level of - * performance is likely if loffset and roffset are not separated by a multiple - * of 64. - * - * The rma_flags argument is formed by ORing together zero or more of the - * following values. 
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA - * engine. - * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the - * transfer has completed. Passing this flag results in the - * current implementation busy waiting and consuming CPU cycles - * while the DMA transfer is in progress for best performance by - * avoiding the interrupt latency. - * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of - * the source range becomes visible on the destination node - * after all other transferred data in the source range has - * become visible on the destination - * - * Return: - * Upon successful completion, scif_readfrom() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. - * - * Errors: - * EACCES - Attempt to write to a read-only range - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - rma_flags is invalid - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered - * address space of epd, or, The range [roffset , roffset + len -1] is invalid - * for the registered address space of the peer of epd, or loffset or roffset - * is negative - */ -int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t - roffset, int rma_flags); - -/** - * scif_vreadfrom() - Copy from a remote address space - * @epd: endpoint descriptor - * @addr: address to which to copy - * @len: length of range to copy - * @roffset: offset in remote registered address space - * from which to copy - * @rma_flags: transfer mode flags - * - * scif_vreadfrom() copies len bytes from the remote registered address - * space of the peer of endpoint epd, starting at the offset roffset, to local - * memory, starting at addr. - * - * The specified range [roffset, roffset + len - 1] must be within some - * registered window or windows of the remote nodes. The range may - * intersect multiple registered windows, but only if those windows are - * contiguous in the registered address space. - * - * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using - * programmed read/writes. Otherwise the data is copied using DMA. If rma_- - * flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after the - * transfer is complete. Otherwise, the transfer may be performed asynchron- - * ously. The order in which any two asynchronous RMA operations complete - * is non-deterministic. The synchronization functions, scif_fence_mark()/ - * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to - * the completion of asynchronous RMA operations on the same endpoint. - * - * The DMA transfer of individual bytes is not guaranteed to complete in - * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last - * cacheline or partial cacheline of the source range will become visible on - * the destination node after all other transferred data in the source - * range has become visible on the destination node. - * - * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back - * the specified local memory range may be remain in a pinned state even after - * the specified transfer completes. 
This may reduce overhead if some or all of - * the same virtual address range is referenced in a subsequent call of - * scif_vreadfrom() or scif_vwriteto(). - * - * The optimal DMA performance will likely be realized if both - * addr and roffset are cacheline aligned (are a multiple of 64). Lower - * performance will likely be realized if addr and roffset are not - * cacheline aligned but are separated by some multiple of 64. The lowest level - * of performance is likely if addr and roffset are not separated by a - * multiple of 64. - * - * The rma_flags argument is formed by ORing together zero or more of the - * following values. - * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA - * engine. - * SCIF_RMA_USECACHE - enable registration caching - * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the - * transfer has completed. Passing this flag results in the - * current implementation busy waiting and consuming CPU cycles - * while the DMA transfer is in progress for best performance by - * avoiding the interrupt latency. - * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of - * the source range becomes visible on the destination node - * after all other transferred data in the source range has - * become visible on the destination - * - * Return: - * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. - * - * Errors: - * EACCES - Attempt to write to a read-only range - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - rma_flags is invalid - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the - * registered address space of epd - */ -int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, - int rma_flags); - -/** - * scif_vwriteto() - Copy to a remote address space - * @epd: endpoint descriptor - * @addr: address from which to copy - * @len: length of range to copy - * @roffset: offset in remote registered address space to - * which to copy - * @rma_flags: transfer mode flags - * - * scif_vwriteto() copies len bytes from the local memory, starting at addr, to - * the remote registered address space of the peer of endpoint epd, starting at - * the offset roffset. - * - * The specified range [roffset, roffset + len - 1] must be within some - * registered window or windows of the remote nodes. The range may intersect - * multiple registered windows, but only if those windows are contiguous in the - * registered address space. - * - * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using - * programmed read/writes. Otherwise the data is copied using DMA. If rma_- - * flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after the - * transfer is complete. Otherwise, the transfer may be performed asynchron- - * ously. The order in which any two asynchronous RMA operations complete - * is non-deterministic. The synchronization functions, scif_fence_mark()/ - * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to - * the completion of asynchronous RMA operations on the same endpoint. 
- * - * The DMA transfer of individual bytes is not guaranteed to complete in - * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last - * cacheline or partial cacheline of the source range will become visible on - * the destination node after all other transferred data in the source - * range has become visible on the destination node. - * - * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back - * the specified local memory range may be remain in a pinned state even after - * the specified transfer completes. This may reduce overhead if some or all of - * the same virtual address range is referenced in a subsequent call of - * scif_vreadfrom() or scif_vwriteto(). - * - * The optimal DMA performance will likely be realized if both - * addr and offset are cacheline aligned (are a multiple of 64). Lower - * performance will likely be realized if addr and offset are not cacheline - * aligned but are separated by some multiple of 64. The lowest level of - * performance is likely if addr and offset are not separated by a multiple of - * 64. - * - * The rma_flags argument is formed by ORing together zero or more of the - * following values. - * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA - * engine. - * SCIF_RMA_USECACHE - allow registration caching - * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the - * transfer has completed. Passing this flag results in the - * current implementation busy waiting and consuming CPU cycles - * while the DMA transfer is in progress for best performance by - * avoiding the interrupt latency. - * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of - * the source range becomes visible on the destination node - * after all other transferred data in the source range has - * become visible on the destination - * - * Return: - * Upon successful completion, scif_vwriteto() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. - * - * Errors: - * EACCES - Attempt to write to a read-only range - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - rma_flags is invalid - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the - * registered address space of epd - */ -int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset, - int rma_flags); - -/** - * scif_fence_mark() - Mark previously issued RMAs - * @epd: endpoint descriptor - * @flags: control flags - * @mark: marked value returned as output. - * - * scif_fence_mark() returns after marking the current set of all uncompleted - * RMAs initiated through the endpoint epd or the current set of all - * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are - * marked with a value returned at mark. The application may subsequently call - * scif_fence_wait(), passing the value returned at mark, to await completion - * of all RMAs so marked. - * - * The flags argument has exactly one of the following values. 
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint - * epd are marked - * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer - * of endpoint epd are marked - * - * Return: - * Upon successful completion, scif_fence_mark() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - flags is invalid - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENOMEM - Insufficient kernel memory was available - */ -int scif_fence_mark(scif_epd_t epd, int flags, int *mark); - -/** - * scif_fence_wait() - Wait for completion of marked RMAs - * @epd: endpoint descriptor - * @mark: mark request - * - * scif_fence_wait() returns after all RMAs marked with mark have completed. - * The value passed in mark must have been obtained in a previous call to - * scif_fence_mark(). - * - * Return: - * Upon successful completion, scif_fence_wait() returns 0; otherwise in user - * mode -1 is returned and errno is set to indicate the error; in kernel mode - * the negative of one of the following errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENOMEM - Insufficient kernel memory was available - */ -int scif_fence_wait(scif_epd_t epd, int mark); - -/** - * scif_fence_signal() - Request a memory update on completion of RMAs - * @epd: endpoint descriptor - * @loff: local offset - * @lval: local value to write to loffset - * @roff: remote offset - * @rval: remote value to write to roffset - * @flags: flags - * - * scif_fence_signal() returns after marking the current set of all uncompleted - * RMAs initiated through the endpoint epd or marking the current set of all - * uncompleted RMAs initiated through the peer of endpoint epd. - * - * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the - * marked set, lval is written to memory at the address corresponding to offset - * loff in the local registered address space of epd. loff must be within a - * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion - * of the RMAs in the marked set, rval is written to memory at the address - * corresponding to offset roff in the remote registered address space of epd. - * roff must be within a remote registered window of the peer of epd. Note - * that any specified offset must be DWORD (4 byte / 32 bit) aligned. - * - * The flags argument is formed by OR'ing together the following. - * Exactly one of the following values. - * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint - * epd are marked - * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer - * of endpoint epd are marked - * One or more of the following values. - * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to - * memory at the address corresponding to offset loff in the local - * registered address space of epd. 
- * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to - * memory at the address corresponding to offset roff in the remote - * registered address space of epd. - * - * Return: - * Upon successful completion, scif_fence_signal() returns 0; otherwise in - * user mode -1 is returned and errno is set to indicate the error; in kernel - * mode the negative of one of the following errors is returned. - * - * Errors: - * EBADF, ENOTTY - epd is not a valid endpoint descriptor - * ECONNRESET - Connection reset by peer - * EINVAL - flags is invalid, or loff or roff are not DWORD aligned - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - loff is invalid for the registered address of epd, or roff is invalid - * for the registered address space, of the peer of epd - */ -int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, - u64 rval, int flags); - -/** - * scif_get_node_ids() - Return information about online nodes - * @nodes: array in which to return online node IDs - * @len: number of entries in the nodes array - * @self: address to place the node ID of the local node - * - * scif_get_node_ids() fills in the nodes array with up to len node IDs of the - * nodes in the SCIF network. If there is not enough space in nodes, as - * indicated by the len parameter, only len node IDs are returned in nodes. The - * return value of scif_get_node_ids() is the total number of nodes currently in - * the SCIF network. By checking the return value against the len parameter, - * the user may determine if enough space for nodes was allocated. - * - * The node ID of the local node is returned at self. - * - * Return: - * Upon successful completion, scif_get_node_ids() returns the actual number of - * online nodes in the SCIF network including 'self'; otherwise in user mode - * -1 is returned and errno is set to indicate the error; in kernel mode no - * errors are returned. - */ -int scif_get_node_ids(u16 *nodes, int len, u16 *self); - -/** - * scif_pin_pages() - Pin a set of pages - * @addr: Virtual address of range to pin - * @len: Length of range to pin - * @prot_flags: Page protection flags - * @map_flags: Page classification flags - * @pinned_pages: Handle to pinned pages - * - * scif_pin_pages() pins (locks in physical memory) the physical pages which - * back the range of virtual address pages starting at addr and continuing for - * len bytes. addr and len are constrained to be multiples of the page size. A - * successful scif_pin_pages() call returns a handle to pinned_pages which may - * be used in subsequent calls to scif_register_pinned_pages(). - * - * The pages will remain pinned as long as there is a reference against the - * scif_pinned_pages_t value returned by scif_pin_pages() and until - * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A - * reference is added to a scif_pinned_pages_t value each time a window is - * created by calling scif_register_pinned_pages() and passing the - * scif_pinned_pages_t value. A reference is removed from a - * scif_pinned_pages_t value each time such a window is deleted. - * - * Subsequent operations which change the memory pages to which virtual - * addresses are mapped (such as mmap(), munmap()) have no effect on the - * scif_pinned_pages_t value or windows created against it. 
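
For illustration only (an aside, not part of the patch): the pin/register/unpin
lifetime described above is easiest to see in code. The helper name below is
invented, scif_unregister() is declared earlier in this header, the endpoint
epd is assumed to be already connected, and error handling is abbreviated.

static int scif_pin_example(scif_epd_t epd, void *buf, size_t len)
{
	scif_pinned_pages_t pages;
	off_t po;
	int err;

	/* buf and len must be multiples of the page size */
	err = scif_pin_pages(buf, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
			     SCIF_MAP_KERNEL, &pages);
	if (err)
		return err;

	/* a successful registration adds a reference against pages */
	po = scif_register_pinned_pages(epd, pages, 0, 0);
	if (po < 0) {
		scif_unpin_pages(pages);
		return (int)po;
	}

	/* ... the peer may now perform RMAs against the window at po ... */

	err = scif_unregister(epd, po, len);	/* drops the reference */
	scif_unpin_pages(pages);	/* pages stay pinned until all windows are gone */
	return err;
}
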
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations against the pages
- * SCIF_PROT_WRITE - allow write operations against the pages
- * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a
- * kernel space address. By default, addr is interpreted as a user space
- * address.
- *
- * Return:
- * Upon successful completion, scif_pin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative
- * ENOMEM - Not enough space
- */
-int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
-		   scif_pinned_pages_t *pinned_pages);
-
-/**
- * scif_unpin_pages() - Unpin a set of pages
- * @pinned_pages: Handle to pinned pages to be unpinned
- *
- * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
- * windows against pinned_pages. The physical pages represented by pinned_pages
- * will remain pinned until all windows previously registered against
- * pinned_pages are deleted (the window is scif_unregister()'d and all
- * references to the window are removed; see scif_unregister()).
- *
- * pinned_pages must have been obtained from a previous call to scif_pin_pages().
- * After calling scif_unpin_pages(), it is an error to pass pinned_pages to
- * scif_register_pinned_pages().
- *
- * Return:
- * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - pinned_pages is not valid
- */
-int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
-
-/**
- * scif_register_pinned_pages() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @pinned_pages: Handle to pinned pages
- * @offset: Registered address space offset
- * @map_flags: Flags which control where pages are mapped
- *
- * The scif_register_pinned_pages() function opens a window, a range of whole
- * pages of the registered address space of the endpoint epd, starting at
- * offset po. The value of po, further described below, is a function of the
- * parameters offset and pinned_pages, and the value of map_flags. Each page of
- * the window represents a corresponding physical memory page of the range
- * represented by pinned_pages; the length of the window is the same as the
- * length of range represented by pinned_pages. A successful
- * scif_register_pinned_pages() call returns po as the return value.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register_pinned_pages() will not replace any
- * existing registration; an error is returned if any page of the new window
- * would intersect an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po so chosen will be an
- * area of the registered address space that the implementation deems suitable
- * for a mapping of the required size.
- * An offset value of 0 is interpreted as granting the implementation
- * complete freedom in selecting po, subject to constraints described below.
- * A non-zero value of offset is taken to be a suggestion of an offset near
- * which the mapping should be placed. When the implementation selects a
- * value for po, it does not replace any extant window. In all cases, po
- * will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Windows created by scif_register_pinned_pages() are unregistered by
- * scif_unregister().
- *
- * The map_flags argument can be set to SCIF_MAP_FIXED, which causes offset
- * to be interpreted as a fixed offset.
- *
- * Return:
- * Upon successful completion, scif_register_pinned_pages() returns the offset
- * at which the mapping was placed (po); otherwise the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window
- * would intersect an existing window
- * EAGAIN - The mapping could not be performed due to lack of resources
- * ECONNRESET - Connection reset by peer
- * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags and
- * offset is not a multiple of the page size or offset is negative
- * ENODEV - The remote node is lost or existed, but is not currently in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register_pinned_pages(scif_epd_t epd,
-				 scif_pinned_pages_t pinned_pages,
-				 off_t offset, int map_flags);
-
-/**
- * scif_get_pages() - Add references to remote registered pages
- * @epd: endpoint descriptor
- * @offset: remote registered offset
- * @len: length of range of pages
- * @pages: returned scif_range structure
- *
- * scif_get_pages() returns the addresses of the physical pages represented by
- * those pages of the registered address space of the peer of epd, starting at
- * offset and continuing for len bytes. offset and len are constrained to be
- * multiples of the page size.
- *
- * All of the pages in the specified range [offset, offset + len - 1] must be
- * within a single window of the registered address space of the peer of epd.
- *
- * The addresses are returned as a virtually contiguous array pointed to by the
- * phys_addr component of the scif_range structure whose address is returned in
- * pages. The nr_pages component of scif_range is the length of the array. The
- * prot_flags component of scif_range holds the protection flag value passed
- * when the pages were registered.
- *
- * Each physical page whose address is returned by scif_get_pages() remains
- * available and will not be released for reuse until the scif_range structure
- * is returned in a call to scif_put_pages(). The scif_range structure returned
- * by scif_get_pages() must not be modified.
- *
- * It is an error to call scif_close() on an endpoint while any scif_range
- * structure obtained from that endpoint has not been returned to
- * scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_get_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors: - * ECONNRESET - Connection reset by peer. - * EINVAL - offset is not a multiple of the page size, or offset is negative, or - * len is not a multiple of the page size - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid - * for the registered address space of the peer epd - */ -int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, - struct scif_range **pages); - -/** - * scif_put_pages() - Remove references from remote registered pages - * @pages: pages to be returned - * - * scif_put_pages() releases a scif_range structure previously obtained by - * calling scif_get_pages(). The physical pages represented by pages may - * be reused when the window which represented those pages is unregistered. - * Therefore, those pages must not be accessed after calling scif_put_pages(). - * - * Return: - * Upon successful completion, scif_put_pages() returns 0; otherwise the - * negative of one of the following errors is returned. - * Errors: - * EINVAL - pages does not point to a valid scif_range structure, or - * the scif_range structure pointed to by pages was already returned - * ENODEV - The remote node is lost or existed, but is not currently in the - * network since it may have crashed - * ENOTCONN - The endpoint is not connected - */ -int scif_put_pages(struct scif_range *pages); - -/** - * scif_poll() - Wait for some event on an endpoint - * @epds: Array of endpoint descriptors - * @nepds: Length of epds - * @timeout: Upper limit on time for which scif_poll() will block - * - * scif_poll() waits for one of a set of endpoints to become ready to perform - * an I/O operation. - * - * The epds argument specifies the endpoint descriptors to be examined and the - * events of interest for each endpoint descriptor. epds is a pointer to an - * array with one member for each open endpoint descriptor of interest. - * - * The number of items in the epds array is specified in nepds. The epd field - * of scif_pollepd is an endpoint descriptor of an open endpoint. The field - * events is a bitmask specifying the events which the application is - * interested in. The field revents is an output parameter, filled by the - * kernel with the events that actually occurred. The bits returned in revents - * can include any of those specified in events, or one of the values EPOLLERR, - * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events - * field, and will be set in the revents field whenever the corresponding - * condition is true.) - * - * If none of the events requested (and no error) has occurred for any of the - * endpoint descriptors, then scif_poll() blocks until one of the events occurs. - * - * The timeout argument specifies an upper limit on the time for which - * scif_poll() will block, in milliseconds. Specifying a negative value in - * timeout means an infinite timeout. - * - * The following bits may be set in events and returned in revents. - * EPOLLIN - Data may be received without blocking. For a connected - * endpoint, this means that scif_recv() may be called without blocking. For a - * listening endpoint, this means that scif_accept() may be called without - * blocking. - * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this - * means that scif_send() may be called without blocking. 
EPOLLOUT may also be
- * used to block waiting for a non-blocking connect to complete. This bit value
- * has no meaning for a listening endpoint and is ignored if specified.
- *
- * The following bits are only returned in revents, and are ignored if set in
- * events.
- * EPOLLERR - An error occurred on the endpoint
- * EPOLLHUP - The connection to the peer endpoint was disconnected
- * EPOLLNVAL - The specified endpoint descriptor is invalid.
- *
- * Return:
- * Upon successful completion, scif_poll() returns a non-negative value. A
- * positive value indicates the total number of endpoint descriptors that have
- * been selected (that is, endpoint descriptors for which the revents member is
- * non-zero). A value of 0 indicates that the call timed out and no endpoint
- * descriptors have been selected. Otherwise in user mode -1 is returned and
- * errno is set to indicate the error; in kernel mode the negative of one of
- * the following errors is returned.
- *
- * Errors:
- * EINTR - A signal occurred before any requested event
- * EINVAL - The nepds argument is greater than {OPEN_MAX}
- * ENOMEM - There was no space to allocate file descriptor tables
- */
-int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout);
-
-/**
- * scif_client_register() - Register a SCIF client
- * @client: client to be registered
- *
- * scif_client_register() registers a SCIF client. The probe() method
- * of the client is called when SCIF peer devices come online and the
- * remove() method is called when the peer devices disappear.
- *
- * Return:
- * Upon successful completion, scif_client_register() returns a non-negative
- * value. Otherwise the return value is the same as subsys_interface_register()
- * in the kernel.
- */
-int scif_client_register(struct scif_client *client);
-
-/**
- * scif_client_unregister() - Unregister a SCIF client
- * @client: client to be unregistered
- *
- * scif_client_unregister() unregisters a SCIF client.
- *
- * Return:
- * None
- */
-void scif_client_unregister(struct scif_client *client);
-
-#endif /* __SCIF_H__ */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 9cd312a1ff92..c49c5888e854 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -121,6 +121,7 @@ struct scmi_perf_ops {
 			    unsigned long *rate, unsigned long *power);
 	bool (*fast_switch_possible)(const struct scmi_handle *handle,
 				     struct device *dev);
+	bool (*power_scale_mw_get)(const struct scmi_handle *handle);
 };
 
 /**
@@ -210,6 +211,64 @@ struct scmi_reset_ops {
 };
 
 /**
+ * struct scmi_voltage_info - describe one available SCMI Voltage Domain
+ *
+ * @id: the domain ID as advertised by the platform
+ * @segmented: defines the layout of the entries of array @levels_uv.
+ *	       - when True the entries are to be interpreted as triplets,
+ *	         each defining a segment representing a range of equally
+ *	         spaced voltages: <lowest_volt>, <highest_volt>, <step_uV>
+ *	       - when False the entries simply represent a single discrete
+ *	         supported voltage level
+ * @negative_volts_allowed: True if any of the entries of @levels_uv represent
+ *			    a negative voltage.
+ * @attributes: represents Voltage Domain advertised attributes
+ * @name: name assigned to the Voltage Domain by platform
+ * @num_levels: number of total entries in @levels_uv.
+ * @levels_uv: array of entries describing the available voltage levels for
+ *	       this domain.
+ */ +struct scmi_voltage_info { + unsigned int id; + bool segmented; + bool negative_volts_allowed; + unsigned int attributes; + char name[SCMI_MAX_STR_SIZE]; + unsigned int num_levels; +#define SCMI_VOLTAGE_SEGMENT_LOW 0 +#define SCMI_VOLTAGE_SEGMENT_HIGH 1 +#define SCMI_VOLTAGE_SEGMENT_STEP 2 + int *levels_uv; +}; + +/** + * struct scmi_voltage_ops - represents the various operations provided + * by SCMI Voltage Protocol + * + * @num_domains_get: get the count of voltage domains provided by SCMI + * @info_get: get the information of the specified domain + * @config_set: set the config for the specified domain + * @config_get: get the config of the specified domain + * @level_set: set the voltage level for the specified domain + * @level_get: get the voltage level of the specified domain + */ +struct scmi_voltage_ops { + int (*num_domains_get)(const struct scmi_handle *handle); + const struct scmi_voltage_info __must_check *(*info_get) + (const struct scmi_handle *handle, u32 domain_id); + int (*config_set)(const struct scmi_handle *handle, u32 domain_id, + u32 config); +#define SCMI_VOLTAGE_ARCH_STATE_OFF 0x0 +#define SCMI_VOLTAGE_ARCH_STATE_ON 0x7 + int (*config_get)(const struct scmi_handle *handle, u32 domain_id, + u32 *config); + int (*level_set)(const struct scmi_handle *handle, u32 domain_id, + u32 flags, s32 volt_uV); + int (*level_get)(const struct scmi_handle *handle, u32 domain_id, + s32 *volt_uV); +}; + +/** * struct scmi_notify_ops - represents notifications' operations provided by * SCMI core * @register_event_notifier: Register a notifier_block for the requested event @@ -262,6 +321,7 @@ struct scmi_notify_ops { * @clk_ops: pointer to set of clock protocol operations * @sensor_ops: pointer to set of sensor protocol operations * @reset_ops: pointer to set of reset protocol operations + * @voltage_ops: pointer to set of voltage protocol operations * @notify_ops: pointer to set of notifications related operations * @perf_priv: pointer to private data structure specific to performance * protocol(for internal use only) @@ -273,6 +333,8 @@ struct scmi_notify_ops { * protocol(for internal use only) * @reset_priv: pointer to private data structure specific to reset * protocol(for internal use only) + * @voltage_priv: pointer to private data structure specific to voltage + * protocol(for internal use only) * @notify_priv: pointer to private data structure specific to notifications * (for internal use only) */ @@ -284,6 +346,7 @@ struct scmi_handle { const struct scmi_power_ops *power_ops; const struct scmi_sensor_ops *sensor_ops; const struct scmi_reset_ops *reset_ops; + const struct scmi_voltage_ops *voltage_ops; const struct scmi_notify_ops *notify_ops; /* for protocol internal use */ void *perf_priv; @@ -291,6 +354,7 @@ struct scmi_handle { void *power_priv; void *sensor_priv; void *reset_priv; + void *voltage_priv; void *notify_priv; void *system_priv; }; @@ -303,6 +367,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_CLOCK = 0x14, SCMI_PROTOCOL_SENSOR = 0x15, SCMI_PROTOCOL_RESET = 0x16, + SCMI_PROTOCOL_VOLTAGE = 0x17, }; enum scmi_system_events { diff --git a/include/linux/scs.h b/include/linux/scs.h index 6dec390cf154..18122d9e17ff 100644 --- a/include/linux/scs.h +++ b/include/linux/scs.h @@ -15,24 +15,18 @@ #ifdef CONFIG_SHADOW_CALL_STACK -/* - * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit - * architecture) provided ~40% safety margin on stack usage while keeping - * memory allocation overhead reasonable. 
- */
-#define SCS_SIZE	SZ_1K
+#define SCS_ORDER	0
+#define SCS_SIZE	(PAGE_SIZE << SCS_ORDER)
 #define GFP_SCS		(GFP_KERNEL | __GFP_ZERO)
 
 /* An illegal pointer value to mark the end of the shadow stack. */
 #define SCS_END_MAGIC	(0x5f6UL + POISON_POINTER_DELTA)
 
-/* Allocate a static per-CPU shadow stack */
-#define DEFINE_SCS(name)						\
-	DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)	\
-
 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
 #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)
 
+void *scs_alloc(int node);
+void scs_free(void *s);
 void scs_init(void);
 int scs_prepare(struct task_struct *tsk, int node);
 void scs_release(struct task_struct *tsk);
@@ -61,6 +55,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
 
 #else /* CONFIG_SHADOW_CALL_STACK */
 
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 76731230bbc5..bb1926589693 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -482,11 +482,13 @@ enum sctp_error {
 	 * 11	Restart of an association with new addresses
 	 * 12	User Initiated Abort
 	 * 13	Protocol Violation
+	 * 14	Restart of an Association with New Encapsulation Port
 	 */
 
 	SCTP_ERROR_RESTART	= cpu_to_be16(0x0b),
 	SCTP_ERROR_USER_ABORT	= cpu_to_be16(0x0c),
 	SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
+	SCTP_ERROR_NEW_ENCAP_PORT = cpu_to_be16(0x0e),
 
 	/* ADDIP Section 3.3  New Error Causes
 	 *
@@ -793,4 +795,22 @@ enum {
 	SCTP_FLOWLABEL_VAL_MASK = 0xfffff
 };
 
+/* UDP Encapsulation
+ * draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.html#section-4-4
+ *
+ * The error cause indicating a "Restart of an Association with
+ * New Encapsulation Port"
+ *
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |        Cause Code = 14        |       Cause Length = 8        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |  Current Encapsulation Port   |   New Encapsulation Port      |
+ * +-------------------------------+-------------------------------+
+ */
+struct sctp_new_encap_port_hdr {
+	__be16 cur_port;
+	__be16 new_port;
+};
+
 #endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
deleted file mode 100644
index 00e8b3b614f0..000000000000
--- a/include/linux/sdla.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		Global definitions for the Frame relay interface.
- * - * Version: @(#)if_ifrad.h 0.20 13 Apr 96 - * - * Author: Mike McLagan <mike.mclagan@linux.org> - * - * Changes: - * 0.15 Mike McLagan Structure packing - * - * 0.20 Mike McLagan New flags for S508 buffer handling - */ -#ifndef SDLA_H -#define SDLA_H - -#include <uapi/linux/sdla.h> - - -/* important Z80 window addresses */ -#define SDLA_CONTROL_WND 0xE000 - -#define SDLA_502_CMD_BUF 0xEF60 -#define SDLA_502_RCV_BUF 0xA900 -#define SDLA_502_TXN_AVAIL 0xFFF1 -#define SDLA_502_RCV_AVAIL 0xFFF2 -#define SDLA_502_EVENT_FLAGS 0xFFF3 -#define SDLA_502_MDM_STATUS 0xFFF4 -#define SDLA_502_IRQ_INTERFACE 0xFFFD -#define SDLA_502_IRQ_PERMISSION 0xFFFE -#define SDLA_502_DATA_OFS 0x0010 - -#define SDLA_508_CMD_BUF 0xE000 -#define SDLA_508_TXBUF_INFO 0xF100 -#define SDLA_508_RXBUF_INFO 0xF120 -#define SDLA_508_EVENT_FLAGS 0xF003 -#define SDLA_508_MDM_STATUS 0xF004 -#define SDLA_508_IRQ_INTERFACE 0xF010 -#define SDLA_508_IRQ_PERMISSION 0xF011 -#define SDLA_508_TSE_OFFSET 0xF012 - -/* Event flags */ -#define SDLA_EVENT_STATUS 0x01 -#define SDLA_EVENT_DLCI_STATUS 0x02 -#define SDLA_EVENT_BAD_DLCI 0x04 -#define SDLA_EVENT_LINK_DOWN 0x40 - -/* IRQ Trigger flags */ -#define SDLA_INTR_RX 0x01 -#define SDLA_INTR_TX 0x02 -#define SDLA_INTR_MODEM 0x04 -#define SDLA_INTR_COMPLETE 0x08 -#define SDLA_INTR_STATUS 0x10 -#define SDLA_INTR_TIMER 0x20 - -/* DLCI status bits */ -#define SDLA_DLCI_DELETED 0x01 -#define SDLA_DLCI_ACTIVE 0x02 -#define SDLA_DLCI_WAITING 0x04 -#define SDLA_DLCI_NEW 0x08 -#define SDLA_DLCI_INCLUDED 0x40 - -/* valid command codes */ -#define SDLA_INFORMATION_WRITE 0x01 -#define SDLA_INFORMATION_READ 0x02 -#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03 -#define SDLA_SET_DLCI_CONFIGURATION 0x10 -#define SDLA_READ_DLCI_CONFIGURATION 0x11 -#define SDLA_DISABLE_COMMUNICATIONS 0x12 -#define SDLA_ENABLE_COMMUNICATIONS 0x13 -#define SDLA_READ_DLC_STATUS 0x14 -#define SDLA_READ_DLC_STATISTICS 0x15 -#define SDLA_FLUSH_DLC_STATISTICS 0x16 -#define SDLA_LIST_ACTIVE_DLCI 0x17 -#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18 -#define SDLA_ADD_DLCI 0x20 -#define SDLA_DELETE_DLCI 0x21 -#define SDLA_ACTIVATE_DLCI 0x22 -#define SDLA_DEACTIVATE_DLCI 0x23 -#define SDLA_READ_MODEM_STATUS 0x30 -#define SDLA_SET_MODEM_STATUS 0x31 -#define SDLA_READ_COMMS_ERR_STATS 0x32 -#define SDLA_FLUSH_COMMS_ERR_STATS 0x33 -#define SDLA_READ_CODE_VERSION 0x40 -#define SDLA_SET_IRQ_TRIGGER 0x50 -#define SDLA_GET_IRQ_TRIGGER 0x51 - -/* In channel signal types */ -#define SDLA_ICS_LINK_VERIFY 0x02 -#define SDLA_ICS_STATUS_ENQ 0x03 - -/* modem status flags */ -#define SDLA_MODEM_DTR_HIGH 0x01 -#define SDLA_MODEM_RTS_HIGH 0x02 -#define SDLA_MODEM_DCD_HIGH 0x08 -#define SDLA_MODEM_CTS_HIGH 0x20 - -/* used for RET_MODEM interpretation */ -#define SDLA_MODEM_DCD_LOW 0x01 -#define SDLA_MODEM_CTS_LOW 0x02 - -/* return codes */ -#define SDLA_RET_OK 0x00 -#define SDLA_RET_COMMUNICATIONS 0x01 -#define SDLA_RET_CHANNEL_INACTIVE 0x02 -#define SDLA_RET_DLCI_INACTIVE 0x03 -#define SDLA_RET_DLCI_CONFIG 0x04 -#define SDLA_RET_BUF_TOO_BIG 0x05 -#define SDLA_RET_NO_DATA 0x05 -#define SDLA_RET_BUF_OVERSIZE 0x06 -#define SDLA_RET_CIR_OVERFLOW 0x07 -#define SDLA_RET_NO_BUFS 0x08 -#define SDLA_RET_TIMEOUT 0x0A -#define SDLA_RET_MODEM 0x10 -#define SDLA_RET_CHANNEL_OFF 0x11 -#define SDLA_RET_CHANNEL_ON 0x12 -#define SDLA_RET_DLCI_STATUS 0x13 -#define SDLA_RET_DLCI_UNKNOWN 0x14 -#define SDLA_RET_COMMAND_INVALID 0x1F - -/* Configuration flags */ -#define SDLA_DIRECT_RECV 0x0080 -#define SDLA_TX_NO_EXCEPT 0x0020 -#define SDLA_NO_ICF_MSGS 0x1000 -#define 
SDLA_TX50_RX50 0x0000 -#define SDLA_TX70_RX30 0x2000 -#define SDLA_TX30_RX70 0x4000 - -/* IRQ selection flags */ -#define SDLA_IRQ_RECEIVE 0x01 -#define SDLA_IRQ_TRANSMIT 0x02 -#define SDLA_IRQ_MODEM_STAT 0x04 -#define SDLA_IRQ_COMMAND 0x08 -#define SDLA_IRQ_CHANNEL 0x10 -#define SDLA_IRQ_TIMER 0x20 - -/* definitions for PC memory mapping */ -#define SDLA_8K_WINDOW 0x01 -#define SDLA_S502_SEG_A 0x10 -#define SDLA_S502_SEG_C 0x20 -#define SDLA_S502_SEG_D 0x00 -#define SDLA_S502_SEG_E 0x30 -#define SDLA_S507_SEG_A 0x00 -#define SDLA_S507_SEG_B 0x40 -#define SDLA_S507_SEG_C 0x80 -#define SDLA_S507_SEG_E 0xC0 -#define SDLA_S508_SEG_A 0x00 -#define SDLA_S508_SEG_C 0x10 -#define SDLA_S508_SEG_D 0x08 -#define SDLA_S508_SEG_E 0x18 - -/* SDLA adapter port constants */ -#define SDLA_IO_EXTENTS 0x04 - -#define SDLA_REG_CONTROL 0x00 -#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */ -#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */ -#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */ - -#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */ -#define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */ -#define SDLA_S508_STS 0x01 /* status reg for 508 */ -#define SDLA_S508_IDR 0x02 /* ID reg for 508 */ - -/* control register flags */ -#define SDLA_S502A_START 0x00 /* start the CPU */ -#define SDLA_S502A_INTREQ 0x02 -#define SDLA_S502A_INTEN 0x04 -#define SDLA_S502A_HALT 0x08 /* halt the CPU */ -#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */ - -#define SDLA_S502E_CPUEN 0x01 -#define SDLA_S502E_ENABLE 0x02 -#define SDLA_S502E_INTACK 0x04 - -#define SDLA_S507_ENABLE 0x01 -#define SDLA_S507_IRQ3 0x00 -#define SDLA_S507_IRQ4 0x20 -#define SDLA_S507_IRQ5 0x40 -#define SDLA_S507_IRQ7 0x60 -#define SDLA_S507_IRQ10 0x80 -#define SDLA_S507_IRQ11 0xA0 -#define SDLA_S507_IRQ12 0xC0 -#define SDLA_S507_IRQ15 0xE0 - -#define SDLA_HALT 0x00 -#define SDLA_CPUEN 0x02 -#define SDLA_MEMEN 0x04 -#define SDLA_S507_EPROMWR 0x08 -#define SDLA_S507_EPROMCLK 0x10 -#define SDLA_S508_INTRQ 0x08 -#define SDLA_S508_INTEN 0x10 - -struct sdla_cmd { - char opp_flag; - char cmd; - short length; - char retval; - short dlci; - char flags; - short rxlost_int; - long rxlost_app; - char reserve[2]; - char data[SDLA_MAX_DATA]; /* transfer data buffer */ -} __attribute__((packed)); - -struct intr_info { - char flags; - short txlen; - char irq; - char flags2; - short timeout; -} __attribute__((packed)); - -/* found in the 508's control window at RXBUF_INFO */ -struct buf_info { - unsigned short rse_num; - unsigned long rse_base; - unsigned long rse_next; - unsigned long buf_base; - unsigned short reserved; - unsigned long buf_top; -} __attribute__((packed)); - -/* structure pointed to by rse_base in RXBUF_INFO struct */ -struct buf_entry { - char opp_flag; - short length; - short dlci; - char flags; - short timestamp; - short reserved[2]; - long buf_addr; -} __attribute__((packed)); - -#endif diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 02aef2844c38..47763f3999f7 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -42,7 +42,7 @@ struct seccomp { extern int __secure_computing(const struct seccomp_data *sd); static inline int secure_computing(void) { - if (unlikely(test_thread_flag(TIF_SECCOMP))) + if (unlikely(test_syscall_work(SECCOMP))) return __secure_computing(NULL); return 0; } diff --git a/include/linux/security.h b/include/linux/security.h index bc2725491560..77c7fe58d8a0 100644 --- a/include/linux/security.h +++ 
b/include/linux/security.h @@ -127,6 +127,7 @@ enum lockdown_reason { LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, + LOCKDOWN_XFRM_SECRET, LOCKDOWN_CONFIDENTIALITY_MAX, }; @@ -869,7 +870,7 @@ static inline int security_inode_killpriv(struct dentry *dentry) static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) { - return -EOPNOTSUPP; + return cap_inode_getsecurity(inode, name, buffer, alloc); } static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) @@ -1358,7 +1359,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk); void security_sk_classify_flow(struct sock *sk, struct flowi *fl); void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); void security_sock_graft(struct sock*sk, struct socket *parent); -int security_inet_conn_request(struct sock *sk, +int security_inet_conn_request(const struct sock *sk, struct sk_buff *skb, struct request_sock *req); void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req); @@ -1519,7 +1520,7 @@ static inline void security_sock_graft(struct sock *sk, struct socket *parent) { } -static inline int security_inet_conn_request(struct sock *sk, +static inline int security_inet_conn_request(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { return 0; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 813614d4b71f..b83b3ae3c877 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -107,6 +107,7 @@ void seq_pad(struct seq_file *m, char c); char *mangle_path(char *s, const char *p, const char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); +ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter); loff_t seq_lseek(struct file *, loff_t, int); int seq_release(struct inode *, struct file *); int seq_write(struct seq_file *seq, const void *data, size_t len); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index cbfc78b92b65..2f7bb92b4c9e 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -307,10 +307,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu __seqprop_case((s), mutex, prop), \ __seqprop_case((s), ww_mutex, prop)) -#define __seqcount_ptr(s) __seqprop(s, ptr) -#define __seqcount_sequence(s) __seqprop(s, sequence) -#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible) -#define __seqcount_assert_lock_held(s) __seqprop(s, assert) +#define seqprop_ptr(s) __seqprop(s, ptr) +#define seqprop_sequence(s) __seqprop(s, sequence) +#define seqprop_preemptible(s) __seqprop(s, preemptible) +#define seqprop_assert(s) __seqprop(s, assert) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier @@ -328,13 +328,13 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu */ #define __read_seqcount_begin(s) \ ({ \ - unsigned seq; \ + unsigned __seq; \ \ - while ((seq = __seqcount_sequence(s)) & 1) \ + while ((__seq = seqprop_sequence(s)) & 1) \ cpu_relax(); \ \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ - seq; \ + __seq; \ }) /** @@ -345,10 +345,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu */ #define raw_read_seqcount_begin(s) \ ({ \ - unsigned seq = __read_seqcount_begin(s); \ + unsigned _seq = __read_seqcount_begin(s); \ \ smp_rmb(); \ - seq; \ + _seq; \ }) /** @@ -359,7 +359,7 @@ 
SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu */ #define read_seqcount_begin(s) \ ({ \ - seqcount_lockdep_reader_access(__seqcount_ptr(s)); \ + seqcount_lockdep_reader_access(seqprop_ptr(s)); \ raw_read_seqcount_begin(s); \ }) @@ -376,11 +376,11 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu */ #define raw_read_seqcount(s) \ ({ \ - unsigned seq = __seqcount_sequence(s); \ + unsigned __seq = seqprop_sequence(s); \ \ smp_rmb(); \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ - seq; \ + __seq; \ }) /** @@ -425,9 +425,9 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ - __read_seqcount_t_retry(__seqcount_ptr(s), start) + do___read_seqcount_retry(seqprop_ptr(s), start) -static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) +static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) { kcsan_atomic_next(0); return unlikely(READ_ONCE(s->sequence) != start); @@ -445,27 +445,29 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ - read_seqcount_t_retry(__seqcount_ptr(s), start) + do_read_seqcount_retry(seqprop_ptr(s), start) -static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) +static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return __read_seqcount_t_retry(s, start); + return do___read_seqcount_retry(s, start); } /** * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * + * Context: check write_seqcount_begin() */ #define raw_write_seqcount_begin(s) \ do { \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_disable(); \ \ - raw_write_seqcount_t_begin(__seqcount_ptr(s)); \ + do_raw_write_seqcount_begin(seqprop_ptr(s)); \ } while (0) -static inline void raw_write_seqcount_t_begin(seqcount_t *s) +static inline void do_raw_write_seqcount_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -475,16 +477,18 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s) /** * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * + * Context: check write_seqcount_end() */ #define raw_write_seqcount_end(s) \ do { \ - raw_write_seqcount_t_end(__seqcount_ptr(s)); \ + do_raw_write_seqcount_end(seqprop_ptr(s)); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_enable(); \ } while (0) -static inline void raw_write_seqcount_t_end(seqcount_t *s) +static inline void do_raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; @@ -498,20 +502,21 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s) * @subclass: lockdep nesting level * * See Documentation/locking/lockdep-design.rst + * Context: check write_seqcount_begin() */ #define write_seqcount_begin_nested(s, subclass) \ do { \ - __seqcount_assert_lock_held(s); \ + seqprop_assert(s); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \ + do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \ } while (0) -static inline void 
write_seqcount_t_begin_nested(seqcount_t *s, int subclass) +static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass) { - raw_write_seqcount_t_begin(s); + do_raw_write_seqcount_begin(s); seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); } @@ -519,46 +524,46 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) * write_seqcount_begin() - start a seqcount_t write side critical section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * - * write_seqcount_begin opens a write side critical section of the given - * seqcount_t. - * - * Context: seqcount_t write side critical sections must be serialized and - * non-preemptible. If readers can be invoked from hardirq or softirq + * Context: sequence counter write side sections must be serialized and + * non-preemptible. Preemption will be automatically disabled if and + * only if the seqcount write serialization lock is associated, and + * preemptible. If readers can be invoked from hardirq or softirq * context, interrupts or bottom halves must be respectively disabled. */ #define write_seqcount_begin(s) \ do { \ - __seqcount_assert_lock_held(s); \ + seqprop_assert(s); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin(__seqcount_ptr(s)); \ + do_write_seqcount_begin(seqprop_ptr(s)); \ } while (0) -static inline void write_seqcount_t_begin(seqcount_t *s) +static inline void do_write_seqcount_begin(seqcount_t *s) { - write_seqcount_t_begin_nested(s, 0); + do_write_seqcount_begin_nested(s, 0); } /** * write_seqcount_end() - end a seqcount_t write side critical section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * - * The write section must've been opened with write_seqcount_begin(). + * Context: Preemption will be automatically re-enabled if and only if + * the seqcount write serialization lock is associated, and preemptible. */ #define write_seqcount_end(s) \ do { \ - write_seqcount_t_end(__seqcount_ptr(s)); \ + do_write_seqcount_end(seqprop_ptr(s)); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_enable(); \ } while (0) -static inline void write_seqcount_t_end(seqcount_t *s) +static inline void do_write_seqcount_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_t_end(s); + do_raw_write_seqcount_end(s); } /** @@ -603,9 +608,9 @@ static inline void write_seqcount_t_end(seqcount_t *s) * } */ #define raw_write_seqcount_barrier(s) \ - raw_write_seqcount_t_barrier(__seqcount_ptr(s)) + do_raw_write_seqcount_barrier(seqprop_ptr(s)) -static inline void raw_write_seqcount_t_barrier(seqcount_t *s) +static inline void do_raw_write_seqcount_barrier(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -623,9 +628,9 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s) * will complete successfully and see data older than this. */ #define write_seqcount_invalidate(s) \ - write_seqcount_t_invalidate(__seqcount_ptr(s)) + do_write_seqcount_invalidate(seqprop_ptr(s)) -static inline void write_seqcount_t_invalidate(seqcount_t *s) +static inline void do_write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); kcsan_nestable_atomic_begin(); @@ -865,9 +870,9 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) } /* - * For all seqlock_t write side functions, use write_seqcount_*t*_begin() - * instead of the generic write_seqcount_begin(). 
This way, no redundant
- * lockdep_assert_held() checks are added.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
  */
 
 /**
@@ -886,7 +891,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -898,7 +903,7 @@ static inline void write_seqlock(seqlock_t *sl)
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
 }
 
@@ -912,7 +917,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -925,7 +930,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
@@ -939,7 +944,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 
 /**
@@ -951,7 +956,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -960,7 +965,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 	return flags;
 }
 
@@ -989,7 +994,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 2b70f736b091..9e655055112d 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -187,4 +187,9 @@ extern void serial8250_set_isa_configurator(void (*v)
 					(int port, struct uart_port *up,
 						u32 *capabilities));
 
+#ifdef CONFIG_SERIAL_8250_RT288X
+unsigned int au_serial_in(struct uart_port *p, int offset);
+void au_serial_out(struct uart_port *p, int offset, int value);
+#endif
+
 #endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index ff63c2963359..e1b684e33841 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -357,8 +357,8 @@ struct earlycon_id {
 	int	(*setup)(struct earlycon_device *, const char *options);
 };
 
-extern const struct earlycon_id *__earlycon_table[];
-extern const struct earlycon_id *__earlycon_table_end[];
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
 
 #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
 #define EARLYCON_USED_OR_UNUSED	__used
@@ -366,19 +366,13 @@ extern const struct earlycon_id 
*__earlycon_table_end[]; #define EARLYCON_USED_OR_UNUSED __maybe_unused #endif -#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \ - static const struct earlycon_id unique_id \ - EARLYCON_USED_OR_UNUSED __initconst \ +#define OF_EARLYCON_DECLARE(_name, compat, fn) \ + static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ + EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \ + __aligned(__alignof__(struct earlycon_id)) \ = { .name = __stringify(_name), \ .compatible = compat, \ - .setup = fn }; \ - static const struct earlycon_id EARLYCON_USED_OR_UNUSED \ - __section("__earlycon_table") \ - * const __PASTE(__p, unique_id) = &unique_id - -#define OF_EARLYCON_DECLARE(_name, compat, fn) \ - _OF_EARLYCON_DECLARE(_name, compat, fn, \ - __UNIQUE_ID(__earlycon_##_name)) + .setup = fn } #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h deleted file mode 100644 index 619d748dcd44..000000000000 --- a/include/linux/serial_pnx8xxx.h +++ /dev/null @@ -1,67 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Embedded Alley Solutions, source@embeddedalley.com. - */ - -#ifndef _LINUX_SERIAL_PNX8XXX_H -#define _LINUX_SERIAL_PNX8XXX_H - -#include <linux/serial_core.h> - -#define PNX8XXX_NR_PORTS 2 - -struct pnx8xxx_port { - struct uart_port port; - struct timer_list timer; - unsigned int old_status; -}; - -/* register offsets */ -#define PNX8XXX_LCR 0 -#define PNX8XXX_MCR 0x004 -#define PNX8XXX_BAUD 0x008 -#define PNX8XXX_CFG 0x00c -#define PNX8XXX_FIFO 0x028 -#define PNX8XXX_ISTAT 0xfe0 -#define PNX8XXX_IEN 0xfe4 -#define PNX8XXX_ICLR 0xfe8 -#define PNX8XXX_ISET 0xfec -#define PNX8XXX_PD 0xff4 -#define PNX8XXX_MID 0xffc - -#define PNX8XXX_UART_LCR_TXBREAK (1<<30) -#define PNX8XXX_UART_LCR_PAREVN 0x10000000 -#define PNX8XXX_UART_LCR_PAREN 0x08000000 -#define PNX8XXX_UART_LCR_2STOPB 0x04000000 -#define PNX8XXX_UART_LCR_8BIT 0x01000000 -#define PNX8XXX_UART_LCR_TX_RST 0x00040000 -#define PNX8XXX_UART_LCR_RX_RST 0x00020000 -#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000 - -#define PNX8XXX_UART_MCR_SCR 0xFF000000 -#define PNX8XXX_UART_MCR_DCD 0x00800000 -#define PNX8XXX_UART_MCR_CTS 0x00100000 -#define PNX8XXX_UART_MCR_LOOP 0x00000010 -#define PNX8XXX_UART_MCR_RTS 0x00000002 -#define PNX8XXX_UART_MCR_DTR 0x00000001 - -#define PNX8XXX_UART_INT_TX 0x00000080 -#define PNX8XXX_UART_INT_EMPTY 0x00000040 -#define PNX8XXX_UART_INT_RCVTO 0x00000020 -#define PNX8XXX_UART_INT_RX 0x00000010 -#define PNX8XXX_UART_INT_RXOVRN 0x00000008 -#define PNX8XXX_UART_INT_FRERR 0x00000004 -#define PNX8XXX_UART_INT_BREAK 0x00000002 -#define PNX8XXX_UART_INT_PARITY 0x00000001 -#define PNX8XXX_UART_INT_ALLRX 0x0000003F -#define PNX8XXX_UART_INT_ALLTX 0x000000C0 - -#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000 -#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16) -#define PNX8XXX_UART_FIFO_RXBRK 0x00008000 -#define PNX8XXX_UART_FIFO_RXFE 0x00004000 -#define PNX8XXX_UART_FIFO_RXPAR 0x00002000 -#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00 -#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF - -#endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 860e0f843c12..fe1aa4e54680 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -23,6 +23,11 @@ static inline int set_direct_map_default_noflush(struct page *page) { return 0; } + +static inline bool kernel_page_present(struct page *page) +{ + return true; +} #endif #ifndef set_mce_nospec diff --git a/include/linux/shmem_fs.h 
b/include/linux/shmem_fs.h index a5a5d1d4d7b1..d82b6f396588 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -67,7 +67,11 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); #ifdef CONFIG_SHMEM -extern bool shmem_mapping(struct address_space *mapping); +extern const struct address_space_operations shmem_aops; +static inline bool shmem_mapping(struct address_space *mapping) +{ + return mapping->a_ops == &shmem_aops; +} #else static inline bool shmem_mapping(struct address_space *mapping) { diff --git a/include/linux/signal.h b/include/linux/signal.h index 7bbc0e9cf084..205526c4003a 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -238,6 +238,7 @@ static inline void siginitset(sigset_t *set, unsigned long mask) memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = 0; + break; case 1: ; } } @@ -250,6 +251,7 @@ static inline void siginitsetinv(sigset_t *set, unsigned long mask) memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = -1; + break; case 1: ; } } @@ -467,4 +469,18 @@ struct seq_file; extern void render_sigset_t(struct seq_file *, const char *, sigset_t *); #endif +#ifndef arch_untagged_si_addr +/* + * Given a fault address and a signal and si_code which correspond to the + * _sigfault union member, returns the address that must appear in si_addr if + * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags. + */ +static inline void __user *arch_untagged_si_addr(void __user *addr, + unsigned long sig, + unsigned long si_code) +{ + return addr; +} +#endif + #endif /* _LINUX_SIGNAL_H */ diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index f8a90ae9c6ec..68e06c75c5b2 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -68,4 +68,16 @@ struct ksignal { int sig; }; +#ifndef __ARCH_UAPI_SA_FLAGS +#ifdef SA_RESTORER +#define __ARCH_UAPI_SA_FLAGS SA_RESTORER +#else +#define __ARCH_UAPI_SA_FLAGS 0 +#endif +#endif + +#define UAPI_SA_FLAGS \ + (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \ + SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS) + #endif /* _LINUX_SIGNAL_TYPES_H */ diff --git a/include/linux/siox.h b/include/linux/siox.h index da7225bf1877..6bfbda3f634c 100644 --- a/include/linux/siox.h +++ b/include/linux/siox.h @@ -36,7 +36,7 @@ bool siox_device_connected(struct siox_device *sdevice); struct siox_driver { int (*probe)(struct siox_device *sdevice); - int (*remove)(struct siox_device *sdevice); + void (*remove)(struct siox_device *sdevice); void (*shutdown)(struct siox_device *sdevice); /* diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a828cf99c521..333bcdc39635 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -701,6 +701,7 @@ typedef unsigned char *sk_buff_data_t; * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header + * @kcov_handle: KCOV remote handle for remote coverage collection * @tail: Tail pointer * @end: End pointer * @head: Head of buffer @@ -904,6 +905,10 @@ struct sk_buff { __u16 network_header; __u16 mac_header; +#ifdef CONFIG_KCOV + u64 kcov_handle; +#endif + /* private: */ __u32 headers_end[0]; /* public: */ @@ -4605,5 +4610,22 @@ static inline void skb_reset_redirect(struct sk_buff 
*skb)
 #endif
 }
 
+static inline void skb_set_kcov_handle(struct sk_buff *skb,
+				       const u64 kcov_handle)
+{
+#ifdef CONFIG_KCOV
+	skb->kcov_handle = kcov_handle;
+#endif
+}
+
+static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
+{
+#ifdef CONFIG_KCOV
+	return skb->kcov_handle;
+#else
+	return 0;
+#endif
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index dd6897f62010..be4ba5867ac5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -593,6 +593,24 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 }
 
 /**
+ * krealloc_array - reallocate memory for an array.
+ * @p: pointer to the memory chunk to reallocate
+ * @new_n: new number of elements to alloc
+ * @new_size: new size of a single member of the array
+ * @flags: the type of memory to allocate (see kmalloc)
+ */
+static __must_check inline void *
+krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+	size_t bytes;
+
+	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+		return NULL;
+
+	return krealloc(p, bytes, flags);
+}
+
+/**
  * kcalloc - allocate memory for an array. The memory is set to zero.
  * @n: number of elements.
  * @size: element size.
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9f13966d3d92..70c6f6284dcf 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -21,24 +21,23 @@ typedef bool (*smp_cond_func_t)(int cpu, void *info);
  * structure shares (partial) layout with struct irq_work
  */
 struct __call_single_data {
-	union {
-		struct __call_single_node node;
-		struct {
-			struct llist_node llist;
-			unsigned int flags;
-#ifdef CONFIG_64BIT
-			u16 src, dst;
-#endif
-		};
-	};
+	struct __call_single_node node;
 	smp_call_func_t func;
 	void *info;
 };
 
+#define CSD_INIT(_func, _info) \
+	(struct __call_single_data){ .func = (_func), .info = (_info), }
+
 /* Use __aligned() to avoid using 2 cache lines for 1 csd */
 typedef struct __call_single_data call_single_data_t
 	__aligned(sizeof(struct __call_single_data));
 
+#define INIT_CSD(_csd, _func, _info)		\
+do {						\
+	*(_csd) = CSD_INIT((_func), (_info));	\
+} while (0)
+
 /*
  * Enqueue a llist_node on the call_single_queue; be very careful, read
  * flush_smp_call_function_queue() in detail.
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
new file mode 100644
index 000000000000..ae2279fe830a
--- /dev/null
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __SOC_OTX2_ASM_H
+#define __SOC_OTX2_ASM_H
+
+#if defined(CONFIG_ARM64)
+/*
+ * otx2_lmt_flush is used for LMT store operation.
+ * On the octeontx2 platform, CPT instruction enqueue and
+ * NIX packet send are only possible via LMTST
+ * operations, which use the LDEOR instruction targeting
+ * the coprocessor address.
+ */ +#define otx2_lmt_flush(ioaddr) \ +({ \ + u64 result = 0; \ + __asm__ volatile(".cpu generic+lse\n" \ + "ldeor xzr, %x[rf], [%[rs]]" \ + : [rf]"=r" (result) \ + : [rs]"r" (ioaddr)); \ + (result); \ +}) +#else +#define otx2_lmt_flush(ioaddr) ({ 0; }) +#endif + +#endif /* __SOC_OTX2_ASM_H */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 41cc1192f9aa..f0b01b728640 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -359,6 +359,7 @@ struct sdw_dpn_prop { * @sink_dpn_prop: Sink Data Port N properties * @scp_int1_mask: SCP_INT1_MASK desired settings * @quirks: bitmask identifying deltas from the MIPI specification + * @is_sdca: the Slave supports the SDCA specification */ struct sdw_slave_prop { u32 mipi_revision; @@ -382,6 +383,7 @@ struct sdw_slave_prop { struct sdw_dpn_prop *sink_dpn_prop; u8 scp_int1_mask; u32 quirks; + bool is_sdca; }; #define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0) @@ -479,10 +481,12 @@ struct sdw_slave_id { /** * struct sdw_slave_intr_status - Slave interrupt status + * @sdca_cascade: set if the Slave device reports an SDCA interrupt * @control_port: control port status * @port: data port status */ struct sdw_slave_intr_status { + bool sdca_cascade; u8 control_port; u8 port[15]; }; diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index f420e8059779..138bec908c40 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -41,6 +41,12 @@ #define SDW_DP0_INT_IMPDEF1 BIT(5) #define SDW_DP0_INT_IMPDEF2 BIT(6) #define SDW_DP0_INT_IMPDEF3 BIT(7) +#define SDW_DP0_INTERRUPTS (SDW_DP0_INT_TEST_FAIL | \ + SDW_DP0_INT_PORT_READY | \ + SDW_DP0_INT_BRA_FAILURE | \ + SDW_DP0_INT_IMPDEF1 | \ + SDW_DP0_INT_IMPDEF2 | \ + SDW_DP0_INT_IMPDEF3) #define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2) #define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4) @@ -241,6 +247,11 @@ #define SDW_DPN_INT_IMPDEF1 BIT(5) #define SDW_DPN_INT_IMPDEF2 BIT(6) #define SDW_DPN_INT_IMPDEF3 BIT(7) +#define SDW_DPN_INTERRUPTS (SDW_DPN_INT_TEST_FAIL | \ + SDW_DPN_INT_PORT_READY | \ + SDW_DPN_INT_IMPDEF1 | \ + SDW_DPN_INT_IMPDEF2 | \ + SDW_DPN_INT_IMPDEF3) #define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0) #define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2) @@ -298,4 +309,36 @@ #define SDW_CASC_PORT_MASK_INTSTAT3 1 #define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2 +/* + * v1.2 device - SDCA address mapping + * + * Spec definition + * Bits Contents + * 31 0 (required by addressing range) + * 30:26 0b10000 (Control Prefix) + * 25 0 (Reserved) + * 24:22 Function Number [2:0] + * 21 Entity[6] + * 20:19 Control Selector[5:4] + * 18 0 (Reserved) + * 17:15 Control Number[5:3] + * 14 Next + * 13 MBQ + * 12:7 Entity[5:0] + * 6:3 Control Selector[3:0] + * 2:0 Control Number[2:0] + */ + +#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \ + (((fun) & 0x7) << 22) | \ + (((ent) & 0x40) << 15) | \ + (((ent) & 0x3f) << 7) | \ + (((ctl) & 0x30) << 15) | \ + (((ctl) & 0x0f) << 3) | \ + (((ch) & 0x38) << 12) | \ + ((ch) & 0x07)) + +#define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13)) +#define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14)) + #endif /* __SDW_REGISTERS_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 99380c0825db..aa09fdc8042d 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -171,6 +171,7 @@ struct spi_device { #define SPI_MODE_1 (0|SPI_CPHA) #define SPI_MODE_2 (SPI_CPOL|0) #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) +#define SPI_MODE_X_MASK 
(SPI_CPOL|SPI_CPHA) #define SPI_CS_HIGH 0x04 /* chipselect active high? */ #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ #define SPI_3WIRE 0x10 /* SI/SO signals shared */ @@ -734,6 +735,25 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host, return __spi_alloc_controller(host, size, true); } +struct spi_controller *__devm_spi_alloc_controller(struct device *dev, + unsigned int size, + bool slave); + +static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, + unsigned int size) +{ + return __devm_spi_alloc_controller(dev, size, false); +} + +static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, + unsigned int size) +{ + if (!IS_ENABLED(CONFIG_SPI_SLAVE)) + return NULL; + + return __devm_spi_alloc_controller(dev, size, true); +} + extern int spi_register_controller(struct spi_controller *ctlr); extern int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr); diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 394a3f68bad5..729bcbf9f5ad 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -138,6 +138,7 @@ struct spmi_driver { struct device_driver driver; int (*probe)(struct spmi_device *sdev); void (*remove)(struct spmi_device *sdev); + void (*shutdown)(struct spmi_device *sdev); }; static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 628e28903b8b..15ca6b4167cc 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -170,6 +170,7 @@ struct plat_stmmacenet_data { int unicast_filter_entries; int tx_fifo_size; int rx_fifo_size; + u32 addr64; u32 rx_queues_to_use; u32 tx_queues_to_use; u8 rx_sched_algorithm; diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 76d8b09384a7..30577c3aecf8 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -24,6 +24,7 @@ typedef int (*cpu_stop_fn_t)(void *arg); struct cpu_stop_work { struct list_head list; /* cpu_stopper->works */ cpu_stop_fn_t fn; + unsigned long caller; void *arg; struct cpu_stop_done *done; }; @@ -36,6 +37,8 @@ void stop_machine_park(int cpu); void stop_machine_unpark(int cpu); void stop_machine_yield(const struct cpumask *cpumask); +extern void print_stop_info(const char *log_lvl, struct task_struct *task); + #else /* CONFIG_SMP */ #include <linux/workqueue.h> @@ -80,6 +83,8 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu, return false; } +static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { } + #endif /* CONFIG_SMP */ /* diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 386628b36bc7..34c2a69820e9 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -247,6 +247,8 @@ struct svc_rqst { size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; + struct xdr_stream rq_arg_stream; + struct page *rq_scratch_page; struct xdr_buf rq_res; struct page *rq_pages[RPCSVC_MAXPAGES + 1]; struct page * *rq_respages; /* points into rq_pages */ @@ -519,9 +521,9 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); -int svc_encode_read_payload(struct svc_rqst *rqstp, - unsigned int offset, - unsigned int length); +int svc_encode_result_payload(struct svc_rqst *rqstp, + unsigned int offset, + unsigned int length); unsigned int 
svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, struct kvec *first, size_t total); @@ -557,4 +559,18 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) svc_reserve(rqstp, space + rqstp->rq_auth_slack); } +/** + * svcxdr_init_decode - Prepare an xdr_stream for svc Call decoding + * @rqstp: controlling server RPC transaction context + * + */ +static inline void svcxdr_init_decode(struct svc_rqst *rqstp) +{ + struct xdr_stream *xdr = &rqstp->rq_arg_stream; + struct kvec *argv = rqstp->rq_arg.head; + + xdr_init_decode(xdr, &rqstp->rq_arg, argv->iov_base, NULL); + xdr_set_scratch_page(xdr, rqstp->rq_scratch_page); +} + #endif /* SUNRPC_SVC_H */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 9dc3a3b88391..294b56e61522 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -47,6 +47,8 @@ #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/rpc_rdma.h> #include <linux/sunrpc/rpc_rdma_cid.h> +#include <linux/sunrpc/svc_rdma_pcl.h> + #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> @@ -142,10 +144,15 @@ struct svc_rdma_recv_ctxt { unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; - __be32 *rc_write_list; - __be32 *rc_reply_chunk; - unsigned int rc_read_payload_offset; - unsigned int rc_read_payload_length; + __be32 rc_msgtype; + + struct svc_rdma_pcl rc_call_pcl; + + struct svc_rdma_pcl rc_read_pcl; + struct svc_rdma_chunk *rc_cur_result_payload; + struct svc_rdma_pcl rc_write_pcl; + struct svc_rdma_pcl rc_reply_pcl; + struct page *rc_pages[RPCSVC_MAXPAGES]; }; @@ -171,6 +178,8 @@ extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); +extern struct svc_rdma_recv_ctxt * + svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma); extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt); extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); @@ -179,16 +188,15 @@ extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma); -extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, - struct svc_rqst *rqstp, - struct svc_rdma_recv_ctxt *head, __be32 *p); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, - __be32 *wr_ch, struct xdr_buf *xdr, - unsigned int offset, - unsigned long length); + const struct svc_rdma_chunk *chunk, + const struct xdr_buf *xdr); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, const struct svc_rdma_recv_ctxt *rctxt, - struct xdr_buf *xdr); + const struct xdr_buf *xdr); +extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma, + struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *head); /* svc_rdma_sendto.c */ extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma); @@ -201,14 +209,14 @@ extern int svc_rdma_send(struct svcxprt_rdma *rdma, extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, - struct xdr_buf *xdr); + const struct xdr_buf *xdr); extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, struct svc_rdma_recv_ctxt *rctxt, int status); extern int svc_rdma_sendto(struct svc_rqst *); -extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, - unsigned int length); 
+extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length); /* svc_rdma_transport.c */ extern struct svc_xprt_class svc_rdma_class; diff --git a/include/linux/sunrpc/svc_rdma_pcl.h b/include/linux/sunrpc/svc_rdma_pcl.h new file mode 100644 index 000000000000..7516ad0fae80 --- /dev/null +++ b/include/linux/sunrpc/svc_rdma_pcl.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020, Oracle and/or its affiliates + */ + +#ifndef SVC_RDMA_PCL_H +#define SVC_RDMA_PCL_H + +#include <linux/list.h> + +struct svc_rdma_segment { + u32 rs_handle; + u32 rs_length; + u64 rs_offset; +}; + +struct svc_rdma_chunk { + struct list_head ch_list; + + u32 ch_position; + u32 ch_length; + u32 ch_payload_length; + + u32 ch_segcount; + struct svc_rdma_segment ch_segments[]; +}; + +struct svc_rdma_pcl { + unsigned int cl_count; + struct list_head cl_chunks; +}; + +/** + * pcl_init - Initialize a parsed chunk list + * @pcl: parsed chunk list to initialize + * + */ +static inline void pcl_init(struct svc_rdma_pcl *pcl) +{ + INIT_LIST_HEAD(&pcl->cl_chunks); +} + +/** + * pcl_is_empty - Return true if parsed chunk list is empty + * @pcl: parsed chunk list + * + */ +static inline bool pcl_is_empty(const struct svc_rdma_pcl *pcl) +{ + return list_empty(&pcl->cl_chunks); +} + +/** + * pcl_first_chunk - Return first chunk in a parsed chunk list + * @pcl: parsed chunk list + * + * Returns the first chunk in the list, or NULL if the list is empty. + */ +static inline struct svc_rdma_chunk * +pcl_first_chunk(const struct svc_rdma_pcl *pcl) +{ + if (pcl_is_empty(pcl)) + return NULL; + return list_first_entry(&pcl->cl_chunks, struct svc_rdma_chunk, + ch_list); +} + +/** + * pcl_next_chunk - Return next chunk in a parsed chunk list + * @pcl: a parsed chunk list + * @chunk: chunk in @pcl + * + * Returns the next chunk in the list, or NULL if @chunk is already last. 
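+ *
+ * A minimal traversal sketch (handle_chunk() is a hypothetical
+ * callback; the pcl_for_each_chunk() helper below is the preferred
+ * form):
+ *
+ *	struct svc_rdma_chunk *chunk;
+ *
+ *	for (chunk = pcl_first_chunk(pcl); chunk;
+ *	     chunk = pcl_next_chunk(pcl, chunk))
+ *		handle_chunk(chunk);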
+ */ +static inline struct svc_rdma_chunk * +pcl_next_chunk(const struct svc_rdma_pcl *pcl, struct svc_rdma_chunk *chunk) +{ + if (list_is_last(&chunk->ch_list, &pcl->cl_chunks)) + return NULL; + return list_next_entry(chunk, ch_list); +} + +/** + * pcl_for_each_chunk - Iterate over chunks in a parsed chunk list + * @pos: the loop cursor + * @pcl: a parsed chunk list + */ +#define pcl_for_each_chunk(pos, pcl) \ + for (pos = list_first_entry(&(pcl)->cl_chunks, struct svc_rdma_chunk, ch_list); \ + &pos->ch_list != &(pcl)->cl_chunks; \ + pos = list_next_entry(pos, ch_list)) + +/** + * pcl_for_each_segment - Iterate over segments in a parsed chunk + * @pos: the loop cursor + * @chunk: a parsed chunk + */ +#define pcl_for_each_segment(pos, chunk) \ + for (pos = &(chunk)->ch_segments[0]; \ + pos <= &(chunk)->ch_segments[(chunk)->ch_segcount - 1]; \ + pos++) + +/** + * pcl_chunk_end_offset - Return offset of byte range following @chunk + * @chunk: a parsed chunk + * + * Returns starting offset of the region just after @chunk + */ +static inline unsigned int +pcl_chunk_end_offset(const struct svc_rdma_chunk *chunk) +{ + return xdr_align_size(chunk->ch_position + chunk->ch_payload_length); +} + +struct svc_rdma_recv_ctxt; + +extern void pcl_free(struct svc_rdma_pcl *pcl); +extern bool pcl_alloc_call(struct svc_rdma_recv_ctxt *rctxt, __be32 *p); +extern bool pcl_alloc_read(struct svc_rdma_recv_ctxt *rctxt, __be32 *p); +extern bool pcl_alloc_write(struct svc_rdma_recv_ctxt *rctxt, + struct svc_rdma_pcl *pcl, __be32 *p); +extern int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl, + const struct xdr_buf *xdr, + int (*actor)(const struct xdr_buf *, + void *), + void *data); + +#endif /* SVC_RDMA_PCL_H */ diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index aca35ab5cff2..92455e0d5244 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -21,8 +21,8 @@ struct svc_xprt_ops { int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); - int (*xpo_read_payload)(struct svc_rqst *, unsigned int, - unsigned int); + int (*xpo_result_payload)(struct svc_rqst *, unsigned int, + unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9548d075e06d..9b35ce50cf2b 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -183,7 +183,8 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) */ extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); -extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf, + unsigned int base, unsigned int len); extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); @@ -247,13 +248,57 @@ extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst); extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len); -extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); extern __be32 *xdr_inline_decode(struct
xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); extern uint64_t xdr_align_data(struct xdr_stream *, uint64_t, uint32_t); extern uint64_t xdr_expand_hole(struct xdr_stream *, uint64_t, uint64_t); +extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, + unsigned int len); + +/** + * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. + * @xdr: pointer to xdr_stream struct + * @buf: pointer to an empty buffer + * @buflen: size of 'buf' + * + * The scratch buffer is used when decoding from an array of pages. + * If an xdr_inline_decode() call spans across page boundaries, then + * we copy the data into the scratch buffer in order to allow linear + * access. + */ +static inline void +xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen) +{ + xdr->scratch.iov_base = buf; + xdr->scratch.iov_len = buflen; +} + +/** + * xdr_set_scratch_page - Attach a scratch buffer for decoding data + * @xdr: pointer to xdr_stream struct + * @page: an anonymous page + * + * See xdr_set_scratch_buffer(). + */ +static inline void +xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page) +{ + xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE); +} + +/** + * xdr_reset_scratch_buffer - Clear scratch buffer information + * @xdr: pointer to xdr_stream struct + * + * See xdr_set_scratch_buffer(). + */ +static inline void +xdr_reset_scratch_buffer(struct xdr_stream *xdr) +{ + xdr_set_scratch_buffer(xdr, NULL, 0); +} /** * xdr_stream_remaining - Return the number of bytes remaining in the stream @@ -506,6 +551,27 @@ static inline bool xdr_item_is_present(const __be32 *p) } /** + * xdr_stream_decode_bool - Decode a boolean + * @xdr: pointer to xdr_stream + * @ptr: pointer to a u32 in which to store the result + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_bool(struct xdr_stream *xdr, __u32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = (*p != xdr_zero); + return 0; +} + +/** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store integer @@ -527,6 +593,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) } /** + * xdr_stream_decode_u64 - Decode a 64-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store 64-bit integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + xdr_decode_hyper(p, ptr); + return 0; +} + +/** * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: location to store data diff --git a/include/linux/swap.h b/include/linux/swap.h index 667935c0dbd4..596bc2f4d9b0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -338,8 +338,6 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages); extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); -extern void 
lru_add_page_tail(struct page *page, struct page *page_tail, - struct lruvec *lruvec, struct list_head *head); extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); @@ -358,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page, extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); -extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); +extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 513913ff7486..fbdc65782195 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -5,6 +5,7 @@ #include <linux/dma-direction.h> #include <linux/init.h> #include <linux/types.h> +#include <linux/limits.h> struct device; struct page; @@ -45,13 +46,9 @@ enum dma_sync_target { SYNC_FOR_DEVICE = 1, }; -extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, - dma_addr_t tbl_dma_addr, - phys_addr_t phys, - size_t mapping_size, - size_t alloc_size, - enum dma_data_direction dir, - unsigned long attrs); +phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, + size_t mapping_size, size_t alloc_size, + enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h new file mode 100644 index 000000000000..a0ae443fb7df --- /dev/null +++ b/include/linux/syscall_user_dispatch.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Collabora Ltd. 
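+ *
+ * Userspace reaches this facility through prctl(); a hedged sketch of
+ * the call (text_start and text_len are illustrative values):
+ *
+ *	char selector = SYSCALL_DISPATCH_FILTER_ALLOW;
+ *
+ *	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
+ *	      text_start, text_len, &selector);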
+ */ +#ifndef _SYSCALL_USER_DISPATCH_H +#define _SYSCALL_USER_DISPATCH_H + +#include <linux/thread_info.h> + +#ifdef CONFIG_GENERIC_ENTRY + +struct syscall_user_dispatch { + char __user *selector; + unsigned long offset; + unsigned long len; + bool on_dispatch; +}; + +int set_syscall_user_dispatch(unsigned long mode, unsigned long offset, + unsigned long len, char __user *selector); + +#define clear_syscall_work_syscall_user_dispatch(tsk) \ + clear_task_syscall_work(tsk, SYSCALL_USER_DISPATCH) + +#else +struct syscall_user_dispatch {}; + +static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset, + unsigned long len, char __user *selector) +{ + return -EINVAL; +} + +static inline void clear_syscall_work_syscall_user_dispatch(struct task_struct *tsk) +{ +} + +#endif /* CONFIG_GENERIC_ENTRY */ + +#endif /* _SYSCALL_USER_DISPATCH_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index d07ea27e72a9..31b84404f047 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -79,6 +79,8 @@ struct thermal_zone_device_ops { enum thermal_trend *); int (*notify) (struct thermal_zone_device *, int, enum thermal_trip_type); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); }; struct thermal_cooling_device_ops { @@ -399,6 +401,7 @@ void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); +void thermal_zone_device_critical(struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index e93e249a4e9b..c8a974cead73 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -35,6 +35,24 @@ enum { GOOD_STACK, }; +#ifdef CONFIG_GENERIC_ENTRY +enum syscall_work_bit { + SYSCALL_WORK_BIT_SECCOMP, + SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT, + SYSCALL_WORK_BIT_SYSCALL_TRACE, + SYSCALL_WORK_BIT_SYSCALL_EMU, + SYSCALL_WORK_BIT_SYSCALL_AUDIT, + SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH, +}; + +#define SYSCALL_WORK_SECCOMP BIT(SYSCALL_WORK_BIT_SECCOMP) +#define SYSCALL_WORK_SYSCALL_TRACEPOINT BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT) +#define SYSCALL_WORK_SYSCALL_TRACE BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE) +#define SYSCALL_WORK_SYSCALL_EMU BIT(SYSCALL_WORK_BIT_SYSCALL_EMU) +#define SYSCALL_WORK_SYSCALL_AUDIT BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT) +#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH) +#endif + #include <asm/thread_info.h> #ifdef __KERNEL__ @@ -97,6 +115,38 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) +#ifdef CONFIG_GENERIC_ENTRY +#define set_syscall_work(fl) \ + set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work) +#define test_syscall_work(fl) \ + test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work) +#define clear_syscall_work(fl) \ + clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work) + +#define set_task_syscall_work(t, fl) \ + set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) +#define test_task_syscall_work(t, fl) \ + test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) +#define clear_task_syscall_work(t, fl) \ +
clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work) + +#else /* CONFIG_GENERIC_ENTRY */ + +#define set_syscall_work(fl) \ + set_ti_thread_flag(current_thread_info(), TIF_##fl) +#define test_syscall_work(fl) \ + test_ti_thread_flag(current_thread_info(), TIF_##fl) +#define clear_syscall_work(fl) \ + clear_ti_thread_flag(current_thread_info(), TIF_##fl) + +#define set_task_syscall_work(t, fl) \ + set_ti_thread_flag(task_thread_info(t), TIF_##fl) +#define test_task_syscall_work(t, fl) \ + test_ti_thread_flag(task_thread_info(t), TIF_##fl) +#define clear_task_syscall_work(t, fl) \ + clear_ti_thread_flag(task_thread_info(t), TIF_##fl) +#endif /* !CONFIG_GENERIC_ENTRY */ + #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 5db2b11ab085..034dccf93955 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -179,6 +179,8 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); * @lock: Lock to serialize access to the following fields of this structure * @vendor_name: Name of the vendor (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known) + * @link_speed: Speed of the link in Gb/s + * @link_width: Width of the link (1 or 2) * @is_unplugged: The XDomain is unplugged * @resume: The XDomain is being resumed * @needs_uuid: If the XDomain does not have @remote_uuid it will be @@ -223,6 +225,8 @@ struct tb_xdomain { struct mutex lock; const char *vendor_name; const char *device_name; + unsigned int link_speed; + unsigned int link_width; bool is_unplugged; bool resume; bool needs_uuid; @@ -243,6 +247,8 @@ struct tb_xdomain { u8 depth; }; +int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd); +void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd); int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, u16 transmit_ring, u16 receive_path, u16 receive_ring); @@ -344,6 +350,9 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler); * @prtcvers: Protocol version from the properties directory * @prtcrevs: Protocol software revision from the properties directory * @prtcstns: Protocol settings mask from the properties directory + * @debugfs_dir: Pointer to the service debugfs directory. Always created + * when debugfs is enabled. Can be used by service drivers to + * add their own entries under the service. * * Each domain exposes set of services it supports as collection of * properties. For each service there will be one corresponding @@ -357,6 +366,7 @@ struct tb_service { u32 prtcvers; u32 prtcrevs; u32 prtcstns; + struct dentry *debugfs_dir; }; static inline struct tb_service *tb_service_get(struct tb_service *svc) @@ -471,6 +481,8 @@ struct tb_nhi { * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise. * @vector: MSI-X vector number the ring uses (only set if @irq is > 0) * @flags: Ring specific flags + * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to + * RX ring. For TX ring this should be set to %0. 
* @sof_mask: Bit mask used to detect start of frame PDF * @eof_mask: Bit mask used to detect end of frame PDF * @start_poll: Called when ring interrupt is triggered to start @@ -494,6 +506,7 @@ struct tb_ring { int irq; u8 vector; unsigned int flags; + int e2e_tx_hop; u16 sof_mask; u16 eof_mask; void (*start_poll)(void *data); @@ -504,6 +517,8 @@ struct tb_ring { #define RING_FLAG_NO_SUSPEND BIT(0) /* Configure the ring to be in frame mode */ #define RING_FLAG_FRAME BIT(1) +/* Enable end-to-end flow control */ +#define RING_FLAG_E2E BIT(2) struct ring_frame; typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled); @@ -552,7 +567,8 @@ struct ring_frame { struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, unsigned int flags); struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, - unsigned int flags, u16 sof_mask, u16 eof_mask, + unsigned int flags, int e2e_tx_hop, + u16 sof_mask, u16 eof_mask, void (*start_poll)(void *), void *poll_data); void tb_ring_start(struct tb_ring *ring); void tb_ring_stop(struct tb_ring *ring); diff --git a/include/linux/time64.h b/include/linux/time64.h index c9dcb3e5781f..5117cb5b5656 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -124,6 +124,10 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts) */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { + /* Prevent multiplication overflow */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return KTIME_MAX; + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h index 5b6031385db0..3146f1c056c9 100644 --- a/include/linux/time_namespace.h +++ b/include/linux/time_namespace.h @@ -4,7 +4,6 @@ #include <linux/sched.h> -#include <linux/kref.h> #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/err.h> @@ -18,7 +17,6 @@ struct timens_offsets { }; struct time_namespace { - struct kref kref; struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; @@ -37,20 +35,21 @@ extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns); static inline struct time_namespace *get_time_ns(struct time_namespace *ns) { - kref_get(&ns->kref); + refcount_inc(&ns->ns.count); return ns; } struct time_namespace *copy_time_ns(unsigned long flags, struct user_namespace *user_ns, struct time_namespace *old_ns); -void free_time_ns(struct kref *kref); -int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk); +void free_time_ns(struct time_namespace *ns); +void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk); struct vdso_data *arch_get_vdso_data(void *vvar_page); static inline void put_time_ns(struct time_namespace *ns) { - kref_put(&ns->kref, free_time_ns); + if (refcount_dec_and_test(&ns->ns.count)) + free_time_ns(ns); } void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m); @@ -77,6 +76,20 @@ static inline void timens_add_boottime(struct timespec64 *ts) *ts = timespec64_add(*ts, ns_offsets->boottime); } +static inline u64 timens_add_boottime_ns(u64 nsec) +{ + struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets; + + return nsec + timespec64_to_ns(&ns_offsets->boottime); +} + +static inline void timens_sub_boottime(struct timespec64 *ts) +{ + struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets; + + *ts = timespec64_sub(*ts, ns_offsets->boottime); +} + ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t
tim, struct timens_offsets *offsets); @@ -122,14 +135,22 @@ struct time_namespace *copy_time_ns(unsigned long flags, return old_ns; } -static inline int timens_on_fork(struct nsproxy *nsproxy, +static inline void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk) { - return 0; + return; } static inline void timens_add_monotonic(struct timespec64 *ts) { } static inline void timens_add_boottime(struct timespec64 *ts) { } + +static inline u64 timens_add_boottime_ns(u64 nsec) +{ + return nsec; +} + +static inline void timens_sub_boottime(struct timespec64 *ts) { } + static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim) { return tim; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 7f7e4a3f4394..929d3f3937c0 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -303,6 +303,8 @@ extern int persistent_clock_is_local; extern void read_persistent_clock64(struct timespec64 *ts); void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, struct timespec64 *boot_offset); +#ifdef CONFIG_GENERIC_CMOS_UPDATE extern int update_persistent_clock64(struct timespec64 now); +#endif #endif diff --git a/include/linux/timer.h b/include/linux/timer.h index d10bc7e73b41..fda13c9d1256 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -193,7 +193,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer); #define del_singleshot_timer_sync(t) del_timer_sync(t) extern void init_timers(void); -extern void run_local_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); diff --git a/include/linux/timex.h b/include/linux/timex.h index ce0859763670..9c2e54faf9b7 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -157,7 +157,6 @@ extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); -void ntp_notify_cmos_timer(void); /* The clock frequency of the i8253/i8254 PIT */ #define PIT_TICK_RATE 1193182ul diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b480e1a07ed8..54b925224a13 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -83,11 +83,12 @@ static inline int ptrace_report_syscall(struct pt_regs *regs, * tracehook_report_syscall_entry - task is about to attempt a system call * @regs: user register state of current task * - * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set, - * when the current task has just entered the kernel for a system call. - * Full user register state is available here. Changing the values - * in @regs can affect the system call number and arguments to be tried. - * It is safe to block here, preventing the system call from beginning. + * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or + * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just + * entered the kernel for a system call. Full user register state is + * available here. Changing the values in @regs can affect the system + * call number and arguments to be tried. It is safe to block here, + * preventing the system call from beginning. * * Returns zero normally, or nonzero if the calling arch code should abort * the system call. 
That must prevent normal entry so no system call is @@ -109,15 +110,15 @@ static inline __must_check int tracehook_report_syscall_entry( * @regs: user register state of current task * @step: nonzero if simulating single-step or block-step * - * This will be called if %TIF_SYSCALL_TRACE has been set, when the - * current task has just finished an attempted system call. Full + * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when + * the current task has just finished an attempted system call. Full * user register state is available here. It is safe to block here, * preventing signals from being processed. * * If @step is nonzero, this report is also in lieu of the normal * trap that would follow the system call instruction because * user_enable_block_step() or user_enable_single_step() was used. - * In this case, %TIF_SYSCALL_TRACE might not be set. + * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. * * Called without locks, just before checking for pending signals. */ @@ -198,4 +199,31 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) blkcg_maybe_throttle_current(); } +/* + * called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. This + * is currently used by TWA_SIGNAL based task_work, which requires breaking + * wait loops to ensure that task_work is noticed and run. + */ +static inline void tracehook_notify_signal(void) +{ +#if defined(TIF_NOTIFY_SIGNAL) + clear_thread_flag(TIF_NOTIFY_SIGNAL); + smp_mb__after_atomic(); + if (current->task_works) + task_work_run(); +#endif +} + +/* + * Called when we have work to process from exit_to_user_mode_loop() + */ +static inline void set_notify_signal(struct task_struct *task) +{ +#if defined(TIF_NOTIFY_SIGNAL) + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && + !wake_up_state(task, TASK_INTERRUPTIBLE)) + kick_process(task); +#endif +} + #endif /* <linux/tracehook.h> */ diff --git a/include/linux/tty.h b/include/linux/tty.h index a99e9b8e4e31..c873f475f0a7 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -303,9 +303,12 @@ struct tty_struct { spinlock_t flow_lock; /* Termios values are protected by the termios rwsem */ struct ktermios termios, termios_locked; - struct termiox *termiox; /* May be NULL for unsupported */ char name[64]; struct pid *pgrp; /* Protected by ctrl lock */ + /* + * Writes protected by both ctrl lock and legacy mutex, readers must use + * at least one of them. + */ struct pid *session; unsigned long flags; int count; @@ -605,82 +608,64 @@ static inline struct tty_port *tty_port_get(struct tty_port *port) } /* If the cts flow control is enabled, return true. 
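 *
 * For example (an illustrative sketch; uport and cts come from the
 * driver's modem-status handling):
 *
 *	tty_port_set_cts_flow(port, C_CRTSCTS(tty));
 *	if (tty_port_cts_enabled(port))
 *		uart_handle_cts_change(uport, cts);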
*/ -static inline bool tty_port_cts_enabled(struct tty_port *port) +static inline bool tty_port_cts_enabled(const struct tty_port *port) { return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); } static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) { - if (val) - set_bit(TTY_PORT_CTS_FLOW, &port->iflags); - else - clear_bit(TTY_PORT_CTS_FLOW, &port->iflags); + assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); } -static inline bool tty_port_active(struct tty_port *port) +static inline bool tty_port_active(const struct tty_port *port) { return test_bit(TTY_PORT_ACTIVE, &port->iflags); } static inline void tty_port_set_active(struct tty_port *port, bool val) { - if (val) - set_bit(TTY_PORT_ACTIVE, &port->iflags); - else - clear_bit(TTY_PORT_ACTIVE, &port->iflags); + assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); } -static inline bool tty_port_check_carrier(struct tty_port *port) +static inline bool tty_port_check_carrier(const struct tty_port *port) { return test_bit(TTY_PORT_CHECK_CD, &port->iflags); } static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) { - if (val) - set_bit(TTY_PORT_CHECK_CD, &port->iflags); - else - clear_bit(TTY_PORT_CHECK_CD, &port->iflags); + assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); } -static inline bool tty_port_suspended(struct tty_port *port) +static inline bool tty_port_suspended(const struct tty_port *port) { return test_bit(TTY_PORT_SUSPENDED, &port->iflags); } static inline void tty_port_set_suspended(struct tty_port *port, bool val) { - if (val) - set_bit(TTY_PORT_SUSPENDED, &port->iflags); - else - clear_bit(TTY_PORT_SUSPENDED, &port->iflags); + assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); } -static inline bool tty_port_initialized(struct tty_port *port) +static inline bool tty_port_initialized(const struct tty_port *port) { return test_bit(TTY_PORT_INITIALIZED, &port->iflags); } static inline void tty_port_set_initialized(struct tty_port *port, bool val) { - if (val) - set_bit(TTY_PORT_INITIALIZED, &port->iflags); - else - clear_bit(TTY_PORT_INITIALIZED, &port->iflags); + assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); } -static inline bool tty_port_kopened(struct tty_port *port) +static inline bool tty_port_kopened(const struct tty_port *port) { return test_bit(TTY_PORT_KOPENED, &port->iflags); } static inline void tty_port_set_kopened(struct tty_port *port, bool val) { - if (val) - set_bit(TTY_PORT_KOPENED, &port->iflags); - else - clear_bit(TTY_PORT_KOPENED, &port->iflags); + assign_bit(TTY_PORT_KOPENED, &port->iflags, val); } extern struct tty_struct *tty_port_tty_get(struct tty_port *port); @@ -716,6 +701,7 @@ extern int __must_check tty_ldisc_init(struct tty_struct *tty); extern void tty_ldisc_deinit(struct tty_struct *tty); extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count); +extern void tty_sysctl_init(void); /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 358446247ccd..61c3372d3f32 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -224,14 +224,6 @@ * line). See tty_do_resize() if you need to wrap the standard method * in your own logic - the usual case. * - * void (*set_termiox)(struct tty_struct *tty, struct termiox *new); - * - * Called when the device receives a termiox based ioctl. Passes down - * the requested data from user space. 
This method will not be invoked - * unless the tty also has a valid tty->termiox pointer. - * - * Optional: Called under the termios lock - * * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount); * * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel @@ -285,7 +277,6 @@ struct tty_operations { int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*resize)(struct tty_struct *tty, struct winsize *ws); - int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); int (*get_serial)(struct tty_struct *tty, struct serial_struct *p); diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 54bf6b118401..47c5962b876b 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -117,6 +117,14 @@ extern int __must_check struct uio_info *info); /* use a define to avoid include chaining to get THIS_MODULE */ + +/** + * uio_register_device - register a new userspace IO device + * @parent: parent device + * @info: UIO device capabilities + * + * returns zero on success or a negative error code. + */ #define uio_register_device(parent, info) \ __uio_register_device(THIS_MODULE, parent, info) @@ -129,6 +137,14 @@ extern int __must_check struct uio_info *info); /* use a define to avoid include chaining to get THIS_MODULE */ + +/** + * devm_uio_register_device - Resource managed uio_register_device() + * @parent: parent device + * @info: UIO device capabilities + * + * returns zero on success or a negative error code. + */ #define devm_uio_register_device(parent, info) \ __devm_uio_register_device(THIS_MODULE, parent, info) diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 2040696d75b6..a2d229ab63ba 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -437,7 +437,7 @@ static inline struct usb_composite_driver *to_cdriver( #define OS_STRING_IDX 0xEE /** - * struct usb_composite_device - represents one composite usb gadget + * struct usb_composite_dev - represents one composite usb gadget * @gadget: read-only, abstracts the gadget's usb peripheral controller * @req: used for control responses; buffer is pre-allocated * @os_desc_req: used for OS descriptors responses; buffer is pre-allocated diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 3dbb42c637c1..96281cd50ff6 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -734,10 +734,6 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, /* random stuff */ -#define RUN_CONTEXT (in_irq() ? "in_irq" \ - : (in_interrupt() ? "in_interrupt" : "can sleep")) - - /* This rwsem is for use only by the hub driver and ehci-hcd. * Nobody else should touch it. 
*/ diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index 3a805e2ecbc9..bb9a782e1411 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -466,6 +466,7 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_DRP_SRC 30 #define PD_T_PS_SOURCE_OFF 920 #define PD_T_PS_SOURCE_ON 480 +#define PD_T_PS_SOURCE_ON_PRS 450 /* 390 - 480ms */ #define PD_T_PS_HARD_RESET 30 #define PD_T_SRC_RECOVER 760 #define PD_T_SRC_RECOVER_MAX 1000 @@ -484,6 +485,7 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */ #define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */ +#define PD_T_TRY_CC_DEBOUNCE 15 /* 10 - 20 ms */ #define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP) #define PD_N_HARD_RESET_COUNT 2 diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index 68bdc4e2f5a9..8c08eeb9a74b 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -103,17 +103,25 @@ * -------------------- * <31> :: data capable as a USB host * <30> :: data capable as a USB device - * <29:27> :: product type + * <29:27> :: product type (UFP / Cable) * <26> :: modal operation supported (1b == yes) - * <25:16> :: Reserved, Shall be set to zero + * <25:16> :: product type (DFP) * <15:0> :: USB-IF assigned VID for this cable vendor */ #define IDH_PTYPE_UNDEF 0 #define IDH_PTYPE_HUB 1 #define IDH_PTYPE_PERIPH 2 +#define IDH_PTYPE_PSD 3 +#define IDH_PTYPE_AMA 5 + #define IDH_PTYPE_PCABLE 3 #define IDH_PTYPE_ACABLE 4 -#define IDH_PTYPE_AMA 5 + +#define IDH_PTYPE_DFP_UNDEF 0 +#define IDH_PTYPE_DFP_HUB 1 +#define IDH_PTYPE_DFP_HOST 2 +#define IDH_PTYPE_DFP_PB 3 +#define IDH_PTYPE_DFP_AMC 4 #define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \ ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \ @@ -122,6 +130,7 @@ #define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7) #define PD_IDH_VID(vdo) ((vdo) & 0xffff) #define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26)) +#define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7) /* * Cert Stat VDO @@ -177,7 +186,7 @@ * <31:28> :: Cable HW version * <27:24> :: Cable FW version * <23:20> :: Reserved, Shall be set to zero - * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C) + * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive) * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle) * <16:13> :: cable latency (0001 == <10ns(~1m length)) * <12:11> :: cable termination type (11b == both ends active VCONN req) @@ -193,6 +202,7 @@ #define CABLE_ATYPE 0 #define CABLE_BTYPE 1 #define CABLE_CTYPE 2 +#define CABLE_CAPTIVE 3 #define CABLE_PLUG 0 #define CABLE_RECEPTACLE 1 #define CABLE_CURR_1A5 0 @@ -208,6 +218,7 @@ | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \ | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \ | ((usbss) & 0x7)) +#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3) /* * AMA VDO diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h new file mode 100644 index 000000000000..20d88b1defc3 --- /dev/null +++ b/include/linux/usb/r8152.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 Realtek Semiconductor Corp. All rights reserved. 
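+ *
+ * Consumer-side sketch (intf being the usual struct usb_interface
+ * probe argument; the rtl8152_get_version() helper below is only
+ * available when CONFIG_USB_RTL8152 is reachable):
+ *
+ *	u8 version = rtl8152_get_version(intf);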
+ */ + +#ifndef __LINUX_R8152_H +#define __LINUX_R8152_H + +#define RTL8152_REQT_READ 0xc0 +#define RTL8152_REQT_WRITE 0x40 +#define RTL8152_REQ_GET_REGS 0x05 +#define RTL8152_REQ_SET_REGS 0x05 + +#define BYTE_EN_DWORD 0xff +#define BYTE_EN_WORD 0x33 +#define BYTE_EN_BYTE 0x11 +#define BYTE_EN_SIX_BYTES 0x3f +#define BYTE_EN_START_MASK 0x0f +#define BYTE_EN_END_MASK 0xf0 + +#define MCU_TYPE_PLA 0x0100 +#define MCU_TYPE_USB 0x0000 + +/* Define these values to match your device */ +#define VENDOR_ID_REALTEK 0x0bda +#define VENDOR_ID_MICROSOFT 0x045e +#define VENDOR_ID_SAMSUNG 0x04e8 +#define VENDOR_ID_LENOVO 0x17ef +#define VENDOR_ID_LINKSYS 0x13b1 +#define VENDOR_ID_NVIDIA 0x0955 +#define VENDOR_ID_TPLINK 0x2357 + +#if IS_REACHABLE(CONFIG_USB_RTL8152) +extern u8 rtl8152_get_version(struct usb_interface *intf); +#endif + +#endif /* __LINUX_R8152_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 8e67eff9039f..1c09b922f8b0 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -62,7 +62,6 @@ * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. * @flags: usb serial port flags - * @write_wait: a wait_queue_head_t used by the port. * @work: work queue entry for the line discipline waking up. * @dev: pointer to the serial device * @@ -108,7 +107,6 @@ struct usb_serial_port { int tx_bytes; unsigned long flags; - wait_queue_head_t write_wait; struct work_struct work; unsigned long sysrq; /* sysrq timeout */ struct device dev; diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 09762d26fa0c..f4a18427f5c4 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -83,6 +83,27 @@ enum tcpm_transmit_type { * Optional; Called to enable/disable PD 3.0 fast role swap. * Enabling frs is accessory dependent as not all PD3.0 * accessories support fast role swap. + * @frs_sourcing_vbus: + * Optional; Called to notify that vbus is now being sourced. + * Low level drivers can perform chip specific operations, if any. + * @enable_auto_vbus_discharge: + * Optional; TCPCI spec based TCPC implementations can optionally + * support hardware to autonomously discharge vbus upon disconnecting + * as sink or source. TCPM signals TCPC to enable the mechanism upon + * entering connected state and signals disabling upon disconnect. + * @set_auto_vbus_discharge_threshold: + * Mandatory when enable_auto_vbus_discharge is implemented. TCPM + * calls this function to allow lower level drivers to program the + * vbus threshold voltage below which the vbus discharge circuit + * will be turned on. requested_vbus_voltage is set to 0 when vbus + * is knowingly going to disappear, e.g. during PR_SWAP and + * HARD_RESET. + * @is_vbus_vsafe0v: + * Optional; TCPCI spec based TCPC implementations are expected to + * detect VSAFE0V voltage level at vbus. When detection of VSAFE0V + * is supported by TCPC, set this callback for TCPM to query + * whether vbus is at VSAFE0V when needed. + * Returns true when vbus is at VSAFE0V, false otherwise.
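+ *
+ * Driver-side sketch of one of the new callbacks (the foo_* names are
+ * hypothetical, standing in for a TCPCI power-status register read):
+ *
+ *	static bool foo_is_vbus_vsafe0v(struct tcpc_dev *dev)
+ *	{
+ *		return foo_read_power_status(dev) & FOO_VSAFE0V;
+ *	}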
*/ struct tcpc_dev { struct fwnode_handle *fwnode; @@ -106,9 +127,14 @@ struct tcpc_dev { enum typec_cc_status cc); int (*try_role)(struct tcpc_dev *dev, int role); int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type, - const struct pd_message *msg); + const struct pd_message *msg, unsigned int negotiated_rev); int (*set_bist_data)(struct tcpc_dev *dev, bool on); int (*enable_frs)(struct tcpc_dev *dev, bool enable); + void (*frs_sourcing_vbus)(struct tcpc_dev *dev); + int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable); + int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode, + bool pps_active, u32 requested_vbus_voltage); + bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev); }; struct tcpm_port; diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 6be558045942..54475323f83b 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -126,9 +126,11 @@ struct typec_altmode_desc { enum typec_port_data roles; }; +int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes); struct typec_altmode *typec_partner_register_altmode(struct typec_partner *partner, const struct typec_altmode_desc *desc); +int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes); struct typec_altmode *typec_plug_register_altmode(struct typec_plug *plug, const struct typec_altmode_desc *desc); diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h index 47c2d501ddce..63dd44b72e0c 100644 --- a/include/linux/usb/typec_tbt.h +++ b/include/linux/usb/typec_tbt.h @@ -39,12 +39,16 @@ struct typec_thunderbolt_data { #define TBT_CABLE_USB3_GEN1 1 #define TBT_CABLE_USB3_PASSIVE 2 #define TBT_CABLE_10_AND_20GBPS 3 -#define TBT_CABLE_ROUNDED BIT(19) +#define TBT_CABLE_ROUNDED_SUPPORT(_vdo_) \ + (((_vdo_) & GENMASK(20, 19)) >> 19) +#define TBT_GEN3_NON_ROUNDED 0 +#define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1 #define TBT_CABLE_OPTICAL BIT(21) #define TBT_CABLE_RETIMER BIT(22) #define TBT_CABLE_LINK_TRAINING BIT(23) #define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16) +#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19) /* TBT3 Device Enter Mode VDO bits */ #define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 2e4f7721fc4e..88a7673894d5 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -65,8 +65,6 @@ struct usbnet { struct usb_anchor deferred; struct tasklet_struct bh; - struct pcpu_sw_netstats __percpu *stats64; - struct work_struct kevent; unsigned long flags; # define EVENT_TX_HALT 0 @@ -285,7 +283,5 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags); extern void usbnet_status_stop(struct usbnet *dev); extern void usbnet_update_max_qlen(struct usbnet *dev); -extern void usbnet_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); #endif /* __LINUX_USB_USBNET_H */ diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 4a19ac3f24d0..6b03fdd69d27 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -84,6 +84,8 @@ /* Cannot handle REPORT_LUNS */ \ US_FLAG(ALWAYS_SYNC, 0x20000000) \ /* lies about caching, so always sync */ \ + US_FLAG(NO_SAME, 0x40000000) \ + /* Cannot handle WRITE_SAME */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 
6ef1c7109fc4..64cf8ebdc4ec 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -57,7 +57,6 @@ struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; - atomic_t count; struct user_namespace *parent; int level; kuid_t owner; @@ -109,7 +108,7 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type); static inline struct user_namespace *get_user_ns(struct user_namespace *ns) { if (ns) - atomic_inc(&ns->count); + refcount_inc(&ns->ns.count); return ns; } @@ -119,7 +118,7 @@ extern void __put_user_ns(struct user_namespace *ns); static inline void put_user_ns(struct user_namespace *ns) { - if (ns && atomic_dec_and_test(&ns->count)) + if (ns && refcount_dec_and_test(&ns->ns.count)) __put_user_ns(ns); } diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 44429d9142ca..2b1737c9b244 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -4,7 +4,6 @@ #include <linux/sched.h> -#include <linux/kref.h> #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/err.h> @@ -22,7 +21,6 @@ struct user_namespace; extern struct user_namespace init_user_ns; struct uts_namespace { - struct kref kref; struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; @@ -33,16 +31,17 @@ extern struct uts_namespace init_uts_ns; #ifdef CONFIG_UTS_NS static inline void get_uts_ns(struct uts_namespace *ns) { - kref_get(&ns->kref); + refcount_inc(&ns->ns.count); } extern struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns); -extern void free_uts_ns(struct kref *kref); +extern void free_uts_ns(struct uts_namespace *ns); static inline void put_uts_ns(struct uts_namespace *ns) { - kref_put(&ns->kref, free_uts_ns); + if (refcount_dec_and_test(&ns->ns.count)) + free_uts_ns(ns); } void uts_ns_init(void); diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index eae0bfd87d91..30bc7a7223bb 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -53,6 +53,16 @@ struct vdpa_device { }; /** + * vDPA IOVA range - the IOVA range supported by the device + * @first: start of the IOVA range + * @last: end of the IOVA range + */ +struct vdpa_iova_range { + u64 first; + u64 last; +}; + +/** * vDPA_config_ops - operations for configuring a vDPA device. * Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following @@ -151,6 +161,10 @@ struct vdpa_device { * @get_generation: Get device config generation (optional) * @vdev: vdpa device * Returns u32: device generation + * @get_iova_range: Get supported iova range (optional) + * @vdev: vdpa device + * Returns the iova range supported by + * the device.
* @set_map: Set device memory mapping (optional) * Needed for devices that use device * specific DMA translation (on-chip IOMMU) @@ -216,6 +230,7 @@ struct vdpa_config_ops { void (*set_config)(struct vdpa_device *vdev, unsigned int offset, const void *buf, unsigned int len); u32 (*get_generation)(struct vdpa_device *vdev); + struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev); /* DMA ops */ int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 938eaf9517e2..80c0181c411d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -72,16 +72,14 @@ struct vmap_area { struct list_head list; /* address sorted list */ /* - * The following three variables can be packed, because - * a vmap_area object is always one of the three states: + * The following two variables can be packed, because + * a vmap_area object can be either: * 1) in "free" tree (root is free_vmap_area_root) - * 2) in "busy" tree (root is vmap_area_root) - * 3) in purge list (head is vmap_purge_list) + * 2) or "busy" tree (root is vmap_area_root) */ union { unsigned long subtree_max_size; /* in "free" tree */ struct vm_struct *vm; /* in "busy" tree */ - struct llist_node purge_list; /* in purge list */ }; }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 322dcbfcc933..773135fc6e19 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -450,4 +450,108 @@ static inline const char *vm_event_name(enum vm_event_item item) } #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ +#ifdef CONFIG_MEMCG + +void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val); + +static inline void mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_state(lruvec, idx, val); + local_irq_restore(flags); +} + +void __mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val); + +static inline void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_page_state(page, idx, val); + local_irq_restore(flags); +} + +#else + +static inline void __mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); +} + +static inline void mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + mod_node_page_state(lruvec_pgdat(lruvec), idx, val); +} + +static inline void __mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + __mod_node_page_state(page_pgdat(page), idx, val); +} + +static inline void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + mod_node_page_state(page_pgdat(page), idx, val); +} + +#endif /* CONFIG_MEMCG */ + +static inline void __inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + __mod_lruvec_state(lruvec, idx, 1); +} + +static inline void __dec_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + __mod_lruvec_state(lruvec, idx, -1); +} + +static inline void __inc_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + __mod_lruvec_page_state(page, idx, 1); +} + +static inline void __dec_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + __mod_lruvec_page_state(page, idx, -1); +} + +static inline void inc_lruvec_state(struct lruvec *lruvec, + enum
node_stat_item idx) +{ + mod_lruvec_state(lruvec, idx, 1); +} + +static inline void dec_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + mod_lruvec_state(lruvec, idx, -1); +} + +static inline void inc_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + mod_lruvec_page_state(page, idx, 1); +} + +static inline void dec_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + mod_lruvec_page_state(page, idx, -1); +} + #endif /* _LINUX_VMSTAT_H */ diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 2cdeca062db3..041d6524d144 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -83,36 +83,46 @@ static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -extern void vtime_account_irq_enter(struct task_struct *tsk); -static inline void vtime_account_irq_exit(struct task_struct *tsk) -{ - /* On hard|softirq exit we always account to hard|softirq cputime */ - vtime_account_kernel(tsk); -} +extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset); +extern void vtime_account_softirq(struct task_struct *tsk); +extern void vtime_account_hardirq(struct task_struct *tsk); extern void vtime_flush(struct task_struct *tsk); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -static inline void vtime_account_irq_enter(struct task_struct *tsk) { } -static inline void vtime_account_irq_exit(struct task_struct *tsk) { } +static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { } +static inline void vtime_account_softirq(struct task_struct *tsk) { } +static inline void vtime_account_hardirq(struct task_struct *tsk) { } static inline void vtime_flush(struct task_struct *tsk) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING -extern void irqtime_account_irq(struct task_struct *tsk); +extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset); #else -static inline void irqtime_account_irq(struct task_struct *tsk) { } +static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { } #endif -static inline void account_irq_enter_time(struct task_struct *tsk) +static inline void account_softirq_enter(struct task_struct *tsk) +{ + vtime_account_irq(tsk, SOFTIRQ_OFFSET); + irqtime_account_irq(tsk, SOFTIRQ_OFFSET); +} + +static inline void account_softirq_exit(struct task_struct *tsk) +{ + vtime_account_softirq(tsk); + irqtime_account_irq(tsk, 0); +} + +static inline void account_hardirq_enter(struct task_struct *tsk) { - vtime_account_irq_enter(tsk); - irqtime_account_irq(tsk); + vtime_account_irq(tsk, HARDIRQ_OFFSET); + irqtime_account_irq(tsk, HARDIRQ_OFFSET); } -static inline void account_irq_exit_time(struct task_struct *tsk) +static inline void account_hardirq_exit(struct task_struct *tsk) { - vtime_account_irq_exit(tsk); - irqtime_account_irq(tsk); + vtime_account_hardirq(tsk); + irqtime_account_irq(tsk, 0); } #endif /* _LINUX_KERNEL_VTIME_H */ diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h deleted file mode 100644 index cdae052bcdcd..000000000000 --- a/include/linux/wimax/debug.h +++ /dev/null @@ -1,491 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Linux WiMAX - * Collection of tools to manage debug operations. - * - * Copyright (C) 2005-2007 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * Don't #include this file directly, read on! 
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 322dcbfcc933..773135fc6e19 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -450,4 +450,108 @@ static inline const char *vm_event_name(enum vm_event_item item)
 }
 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
 
+#ifdef CONFIG_MEMCG
+
+void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+			int val);
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx, int val)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__mod_lruvec_state(lruvec, idx, val);
+	local_irq_restore(flags);
+}
+
+void __mod_lruvec_page_state(struct page *page,
+			     enum node_stat_item idx, int val);
+
+static inline void mod_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx, int val)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__mod_lruvec_page_state(page, idx, val);
+	local_irq_restore(flags);
+}
+
+#else
+
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx, int val)
+{
+	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx, int val)
+{
+	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx, int val)
+{
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx, int val)
+{
+	mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+#endif /* CONFIG_MEMCG */
+
+static inline void __inc_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx)
+{
+	__mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void __dec_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx)
+{
+	__mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void __inc_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx)
+{
+	__mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void __dec_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx)
+{
+	__mod_lruvec_page_state(page, idx, -1);
+}
+
+static inline void inc_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx)
+{
+	mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void dec_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx)
+{
+	mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void inc_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx)
+{
+	mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void dec_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx)
+{
+	mod_lruvec_page_state(page, idx, -1);
+}
+
 #endif /* _LINUX_VMSTAT_H */
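For context, a hypothetical caller of the helpers above (not part of this diff). With CONFIG_MEMCG the update is routed to both the per-node and the per-memcg counters; without it, the same call falls back to the plain per-node counters, so callers are written identically either way:

/* Hypothetical usage sketch: charge one page to an LRU statistic.
 * The __-prefixed variants are for callers that already run with
 * IRQs disabled.
 */
static void my_account_active_anon(struct page *page)
{
	inc_lruvec_page_state(page, NR_ACTIVE_ANON);
}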
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 2cdeca062db3..041d6524d144 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -83,36 +83,46 @@ static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
-	/* On hard|softirq exit we always account to hard|softirq cputime */
-	vtime_account_kernel(tsk);
-}
+extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
+extern void vtime_account_softirq(struct task_struct *tsk);
+extern void vtime_account_hardirq(struct task_struct *tsk);
 extern void vtime_flush(struct task_struct *tsk);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
-static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
+static inline void vtime_account_softirq(struct task_struct *tsk) { }
+static inline void vtime_account_hardirq(struct task_struct *tsk) { }
 static inline void vtime_flush(struct task_struct *tsk) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
-extern void irqtime_account_irq(struct task_struct *tsk);
+extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
 #else
-static inline void irqtime_account_irq(struct task_struct *tsk) { }
+static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
 #endif
 
-static inline void account_irq_enter_time(struct task_struct *tsk)
+static inline void account_softirq_enter(struct task_struct *tsk)
+{
+	vtime_account_irq(tsk, SOFTIRQ_OFFSET);
+	irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
+}
+
+static inline void account_softirq_exit(struct task_struct *tsk)
+{
+	vtime_account_softirq(tsk);
+	irqtime_account_irq(tsk, 0);
+}
+
+static inline void account_hardirq_enter(struct task_struct *tsk)
 {
-	vtime_account_irq_enter(tsk);
-	irqtime_account_irq(tsk);
+	vtime_account_irq(tsk, HARDIRQ_OFFSET);
+	irqtime_account_irq(tsk, HARDIRQ_OFFSET);
 }
 
-static inline void account_irq_exit_time(struct task_struct *tsk)
+static inline void account_hardirq_exit(struct task_struct *tsk)
 {
-	vtime_account_irq_exit(tsk);
-	irqtime_account_irq(tsk);
+	vtime_account_hardirq(tsk);
+	irqtime_account_irq(tsk, 0);
 }
 
 #endif /* _LINUX_KERNEL_VTIME_H */
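For context, a simplified, hypothetical sketch of how the new enter/exit pair is meant to be used (the real callers live in kernel/softirq.c and the irq entry code, not in this header):

/* Hypothetical sketch: the enter side charges the time accumulated by
 * the interrupted context; the exit side charges the softirq time
 * itself, matching the removed comment about hard|softirq cputime.
 */
static void my_handle_softirqs(struct task_struct *tsk)
{
	account_softirq_enter(tsk);
	/* ... run the pending softirq handlers ... */
	account_softirq_exit(tsk);
}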
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
deleted file mode 100644
index cdae052bcdcd..000000000000
--- a/include/linux/wimax/debug.h
+++ /dev/null
@@ -1,491 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Linux WiMAX
- * Collection of tools to manage debug operations.
- *
- * Copyright (C) 2005-2007 Intel Corporation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *
- * Don't #include this file directly, read on!
- *
- * EXECUTING DEBUGGING ACTIONS OR NOT
- *
- * The main thing this framework provides is decission power to take a
- * debug action (like printing a message) if the current debug level
- * allows it.
- *
- * The decission power is at two levels: at compile-time (what does
- * not make it is compiled out) and at run-time. The run-time
- * selection is done per-submodule (as they are declared by the user
- * of the framework).
- *
- * A call to d_test(L) (L being the target debug level) returns true
- * if the action should be taken because the current debug levels
- * allow it (both compile and run time).
- *
- * It follows that a call to d_test() that can be determined to be
- * always false at compile time will get the code depending on it
- * compiled out by optimization.
- *
- * DEBUG LEVELS
- *
- * It is up to the caller to define how much a debugging level is.
- *
- * Convention sets 0 as "no debug" (so an action marked as debug level 0
- * will always be taken). The increasing debug levels are used for
- * increased verbosity.
- *
- * USAGE
- *
- * Group the code in modules and submodules inside each module [which
- * in most cases maps to Linux modules and .c files that compose
- * those].
- *
- * For each module, there is:
- *
- *  - a MODULENAME (single word, legal C identifier)
- *
- *  - a debug-levels.h header file that declares the list of
- *    submodules and that is included by all .c files that use
- *    the debugging tools. The file name can be anything.
- *
- *  - some (optional) .c code to manipulate the runtime debug levels
- *    through debugfs.
- *
- * The debug-levels.h file would look like:
- *
- *     #ifndef __debug_levels__h__
- *     #define __debug_levels__h__
- *
- *     #define D_MODULENAME modulename
- *     #define D_MASTER 10
- *
- *     #include <linux/wimax/debug.h>
- *
- *     enum d_module {
- *             D_SUBMODULE_DECLARE(submodule_1),
- *             D_SUBMODULE_DECLARE(submodule_2),
- *             ...
- *             D_SUBMODULE_DECLARE(submodule_N)
- *     };
- *
- *     #endif
- *
- * D_MASTER is the maximum compile-time debug level; any debug actions
- * above this will be out. D_MODULENAME is the module name (legal C
- * identifier), which has to be unique for each module (to avoid
- * namespace collisions during linkage). Note those #defines need to
- * be done before #including debug.h
- *
- * We declare N different submodules whose debug level can be
- * independently controlled during runtime.
- *
- * In a .c file of the module (and only in one of them), define the
- * following code:
- *
- *     struct d_level D_LEVEL[] = {
- *             D_SUBMODULE_DEFINE(submodule_1),
- *             D_SUBMODULE_DEFINE(submodule_2),
- *             ...
- *             D_SUBMODULE_DEFINE(submodule_N),
- *     };
- *     size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
- *
- * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
- * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
- * #defined also in this file.
- *
- * To manipulate from user space the levels, create a debugfs dentry
- * and then register each submodule with:
- *
- *     d_level_register_debugfs("PREFIX_", submodule_X, parent);
- *
- * Where PREFIX_ is a name of your chosing. This will create debugfs
- * file with a single numeric value that can be use to tweak it. To
- * remove the entires, just use debugfs_remove_recursive() on 'parent'.
- *
- * NOTE: remember that even if this will show attached to some
- *     particular instance of a device, the settings are *global*.
- *
- * On each submodule (for example, .c files), the debug infrastructure
- * should be included like this:
- *
- *     #define D_SUBMODULE submodule_x     // matches one in debug-levels.h
- *     #include "debug-levels.h"
- *
- * after #including all your include files.
- *
- * Now you can use the d_*() macros below [d_test(), d_fnstart(),
- * d_fnend(), d_printf(), d_dump()].
- *
- * If their debug level is greater than D_MASTER, they will be
- * compiled out.
- *
- * If their debug level is lower or equal than D_MASTER but greater
- * than the current debug level of their submodule, they'll be
- * ignored.
- *
- * Otherwise, the action will be performed.
- */
-#ifndef __debug__h__
-#define __debug__h__
-
-#include <linux/types.h>
-#include <linux/slab.h>
-
-struct device;
-
-/* Backend stuff */
-
-/*
- * Debug backend: generate a message header from a 'struct device'
- *
- * @head: buffer where to place the header
- * @head_size: length of @head
- * @dev: pointer to device used to generate a header from. If NULL,
- *     an empty ("") header is generated.
- */
-static inline
-void __d_head(char *head, size_t head_size,
-	      struct device *dev)
-{
-	if (dev == NULL)
-		head[0] = 0;
-	else if ((unsigned long)dev < 4096) {
-		printk(KERN_ERR "E: Corrupt dev %p\n", dev);
-		WARN_ON(1);
-	} else
-		snprintf(head, head_size, "%s %s: ",
-			 dev_driver_string(dev), dev_name(dev));
-}
-
-
-/*
- * Debug backend: log some message if debugging is enabled
- *
- * @l: intended debug level
- * @tag: tag to prefix the message with
- * @dev: 'struct device' associated to this message
- * @f: printf-like format and arguments
- *
- * Note this is optimized out if it doesn't pass the compile-time
- * check; however, it is *always* compiled. This is useful to make
- * sure the printf-like formats and variables are always checked and
- * they don't get bit rot if you have all the debugging disabled.
- */
-#define _d_printf(l, tag, dev, f, a...)					\
-do {									\
-	char head[64];							\
-	if (!d_test(l))							\
-		break;							\
-	__d_head(head, sizeof(head), dev);				\
-	printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a);	\
-} while (0)
-
-
-/*
- * CPP syntactic sugar to generate A_B like symbol names when one of
- * the arguments is a preprocessor #define.
- */
-#define __D_PASTE__(varname, modulename) varname##_##modulename
-#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
-#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
-
-
-/*
- * Store a submodule's runtime debug level and name
- */
-struct d_level {
-	u8 level;
-	const char *name;
-};
-
-
-/*
- * List of available submodules and their debug levels
- *
- * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
- * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
- * convenience.
- *
- * This array and the size are defined on some .c file that is part of
- * the current module.
- */
-#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
-#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
-
-extern struct d_level D_LEVEL[];
-extern size_t D_LEVEL_SIZE;
-
-
-/*
- * Frontend stuff
- *
- *
- * Stuff you need to declare prior to using the actual "debug" actions
- * (defined below).
- */
-
-#ifndef D_MODULENAME
-#error D_MODULENAME is not defined in your debug-levels.h file
-/**
- * D_MODULE - Name of the current module
- *
- * #define in your module's debug-levels.h, making sure it is
- * unique. This has to be a legal C identifier.
- */
-#define D_MODULENAME undefined_modulename
-#endif
-
-
-#ifndef D_MASTER
-#warning D_MASTER not defined, but debug.h included! [see docs]
-/**
- * D_MASTER - Compile time maximum debug level
- *
- * #define in your debug-levels.h file to the maximum debug level the
- * runtime code will be allowed to have. This allows you to provide a
- * main knob.
- *
- * Anything above that level will be optimized out of the compile.
- *
- * Defaults to zero (no debug code compiled in).
- *
- * Maximum one definition per module (at the debug-levels.h file).
- */
-#define D_MASTER 0
-#endif
-
-#ifndef D_SUBMODULE
-#error D_SUBMODULE not defined, but debug.h included! [see docs]
-/**
- * D_SUBMODULE - Name of the current submodule
- *
- * #define in your submodule .c file before #including debug-levels.h
- * to the name of the current submodule as previously declared and
- * defined with D_SUBMODULE_DECLARE() (in your module's
- * debug-levels.h) and D_SUBMODULE_DEFINE().
- *
- * This is used to provide runtime-control over the debug levels.
- *
- * Maximum one per .c file! Can be shared among different .c files
- * (meaning they belong to the same submodule categorization).
- */
-#define D_SUBMODULE undefined_module
-#endif
-
-
-/**
- * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- *     valid C identifier ([a-zA-Z0-9_]).
- *
- * Declare in the module's debug-levels.h header file as:
- *
- *     enum d_module {
- *             D_SUBMODULE_DECLARE(submodule_1),
- *             D_SUBMODULE_DECLARE(submodule_2),
- *             D_SUBMODULE_DECLARE(submodule_3),
- *     };
- *
- * Some corresponding .c file needs to have a matching
- * D_SUBMODULE_DEFINE().
- */
-#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
-
-
-/**
- * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
- *
- * @_name: name of the submodule, restricted to the chars that make up a
- *     valid C identifier ([a-zA-Z0-9_]).
- *
- * Use once per module (in some .c file) as:
- *
- *     static
- *     struct d_level d_level_SUBMODULENAME[] = {
- *             D_SUBMODULE_DEFINE(submodule_1),
- *             D_SUBMODULE_DEFINE(submodule_2),
- *             D_SUBMODULE_DEFINE(submodule_3),
- *     };
- *     size_t d_level_size_SUBDMODULENAME = ARRAY_SIZE(d_level_SUBDMODULENAME);
- *
- * Matching D_SUBMODULE_DECLARE()s have to be present in a
- * debug-levels.h header file.
- */
-#define D_SUBMODULE_DEFINE(_name)		\
-[__D_SUBMODULE_##_name] = {			\
-	.level = 0,				\
-	.name = #_name				\
-}
-
-
-
-/* The actual "debug" operations */
-
-
-/**
- * d_test - Returns true if debugging should be enabled
- *
- * @l: intended debug level (unsigned)
- *
- * If the master debug switch is enabled and the current settings are
- * higher or equal to the requested level, then debugging
- * output/actions should be enabled.
- *
- * NOTE:
- *
- * This needs to be coded so that it can be evaluated in compile
- * time; this is why the ugly BUG_ON() is placed in there, so the
- * D_MASTER evaluation compiles all out if it is compile-time false.
- */
-#define d_test(l)							\
-({									\
-	unsigned __l = l;	/* type enforcer */			\
-	(D_MASTER) >= __l						\
-	&& ({								\
-		BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
-		D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l;	\
-	});								\
-})
-
-
-/**
- * d_fnstart - log message at function start if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
-
-
-/**
- * d_fnend - log message at function end if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
-
-
-/**
- * d_printf - log message if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
-
-
-/**
- * d_dump - log buffer hex dump if debugging enabled
- *
- * @l: intended debug level
- * @_dev: 'struct device' pointer, NULL if none (for context)
- * @f: printf-like format and arguments
- */
-#define d_dump(l, dev, ptr, size)			\
-do {							\
-	char head[64];					\
-	if (!d_test(l))					\
-		break;					\
-	__d_head(head, sizeof(head), dev);		\
-	print_hex_dump(KERN_ERR, head, 0, 16, 1,	\
-		       ((void *) ptr), (size), 0);	\
-} while (0)
-
-
-/**
- * Export a submodule's debug level over debugfs as PREFIXSUBMODULE
- *
- * @prefix: string to prefix the name with
- * @submodule: name of submodule (not a string, just the name)
- * @dentry: debugfs parent dentry
- *
- * For removing, just use debugfs_remove_recursive() on the parent.
- */
-#define d_level_register_debugfs(prefix, name, parent)			\
-({									\
-	debugfs_create_u8(						\
-		prefix #name, 0600, parent,				\
-		&(D_LEVEL[__D_SUBMODULE_ ## name].level));		\
-})
-
-
-static inline
-void d_submodule_set(struct d_level *d_level, size_t d_level_size,
-		     const char *submodule, u8 level, const char *tag)
-{
-	struct d_level *itr, *top;
-	int index = -1;
-
-	for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
-		index++;
-		if (itr->name == NULL) {
-			printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
-			       tag, itr, index);
-			continue;
-		}
-		if (!strcmp(itr->name, submodule)) {
-			itr->level = level;
-			return;
-		}
-	}
-	printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
-}
-
-
-/**
- * d_parse_params - Parse a string with debug parameters from the
- * command line
- *
- * @d_level: level structure (D_LEVEL)
- * @d_level_size: number of items in the level structure
- *     (D_LEVEL_SIZE).
- * @_params: string with the parameters; this is a space (not tab!)
- *     separated list of NAME:VALUE, where value is the debug level
- *     and NAME is the name of the submodule.
- * @tag: string for error messages (example: MODULE.ARGNAME).
- */
-static inline
-void d_parse_params(struct d_level *d_level, size_t d_level_size,
-		    const char *_params, const char *tag)
-{
-	char submodule[130], *params, *params_orig, *token, *colon;
-	unsigned level, tokens;
-
-	if (_params == NULL)
-		return;
-	params_orig = kstrdup(_params, GFP_KERNEL);
-	params = params_orig;
-	while (1) {
-		token = strsep(&params, " ");
-		if (token == NULL)
-			break;
-		if (*token == '\0')	/* eat joint spaces */
-			continue;
-		/* kernel's sscanf %s eats until whitespace, so we
-		 * replace : by \n so it doesn't get eaten later by
-		 * strsep */
-		colon = strchr(token, ':');
-		if (colon != NULL)
-			*colon = '\n';
-		tokens = sscanf(token, "%s\n%u", submodule, &level);
-		if (colon != NULL)
-			*colon = ':';	/* set back, for error messages */
-		if (tokens == 2)
-			d_submodule_set(d_level, d_level_size,
-					submodule, level, tag);
-		else
-			printk(KERN_ERR "%s: can't parse '%s' as a "
-			       "SUBMODULE:LEVEL (%d tokens)\n",
-			       tag, token, tokens);
-	}
-	kfree(params_orig);
-}
-
-#endif /* #ifndef __debug__h__ */
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 0fdbf653b173..4807ca4d52e0 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -20,7 +20,6 @@
  * zsmalloc mapping modes
  *
  * NOTE: These only make a difference when a mapped object spans pages.
- * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
  */
 enum zs_mapmode {
 	ZS_MM_RW, /* normal read-write mapping */