| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2025-03-08 00:54:06 +0100 |
| committer | Ingo Molnar <mingo@kernel.org> | 2025-03-08 00:54:06 +0100 |
| commit | f23ecef20af6fbd489e0362d33cdf8d9429fa901 (patch) | |
| tree | 713f06d8335b7c3388bbfbc46cb6d2a568951252 /include | |
| parent | c929d08df8bee855528b9d15b853c892c54e1eee (diff) | |
| parent | 85b2b9c16d053364e2004883140538e73b333cdb (diff) | |
Merge branch 'locking/urgent' into locking/core, to pick up locking fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
35 files changed, 266 insertions, 73 deletions
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index f42133dae68e..2afc95bf1655 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -90,7 +90,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,  #ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR  static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, -		unsigned long addr, pte_t *ptep) +		unsigned long addr, pte_t *ptep, unsigned long sz)  {  	return ptep_get_and_clear(mm, addr, ptep);  } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 02a4adb4a999..0d5b186abee8 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -457,7 +457,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)  	. = ALIGN((align));						\  	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\  		__start_rodata = .;					\ -		*(.rodata) *(.rodata.*)					\ +		*(.rodata) *(.rodata.*) *(.data.rel.ro*)		\  		SCHED_DATA						\  		RO_AFTER_INIT_DATA	/* Read only after init */	\  		. = ALIGN(8);						\ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index fa2a76cc2f73..71f4f0cc3dac 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -28,7 +28,7 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);  typedef __u32 __bitwise req_flags_t;  /* Keep rqf_name[] in sync with the definitions below */ -enum { +enum rqf_flags {  	/* drive already may have started this one */  	__RQF_STARTED,  	/* request for flush sequence */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 248416ecd01c..d37751789bf5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -196,10 +196,11 @@ struct gendisk {  	unsigned int		zone_capacity;  	unsigned int		last_zone_capacity;  	unsigned long __rcu	*conv_zones_bitmap; -	unsigned int            zone_wplugs_hash_bits; -	spinlock_t              zone_wplugs_lock; +	unsigned int		zone_wplugs_hash_bits; +	atomic_t		nr_zone_wplugs; +	spinlock_t		zone_wplugs_lock;  	struct mempool_s	*zone_wplugs_pool; -	struct hlist_head       *zone_wplugs_hash; +	struct hlist_head	*zone_wplugs_hash;  	struct workqueue_struct *zone_wplugs_wq;  #endif /* CONFIG_BLK_DEV_ZONED */ @@ -367,6 +368,7 @@ struct queue_limits {  	unsigned int		max_sectors;  	unsigned int		max_user_sectors;  	unsigned int		max_segment_size; +	unsigned int		min_segment_size;  	unsigned int		physical_block_size;  	unsigned int		logical_block_size;  	unsigned int		alignment_offset; diff --git a/include/linux/call_once.h b/include/linux/call_once.h index 6261aa0b3fb0..13cd6469e7e5 100644 --- a/include/linux/call_once.h +++ b/include/linux/call_once.h @@ -26,20 +26,41 @@ do {									\  	__once_init((once), #once, &__key);				\  } while (0) -static inline void call_once(struct once *once, void (*cb)(struct once *)) +/* + * call_once - Ensure a function has been called exactly once + * + * @once: Tracking struct + * @cb: Function to be called + * + * If @once has never completed successfully before, call @cb and, if + * it returns a zero or positive value, mark @once as completed.  Return + * the value returned by @cb + * + * If @once has completed succesfully before, return 0. + * + * The call to @cb is implicitly surrounded by a mutex, though for + * efficiency the * function avoids taking it after the first call. + */ +static inline int call_once(struct once *once, int (*cb)(struct once *))  { -        /* Pairs with atomic_set_release() below.  
*/ -        if (atomic_read_acquire(&once->state) == ONCE_COMPLETED) -                return; - -        guard(mutex)(&once->lock); -        WARN_ON(atomic_read(&once->state) == ONCE_RUNNING); -        if (atomic_read(&once->state) != ONCE_NOT_STARTED) -                return; - -        atomic_set(&once->state, ONCE_RUNNING); -        cb(once); -        atomic_set_release(&once->state, ONCE_COMPLETED); +	int r, state; + +	/* Pairs with atomic_set_release() below.  */ +	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED) +		return 0; + +	guard(mutex)(&once->lock); +	state = atomic_read(&once->state); +	if (unlikely(state != ONCE_NOT_STARTED)) +		return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0; + +	atomic_set(&once->state, ONCE_RUNNING); +	r = cb(once); +	if (r < 0) +		atomic_set(&once->state, ONCE_NOT_STARTED); +	else +		atomic_set_release(&once->state, ONCE_COMPLETED); +	return r;  }  #endif /* _LINUX_CALL_ONCE_H */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 200fd3c5bc70..155385754824 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -110,7 +110,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,  /* Unreachable code */  #ifdef CONFIG_OBJTOOL  /* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #") +#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")  #else /* !CONFIG_OBJTOOL */  #define __annotate_jump_table  #endif /* CONFIG_OBJTOOL */ diff --git a/include/linux/cred.h b/include/linux/cred.h index 0c3c4b16b469..5658a3bfe803 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -172,18 +172,12 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)  static inline const struct cred *override_creds(const struct cred *override_cred)  { -	const struct cred *old = current->cred; - -	rcu_assign_pointer(current->cred, override_cred); -	return old; +	return rcu_replace_pointer(current->cred, override_cred, 1);  }  static inline const struct cred *revert_creds(const struct cred *revert_cred)  { -	const struct cred *override_cred = current->cred; - -	rcu_assign_pointer(current->cred, revert_cred); -	return override_cred; +	return rcu_replace_pointer(current->cred, revert_cred, 1);  }  /** diff --git a/include/linux/fs.h b/include/linux/fs.h index 2c3b2f8a621f..2788df98080f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2975,8 +2975,8 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)  	} else if (iocb->ki_flags & IOCB_DONTCACHE) {  		struct address_space *mapping = iocb->ki_filp->f_mapping; -		filemap_fdatawrite_range_kick(mapping, iocb->ki_pos, -					      iocb->ki_pos + count); +		filemap_fdatawrite_range_kick(mapping, iocb->ki_pos - count, +					      iocb->ki_pos - 1);  	}  	return count; @@ -3452,6 +3452,8 @@ extern const struct file_operations generic_ro_fops;  extern int readlink_copy(char __user *, int, const char *, int);  extern int page_readlink(struct dentry *, char __user *, int); +extern const char *page_get_link_raw(struct dentry *, struct inode *, +				     struct delayed_call *);  extern const char *page_get_link(struct dentry *, struct inode *,  				 struct delayed_call *);  extern void page_put_link(void *); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ec8c0ccc8f95..bf5f7256bd28 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1004,7 +1004,9 @@ static inline void 
hugetlb_count_sub(long l, struct mm_struct *mm)  static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,  						unsigned long addr, pte_t *ptep)  { -	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); +	unsigned long psize = huge_page_size(hstate_vma(vma)); + +	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);  }  #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6b27db7f9496..0234f14f2aa6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -875,10 +875,11 @@ struct mm_struct {  		 */  		unsigned int nr_cpus_allowed;  		/** -		 * @max_nr_cid: Maximum number of concurrency IDs allocated. +		 * @max_nr_cid: Maximum number of allowed concurrency +		 *              IDs allocated.  		 * -		 * Track the highest number of concurrency IDs allocated for the -		 * mm. +		 * Track the highest number of allowed concurrency IDs +		 * allocated for the mm.  		 */  		atomic_t max_nr_cid;  		/** diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c0a86afb85da..ab550a89b9bf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3275,6 +3275,8 @@ static inline struct net_device *first_net_device_rcu(struct net *net)  }  int netdev_boot_setup_check(struct net_device *dev); +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, +				   const char *hwaddr);  struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,  				       const char *hwaddr);  struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); @@ -4115,7 +4117,6 @@ void netif_receive_skb_list(struct list_head *head);  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);  void napi_gro_flush(struct napi_struct *napi, bool flush_old);  struct sk_buff *napi_get_frags(struct napi_struct *napi); -void napi_get_frags_check(struct napi_struct *napi);  gro_result_t napi_gro_frags(struct napi_struct *napi);  static inline void napi_free_frags(struct napi_struct *napi) diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 071d05d81d38..c86a11cfc4a3 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -278,7 +278,7 @@ struct netfs_io_request {  #define NETFS_RREQ_PAUSE		11	/* Pause subrequest generation */  #define NETFS_RREQ_USE_IO_ITER		12	/* Use ->io_iter rather than ->i_pages */  #define NETFS_RREQ_ALL_QUEUED		13	/* All subreqs are now queued */ -#define NETFS_RREQ_NEED_RETRY		14	/* Need to try retrying */ +#define NETFS_RREQ_RETRYING		14	/* Set if we're in the retry path */  #define NETFS_RREQ_USE_PGPRIV2		31	/* [DEPRECATED] Use PG_private_2 to mark  						 * write to cache on read */  	const struct netfs_request_ops *netfs_ops; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 71fbebfa43c7..9ac83ca88326 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -47,6 +47,7 @@ struct nfs4_acl {  struct nfs4_label {  	uint32_t	lfs;  	uint32_t	pi; +	u32		lsmid;  	u32		len;  	char	*label;  }; diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index e07e8978d691..e435250fcb4d 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -13,6 +13,8 @@  #define NVME_TCP_ADMIN_CCSZ	SZ_8K  #define NVME_TCP_DIGEST_LENGTH	4  #define NVME_TCP_MIN_MAXH2CDATA 4096 +#define NVME_TCP_MIN_C2HTERM_PLEN	24 +#define NVME_TCP_MAX_C2HTERM_PLEN	152  enum nvme_tcp_pfv {  	NVME_TCP_PFV_1_0 = 0x0, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index fe3b60818fdc..2dc05b1c3283 100644 --- 
a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -199,28 +199,54 @@ enum {  #define NVME_NVM_IOSQES		6  #define NVME_NVM_IOCQES		4 +/* + * Controller Configuration (CC) register (Offset 14h) + */  enum { +	/* Enable (EN): bit 0 */  	NVME_CC_ENABLE		= 1 << 0,  	NVME_CC_EN_SHIFT	= 0, + +	/* Bits 03:01 are reserved (NVMe Base Specification rev 2.1) */ + +	/* I/O Command Set Selected (CSS): bits 06:04 */  	NVME_CC_CSS_SHIFT	= 4, -	NVME_CC_MPS_SHIFT	= 7, -	NVME_CC_AMS_SHIFT	= 11, -	NVME_CC_SHN_SHIFT	= 14, -	NVME_CC_IOSQES_SHIFT	= 16, -	NVME_CC_IOCQES_SHIFT	= 20, +	NVME_CC_CSS_MASK	= 7 << NVME_CC_CSS_SHIFT,  	NVME_CC_CSS_NVM		= 0 << NVME_CC_CSS_SHIFT,  	NVME_CC_CSS_CSI		= 6 << NVME_CC_CSS_SHIFT, -	NVME_CC_CSS_MASK	= 7 << NVME_CC_CSS_SHIFT, + +	/* Memory Page Size (MPS): bits 10:07 */ +	NVME_CC_MPS_SHIFT	= 7, +	NVME_CC_MPS_MASK	= 0xf << NVME_CC_MPS_SHIFT, + +	/* Arbitration Mechanism Selected (AMS): bits 13:11 */ +	NVME_CC_AMS_SHIFT	= 11, +	NVME_CC_AMS_MASK	= 7 << NVME_CC_AMS_SHIFT,  	NVME_CC_AMS_RR		= 0 << NVME_CC_AMS_SHIFT,  	NVME_CC_AMS_WRRU	= 1 << NVME_CC_AMS_SHIFT,  	NVME_CC_AMS_VS		= 7 << NVME_CC_AMS_SHIFT, + +	/* Shutdown Notification (SHN): bits 15:14 */ +	NVME_CC_SHN_SHIFT	= 14, +	NVME_CC_SHN_MASK	= 3 << NVME_CC_SHN_SHIFT,  	NVME_CC_SHN_NONE	= 0 << NVME_CC_SHN_SHIFT,  	NVME_CC_SHN_NORMAL	= 1 << NVME_CC_SHN_SHIFT,  	NVME_CC_SHN_ABRUPT	= 2 << NVME_CC_SHN_SHIFT, -	NVME_CC_SHN_MASK	= 3 << NVME_CC_SHN_SHIFT, + +	/* I/O Submission Queue Entry Size (IOSQES): bits 19:16 */ +	NVME_CC_IOSQES_SHIFT	= 16, +	NVME_CC_IOSQES_MASK	= 0xf << NVME_CC_IOSQES_SHIFT,  	NVME_CC_IOSQES		= NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, + +	/* I/O Completion Queue Entry Size (IOCQES): bits 23:20 */ +	NVME_CC_IOCQES_SHIFT	= 20, +	NVME_CC_IOCQES_MASK	= 0xf << NVME_CC_IOCQES_SHIFT,  	NVME_CC_IOCQES		= NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, + +	/* Controller Ready Independent of Media Enable (CRIME): bit 24 */  	NVME_CC_CRIME		= 1 << 24, + +	/* Bits 25:31 are reserved (NVMe Base Specification rev 2.1) */  };  enum { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index de5deb1a0118..1a2594a38199 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3134,6 +3134,7 @@  #define PCI_DEVICE_ID_INTEL_HDA_LNL_P	0xa828  #define PCI_DEVICE_ID_INTEL_S21152BB	0xb152  #define PCI_DEVICE_ID_INTEL_HDA_BMG	0xe2f7 +#define PCI_DEVICE_ID_INTEL_HDA_PTL_H	0xe328  #define PCI_DEVICE_ID_INTEL_HDA_PTL	0xe428  #define PCI_DEVICE_ID_INTEL_HDA_CML_R	0xf0c8  #define PCI_DEVICE_ID_INTEL_HDA_RKL_S	0xf1c8 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 8ff23bf5a819..b698758000f8 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -31,6 +31,33 @@ struct pipe_buffer {  	unsigned long private;  }; +/* + * Really only alpha needs 32-bit fields, but + * might as well do it for 64-bit architectures + * since that's what we've historically done, + * and it makes 'head_tail' always be a simple + * 'unsigned long'. + */ +#ifdef CONFIG_64BIT +typedef unsigned int pipe_index_t; +#else +typedef unsigned short pipe_index_t; +#endif + +/* + * We have to declare this outside 'struct pipe_inode_info', + * but then we can't use 'union pipe_index' for an anonymous + * union, so we end up having to duplicate this declaration + * below. Annoying. 
+ */ +union pipe_index { +	unsigned long head_tail; +	struct { +		pipe_index_t head; +		pipe_index_t tail; +	}; +}; +  /**   *	struct pipe_inode_info - a linux kernel pipe   *	@mutex: mutex protecting the whole thing @@ -38,6 +65,7 @@ struct pipe_buffer {   *	@wr_wait: writer wait point in case of full pipe   *	@head: The point of buffer production   *	@tail: The point of buffer consumption + *	@head_tail: unsigned long union of @head and @tail   *	@note_loss: The next read() should insert a data-lost message   *	@max_usage: The maximum number of slots that may be used in the ring   *	@ring_size: total number of buffers (should be a power of 2) @@ -58,8 +86,16 @@ struct pipe_buffer {  struct pipe_inode_info {  	struct mutex mutex;  	wait_queue_head_t rd_wait, wr_wait; -	unsigned int head; -	unsigned int tail; + +	/* This has to match the 'union pipe_index' above */ +	union { +		unsigned long head_tail; +		struct { +			pipe_index_t head; +			pipe_index_t tail; +		}; +	}; +  	unsigned int max_usage;  	unsigned int ring_size;  	unsigned int nr_accounted; @@ -141,23 +177,23 @@ static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)  }  /** - * pipe_empty - Return true if the pipe is empty + * pipe_occupancy - Return number of slots used in the pipe   * @head: The pipe ring head pointer   * @tail: The pipe ring tail pointer   */ -static inline bool pipe_empty(unsigned int head, unsigned int tail) +static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)  { -	return head == tail; +	return (pipe_index_t)(head - tail);  }  /** - * pipe_occupancy - Return number of slots used in the pipe + * pipe_empty - Return true if the pipe is empty   * @head: The pipe ring head pointer   * @tail: The pipe ring tail pointer   */ -static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail) +static inline bool pipe_empty(unsigned int head, unsigned int tail)  { -	return head - tail; +	return !pipe_occupancy(head, tail);  }  /** @@ -173,6 +209,33 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,  }  /** + * pipe_is_full - Return true if the pipe is full + * @pipe: the pipe + */ +static inline bool pipe_is_full(const struct pipe_inode_info *pipe) +{ +	return pipe_full(pipe->head, pipe->tail, pipe->max_usage); +} + +/** + * pipe_is_empty - Return true if the pipe is empty + * @pipe: the pipe + */ +static inline bool pipe_is_empty(const struct pipe_inode_info *pipe) +{ +	return pipe_empty(pipe->head, pipe->tail); +} + +/** + * pipe_buf_usage - Return how many pipe buffers are in use + * @pipe: the pipe + */ +static inline unsigned int pipe_buf_usage(const struct pipe_inode_info *pipe) +{ +	return pipe_occupancy(pipe->head, pipe->tail); +} + +/**   * pipe_buf - Return the pipe buffer for the specified slot in the pipe ring   * @pipe: The pipe to access   * @slot: The slot of interest @@ -245,15 +308,6 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,  	return buf->ops->try_steal(pipe, buf);  } -static inline void pipe_discard_from(struct pipe_inode_info *pipe, -		unsigned int old_head) -{ -	unsigned int mask = pipe->ring_size - 1; - -	while (pipe->head > old_head) -		pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]); -} -  /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual     memory allocation, whereas PIPE_BUF makes atomicity guarantees.  
*/  #define PIPE_SIZE		PAGE_SIZE diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index 8ab5b0e8eb2c..8c9df7dadd5d 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -33,6 +33,8 @@ enum platform_profile_option {   * @probe: Callback to setup choices available to the new class device. These   *	   choices will only be enforced when setting a new profile, not when   *	   getting the current one. + * @hidden_choices: Callback to setup choices that are not visible to the user + *		    but can be set by the driver.   * @profile_get: Callback that will be called when showing the current platform   *		 profile in sysfs.   * @profile_set: Callback that will be called when storing a new platform @@ -40,6 +42,7 @@ enum platform_profile_option {   */  struct platform_profile_ops {  	int (*probe)(void *drvdata, unsigned long *choices); +	int (*hidden_choices)(void *drvdata, unsigned long *choices);  	int (*profile_get)(struct device *dev, enum platform_profile_option *profile);  	int (*profile_set)(struct device *dev, enum platform_profile_option profile);  }; diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h index 2c8bfd0f1b6b..6322d8c1c6b4 100644 --- a/include/linux/rcuref.h +++ b/include/linux/rcuref.h @@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)  	return rcuref_get_slowpath(ref);  } -extern __must_check bool rcuref_put_slowpath(rcuref_t *ref); +extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);  /*   * Internal helper. Do not invoke directly.   */  static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)  { +	int cnt; +  	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),  			 "suspicious rcuref_put_rcusafe() usage");  	/*  	 * Unconditionally decrease the reference count. The saturation and  	 * dead zones provide enough tolerance for this.  	 */ -	if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) +	cnt = atomic_sub_return_release(1, &ref->refcnt); +	if (likely(cnt >= 0))  		return false;  	/*  	 * Handle the last reference drop and cases inside the saturation  	 * and dead zones.  	 
*/ -	return rcuref_put_slowpath(ref); +	return rcuref_put_slowpath(ref, cnt);  }  /** diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 2cbe0c22a32f..0b9095a281b8 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -91,6 +91,8 @@ struct sk_psock {  	struct sk_psock_progs		progs;  #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)  	struct strparser		strp; +	u32				copied_seq; +	u32				ingress_bytes;  #endif  	struct sk_buff_head		ingress_skb;  	struct list_head		ingress_msg; diff --git a/include/linux/socket.h b/include/linux/socket.h index d18cc47e89bd..c3322eb3d686 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -392,6 +392,8 @@ struct ucred {  extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);  extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); +extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len, +			    void *data);  struct timespec64;  struct __kernel_timespec; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index fec1e8a1570c..eac57914dcf3 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -158,7 +158,6 @@ enum {  	RPC_TASK_NEED_XMIT,  	RPC_TASK_NEED_RECV,  	RPC_TASK_MSG_PIN_WAIT, -	RPC_TASK_SIGNALLED,  };  #define rpc_test_and_set_running(t) \ @@ -171,7 +170,7 @@ enum {  #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) -#define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate) +#define RPC_SIGNALLED(t)	(READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)  /*   * Task priorities. diff --git a/include/net/gro.h b/include/net/gro.h index b9b58c1f8d19..7b548f91754b 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -11,6 +11,9 @@  #include <net/udp.h>  #include <net/hotdata.h> +/* This should be increased if a protocol with a bigger head is added. 
*/ +#define GRO_MAX_HEAD (MAX_HEADER + 128) +  struct napi_gro_cb {  	union {  		struct { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 7ba1402ca779..f467a66abc6b 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -297,6 +297,7 @@ static inline int check_net(const struct net *net)  }  void net_drop_ns(void *); +void net_passive_dec(struct net *net);  #else @@ -326,8 +327,18 @@ static inline int check_net(const struct net *net)  }  #define net_drop_ns NULL + +static inline void net_passive_dec(struct net *net) +{ +	refcount_dec(&net->passive); +}  #endif +static inline void net_passive_inc(struct net *net) +{ +	refcount_inc(&net->passive); +} +  /* Returns true if the netns initialization is completed successfully */  static inline bool net_initialized(const struct net *net)  { diff --git a/include/net/sock.h b/include/net/sock.h index 8036b3b79cd8..7ef728324e4e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1751,6 +1751,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk)  struct sock *sk_alloc(struct net *net, int family, gfp_t priority,  		      struct proto *prot, int kern);  void sk_free(struct sock *sk); +void sk_net_refcnt_upgrade(struct sock *sk);  void sk_destruct(struct sock *sk);  struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);  void sk_free_unlock_clone(struct sock *sk); diff --git a/include/net/strparser.h b/include/net/strparser.h index 41e2ce9e9e10..0a83010b3a64 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h @@ -43,6 +43,8 @@ struct strparser;  struct strp_callbacks {  	int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);  	void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); +	int (*read_sock)(struct strparser *strp, read_descriptor_t *desc, +			 sk_read_actor_t recv_actor);  	int (*read_sock_done)(struct strparser *strp, int err);  	void (*abort_parser)(struct strparser *strp, int err);  	void (*lock)(struct strparser *strp); diff --git a/include/net/tcp.h b/include/net/tcp.h index 5b2b04835688..2d08473a6dc0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -41,6 +41,7 @@  #include <net/inet_ecn.h>  #include <net/dst.h>  #include <net/mptcp.h> +#include <net/xfrm.h>  #include <linux/seq_file.h>  #include <linux/memcontrol.h> @@ -683,6 +684,19 @@ void tcp_fin(struct sock *sk);  void tcp_check_space(struct sock *sk);  void tcp_sack_compress_send_ack(struct sock *sk); +static inline void tcp_cleanup_skb(struct sk_buff *skb) +{ +	skb_dst_drop(skb); +	secpath_reset(skb); +} + +static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb) +{ +	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); +	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb)); +	__skb_queue_tail(&sk->sk_receive_queue, skb); +} +  /* tcp_timer.c */  void tcp_init_xmit_timers(struct sock *);  static inline void tcp_clear_xmit_timers(struct sock *sk) @@ -729,6 +743,9 @@ void tcp_get_info(struct sock *, struct tcp_info *);  /* Read 'sendfile()'-style from a TCP socket */  int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,  		  sk_read_actor_t recv_actor); +int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, +			sk_read_actor_t recv_actor, bool noack, +			u32 *copied_seq);  int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);  struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);  void tcp_read_done(struct sock *sk, size_t len); @@ -2599,6 +2616,11 @@ struct sk_psock;  #ifdef CONFIG_BPF_SYSCALL  int 
tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);  void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); +#ifdef CONFIG_BPF_STREAM_PARSER +struct strparser; +int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc, +			   sk_read_actor_t recv_actor); +#endif /* CONFIG_BPF_STREAM_PARSER */  #endif /* CONFIG_BPF_SYSCALL */  #ifdef CONFIG_INET diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 3dc7a1551ac3..5d653a3491d0 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -12,6 +12,7 @@  #include <linux/firmware/cirrus/cs_dsp.h>  #include <linux/regulator/consumer.h>  #include <linux/regmap.h> +#include <linux/spi/spi.h>  #include <sound/cs-amp-lib.h>  #define CS35L56_DEVID					0x0000000 @@ -61,6 +62,7 @@  #define CS35L56_IRQ1_MASK_8				0x000E0AC  #define CS35L56_IRQ1_MASK_18				0x000E0D4  #define CS35L56_IRQ1_MASK_20				0x000E0DC +#define CS35L56_DSP_MBOX_1_RAW				0x0011000  #define CS35L56_DSP_VIRTUAL1_MBOX_1			0x0011020  #define CS35L56_DSP_VIRTUAL1_MBOX_2			0x0011024  #define CS35L56_DSP_VIRTUAL1_MBOX_3			0x0011028 @@ -224,6 +226,7 @@  #define CS35L56_HALO_STATE_SHUTDOWN			1  #define CS35L56_HALO_STATE_BOOT_DONE			2 +#define CS35L56_MBOX_CMD_PING				0x0A000000  #define CS35L56_MBOX_CMD_AUDIO_PLAY			0x0B000001  #define CS35L56_MBOX_CMD_AUDIO_PAUSE			0x0B000002  #define CS35L56_MBOX_CMD_AUDIO_REINIT			0x0B000003 @@ -254,6 +257,16 @@  #define CS35L56_NUM_BULK_SUPPLIES			3  #define CS35L56_NUM_DSP_REGIONS				5 +/* Additional margin for SYSTEM_RESET to control port ready on SPI */ +#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500) + +struct cs35l56_spi_payload { +	__be32	addr; +	__be16	pad; +	__be32	value; +} __packed; +static_assert(sizeof(struct cs35l56_spi_payload) == 10); +  struct cs35l56_base {  	struct device *dev;  	struct regmap *regmap; @@ -269,6 +282,7 @@ struct cs35l56_base {  	s8 cal_index;  	struct cirrus_amp_cal_data cal_data;  	struct gpio_desc *reset_gpio; +	struct cs35l56_spi_payload *spi_payload_buf;  };  static inline bool cs35l56_is_otp_register(unsigned int reg) @@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)  	return (reg >> 16) == 3;  } +static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56, +					      struct spi_device *spi) +{ +	cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev, +						sizeof(*cs35l56->spi_payload_buf), +						GFP_KERNEL | GFP_DMA); +	if (!cs35l56->spi_payload_buf) +		return -ENOMEM; + +	return 0; +} + +static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56) +{ +	return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf; +} +  extern const struct regmap_config cs35l56_regmap_i2c;  extern const struct regmap_config cs35l56_regmap_spi;  extern const struct regmap_config cs35l56_regmap_sdw; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index b0db89058c91..958a2460330c 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -174,6 +174,7 @@ enum yfs_cm_operation {  	EM(afs_cell_trace_get_queue_dns,	"GET q-dns ") \  	EM(afs_cell_trace_get_queue_manage,	"GET q-mng ") \  	EM(afs_cell_trace_get_queue_new,	"GET q-new ") \ +	EM(afs_cell_trace_get_server,		"GET server") \  	EM(afs_cell_trace_get_vol,		"GET vol   ") \  	EM(afs_cell_trace_insert,		"INSERT    ") \  	EM(afs_cell_trace_manage,		"MANAGE    ") \ @@ -182,6 +183,7 @@ enum yfs_cm_operation {  	EM(afs_cell_trace_put_destroy,		"PUT destry") \  	
EM(afs_cell_trace_put_queue_work,	"PUT q-work") \  	EM(afs_cell_trace_put_queue_fail,	"PUT q-fail") \ +	EM(afs_cell_trace_put_server,		"PUT server") \  	EM(afs_cell_trace_put_vol,		"PUT vol   ") \  	EM(afs_cell_trace_see_source,		"SEE source") \  	EM(afs_cell_trace_see_ws,		"SEE ws    ") \ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 6e699cadcb29..f880835f7695 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -99,7 +99,7 @@  	EM(netfs_sreq_trace_limited,		"LIMIT")	\  	EM(netfs_sreq_trace_need_clear,		"N-CLR")	\  	EM(netfs_sreq_trace_partial_read,	"PARTR")	\ -	EM(netfs_sreq_trace_need_retry,		"NRTRY")	\ +	EM(netfs_sreq_trace_need_retry,		"ND-RT")	\  	EM(netfs_sreq_trace_prepare,		"PREP ")	\  	EM(netfs_sreq_trace_prep_failed,	"PRPFL")	\  	EM(netfs_sreq_trace_progress,		"PRGRS")	\ @@ -108,7 +108,9 @@  	EM(netfs_sreq_trace_short,		"SHORT")	\  	EM(netfs_sreq_trace_split,		"SPLIT")	\  	EM(netfs_sreq_trace_submit,		"SUBMT")	\ +	EM(netfs_sreq_trace_superfluous,	"SPRFL")	\  	EM(netfs_sreq_trace_terminated,		"TERM ")	\ +	EM(netfs_sreq_trace_wait_for,		"_WAIT")	\  	EM(netfs_sreq_trace_write,		"WRITE")	\  	EM(netfs_sreq_trace_write_skip,		"SKIP ")	\  	E_(netfs_sreq_trace_write_term,		"WTERM") diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index b13dc275ef4a..851841336ee6 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,  		{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" },			\  		{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" },		\  		{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" },		\ -		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" },	\ -		{ (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" }) +		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })  DECLARE_EVENT_CLASS(rpc_task_running, diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index e11c82638527..050fa8eb2e8f 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -380,7 +380,7 @@ enum io_uring_op {   *				result 	will be the number of buffers send, with   *				the starting buffer ID in cqe->flags as per   *				usual for provided buffer usage. The buffers - *				will be	contigious from the starting buffer ID. + *				will be	contiguous from the starting buffer ID.   */  #define IORING_RECVSEND_POLL_FIRST	(1U << 0)  #define IORING_RECV_MULTISHOT		(1U << 1) diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 33745642f787..e1d2c27533b4 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -268,7 +268,9 @@ struct landlock_net_port_attr {   * ~~~~~~~~~~~~~~~~   *   * These flags enable to restrict a sandboxed process to a set of network - * actions. This is supported since the Landlock ABI version 4. + * actions. + * + * This is supported since Landlock ABI version 4.   *   * The following access rights apply to TCP port numbers:   * @@ -291,11 +293,13 @@ struct landlock_net_port_attr {   * Setting a flag for a ruleset will isolate the Landlock domain to forbid   * connections to resources outside the domain.   * + * This is supported since Landlock ABI version 6. + *   * Scopes:   *   * - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from   *   connecting to an abstract UNIX socket created by a process outside the - *   related Landlock domain (e.g. a parent domain or a non-sandboxed process). + *   related Landlock domain (e.g., a parent domain or a non-sandboxed process).   
* - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal   *   to another process outside the domain.   */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index 934e20ef7f79..95762232e018 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -34,7 +34,7 @@   */ -#define TASKSTATS_VERSION	14 +#define TASKSTATS_VERSION	15  #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN  					 * in linux/sched.h */ diff --git a/include/uapi/linux/virtio_snd.h b/include/uapi/linux/virtio_snd.h index 5f4100c2cf04..a4cfb9f6561a 100644 --- a/include/uapi/linux/virtio_snd.h +++ b/include/uapi/linux/virtio_snd.h @@ -25,7 +25,7 @@ struct virtio_snd_config {  	__le32 streams;  	/* # of available channel maps */  	__le32 chmaps; -	/* # of available control elements */ +	/* # of available control elements (if VIRTIO_SND_F_CTLS) */  	__le32 controls;  };  | 
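A few illustrative sketches of the interface changes above follow; none of them are part of the commit. First, the call_once() rework: the callback now returns an int, a negative return puts the tracker back into ONCE_NOT_STARTED so initialization can be retried, and later successful calls return 0 without taking the mutex. A minimal sketch, assuming a hypothetical my_dev structure whose init_once member was set up elsewhere with the header's init macro:

```c
#include <linux/call_once.h>
#include <linux/container_of.h>
#include <linux/slab.h>

struct my_dev {
	struct once init_once;	/* assumed initialized by the header's init macro */
	void *table;
};

/* Hypothetical one-time setup; a negative errno leaves the once in
 * ONCE_NOT_STARTED so a later call_once() can retry the allocation. */
static int my_dev_init_once(struct once *once)
{
	struct my_dev *dev = container_of(once, struct my_dev, init_once);

	dev->table = kzalloc(PAGE_SIZE, GFP_KERNEL);
	return dev->table ? 0 : -ENOMEM;
}

static int my_dev_use(struct my_dev *dev)
{
	int ret = call_once(&dev->init_once, my_dev_init_once);

	if (ret < 0)
		return ret;	/* init failed; the next caller retries */

	/* Fast path: once completed, call_once() returns 0 without the mutex. */
	return 0;
}
```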
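The pipe_fs_i.h changes pack head and tail into one unsigned long (head_tail) and add the pipe_is_empty()/pipe_is_full()/pipe_buf_usage() wrappers. The sketch below, built around the made-up helper pipe_sample_state(), shows the pattern the union enables: snapshot both indices with a single read, then reuse the existing helpers on the copy.

```c
#include <linux/pipe_fs_i.h>

/* Illustrative only: read head and tail together through the new
 * head_tail member, then evaluate the copies without holding the lock. */
static unsigned int pipe_sample_state(const struct pipe_inode_info *pipe,
				      bool *empty, bool *full)
{
	union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };

	*empty = pipe_empty(idx.head, idx.tail);
	*full  = pipe_full(idx.head, idx.tail, pipe->max_usage);

	/* pipe_occupancy() now truncates to pipe_index_t, so wrap-around of
	 * the free-running head/tail counters is handled for us. */
	return pipe_occupancy(idx.head, idx.tail);
}
```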
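huge_ptep_get_and_clear() now takes the mapping size as a fourth argument; the hugetlb.h hunk shows huge_ptep_modify_prot_start() deriving it with huge_page_size(hstate_vma(vma)). A hedged sketch of a caller following the same pattern (example_clear_huge_pte() is not a function from the patch):

```c
#include <linux/hugetlb.h>

/* Illustrative caller mirroring the huge_ptep_modify_prot_start() change:
 * the page size comes from the VMA's hstate and is passed straight through. */
static pte_t example_clear_huge_pte(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
```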
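The nvme.h hunk documents each Controller Configuration (CC) field and pairs every shift with a mask. A sketch of how a driver might compose a CC value from these constants; nvme_example_build_cc() is illustrative and assumes the MPS encoding of 2^(12 + MPS) bytes from the NVMe Base Specification:

```c
#include <linux/nvme.h>
#include <linux/types.h>

/* Illustrative only: build a CC value for the NVM command set with the
 * given host page shift, round-robin arbitration, and the standard
 * 64-byte SQE / 16-byte CQE sizes. */
static u32 nvme_example_build_cc(unsigned int page_shift)
{
	u32 cc = 0;

	/* MPS encodes the host memory page size as 2^(12 + MPS). */
	cc |= ((page_shift - 12) << NVME_CC_MPS_SHIFT) & NVME_CC_MPS_MASK;
	cc |= NVME_CC_CSS_NVM;				/* CSS: NVM command set */
	cc |= NVME_CC_AMS_RR;				/* AMS: round-robin */
	cc |= NVME_CC_IOSQES | NVME_CC_IOCQES;		/* queue entry sizes */
	cc |= NVME_CC_ENABLE;				/* EN: bit 0 */

	return cc;
}
```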
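Finally, tcp.h gains tcp_cleanup_skb() and tcp_add_receive_queue(): dst and secpath state is dropped before an skb is linked into sk_receive_queue, and the queueing helper asserts that this has already happened. A sketch of the intended call order in a receive path (the surrounding function and the sk_data_ready() wake-up are illustrative, not taken from the patch):

```c
#include <net/tcp.h>

/* Illustrative receive-path fragment: strip per-packet dst/xfrm state
 * before the skb becomes long-lived on the socket receive queue, keeping
 * the DEBUG_NET_WARN_ON_ONCE() checks in tcp_add_receive_queue() quiet. */
static void example_queue_in_order_skb(struct sock *sk, struct sk_buff *skb)
{
	tcp_cleanup_skb(skb);		/* skb_dst_drop() + secpath_reset() */
	tcp_add_receive_queue(sk, skb);	/* __skb_queue_tail(&sk->sk_receive_queue, skb) */
	sk->sk_data_ready(sk);		/* wake readers as usual */
}
```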
