| author | Ingo Molnar <mingo@kernel.org> | 2022-09-21 09:58:02 +0200 | 
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2022-09-21 09:58:02 +0200 | 
| commit | 74656d03ac36fabb16b9df5221cf398ee3a9ca08 (patch) | |
| tree | 0600e619ac817e2c016c148810814f55280316cc /include/linux/sched.h | |
| parent | 0d97db026509c1a13f732b22670ab1f0ac9d8d87 (diff) | |
| parent | 521a547ced6477c54b4b0cc206000406c221b4d6 (diff) | |
Merge tag 'v6.0-rc6' into locking/core, to refresh the branch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
| -rw-r--r-- | include/linux/sched.h | 17 |
1 file changed, 15 insertions(+), 2 deletions(-)
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 88b8817b827d..e7b2f8a5c711 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
 #include <linux/rseq.h>
 #include <linux/seqlock.h>
 #include <linux/kcsan.h>
+#include <linux/rv.h>
 #include <asm/kmap_size.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -843,8 +844,9 @@ struct task_struct {
 	int				trc_reader_nesting;
 	int				trc_ipi_to_cpu;
 	union rcu_special		trc_reader_special;
-	bool				trc_reader_checked;
 	struct list_head		trc_holdout_list;
+	struct list_head		trc_blkd_node;
+	int				trc_blkd_cpu;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 
 	struct sched_info		sched_info;
@@ -1500,6 +1502,16 @@ struct task_struct {
 	struct callback_head		l1d_flush_kill;
 #endif
 
+#ifdef CONFIG_RV
+	/*
+	 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
+	 * If we find justification for more monitors, we can think
+	 * about adding more or developing a dynamic method. So far,
+	 * none of these are justified.
+	 */
+	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
+#endif
+
 	/*
 	 * New fields for task_struct should be added above here, so that
 	 * they are included in the randomized portion of task_struct.
@@ -1813,7 +1825,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
 }
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
@@ -2223,6 +2235,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 extern bool sched_task_on_rq(struct task_struct *p);
 extern unsigned long get_wchan(struct task_struct *p);
+extern struct task_struct *cpu_curr_snapshot(int cpu);
 
 /*
  * In order to reduce various lock holder preemption latencies provide an
```
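The `CONFIG_RV` hunk above reserves a fixed number of runtime-verification monitor slots directly inside `task_struct`, sized by `RV_PER_TASK_MONITORS`, rather than allocating monitors dynamically. The standalone sketch below illustrates that fixed-slot pattern in plain userspace C under stated assumptions: `struct task_example`, `rv_reset_all`, and the union member name are hypothetical stand-ins, not kernel code; only the fixed-array-of-union-slots idea mirrors the diff.

```c
/*
 * Illustrative sketch of the per-task monitor-slot pattern (NOT kernel
 * code): a task carries a small fixed array of union slots, one per
 * runtime-verification monitor. All names here are hypothetical.
 */
#include <stdio.h>

#define RV_PER_TASK_MONITORS 1	/* hard per-task slot limit, as in the diff */

/* A union lets each slot hold whichever monitor type is in use,
 * at the cost of only the largest member's size per slot. */
union rv_task_monitor_sketch {
	int da_mon_state;	/* stand-in for a monitor's per-task state */
};

struct task_example {
	const char *comm;
	union rv_task_monitor_sketch rv[RV_PER_TASK_MONITORS];
};

/* Walk the fixed slots and reset each monitor attached to a task. */
static void rv_reset_all(struct task_example *t)
{
	for (int i = 0; i < RV_PER_TASK_MONITORS; i++)
		t->rv[i].da_mon_state = 0;
}

int main(void)
{
	struct task_example t = { .comm = "demo" };

	rv_reset_all(&t);
	printf("%s: slot 0 state = %d\n", t.comm, t.rv[0].da_mon_state);
	return 0;
}
```

As the comment in the diff itself notes, the fixed array is a deliberate trade-off: embedding a small, bounded number of slots in every task avoids per-task allocation and indirection, and a dynamic scheme is deferred until more monitors justify it.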
