diff options
Diffstat (limited to 'kernel/rcu/tree.h')
 kernel/rcu/tree.h | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
| diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index f491ab4f2e8e..78e051dffc5b 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -58,6 +58,14 @@ struct rcu_dynticks {  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */  }; +/* Communicate arguments to a workqueue handler. */ +struct rcu_exp_work { +	smp_call_func_t rew_func; +	struct rcu_state *rew_rsp; +	unsigned long rew_s; +	struct work_struct rew_work; +}; +  /* RCU's kthread states for tracing. */  #define RCU_KTHREAD_STOPPED  0  #define RCU_KTHREAD_RUNNING  1 @@ -150,15 +158,32 @@ struct rcu_node {  	struct swait_queue_head nocb_gp_wq[2];  				/* Place for rcu_nocb_kthread() to wait GP. */  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ -	int need_future_gp[2]; -				/* Counts of upcoming no-CB GP requests. */ +	u8 need_future_gp[4];	/* Counts of upcoming GP requests. */  	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;  	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;  	unsigned long exp_seq_rq;  	wait_queue_head_t exp_wq[4]; +	struct rcu_exp_work rew; +	bool exp_need_flush;	/* Need to flush workitem? */  } ____cacheline_internodealigned_in_smp; +/* Accessors for ->need_future_gp[] array. */ +#define need_future_gp_mask() \ +	(ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1) +#define need_future_gp_element(rnp, c) \ +	((rnp)->need_future_gp[(c) & need_future_gp_mask()]) +#define need_any_future_gp(rnp)						\ +({									\ +	int __i;							\ +	bool __nonzero = false;						\ +									\ +	for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)	\ +		__nonzero = __nonzero ||				\ +			    READ_ONCE((rnp)->need_future_gp[__i]);	\ +	__nonzero;							\ +}) +  /*   * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and   * are indexed relative to this interval rather than the global CPU ID space. 
@@ -224,10 +249,6 @@ struct rcu_data {  #ifdef CONFIG_RCU_FAST_NO_HZ  	struct rcu_head oom_head;  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ -	atomic_long_t exp_workdone0;	/* # done by workqueue. */ -	atomic_long_t exp_workdone1;	/* # done by others #1. */ -	atomic_long_t exp_workdone2;	/* # done by others #2. */ -	atomic_long_t exp_workdone3;	/* # done by others #3. */  	int exp_dynticks_snap;		/* Double-check need for IPI. */  	/* 6) Callback offloading. */ @@ -408,7 +429,6 @@ extern struct rcu_state rcu_preempt_state;  #endif /* #ifdef CONFIG_PREEMPT_RCU */  int rcu_dynticks_snap(struct rcu_dynticks *rdtp); -bool rcu_eqs_special_set(int cpu);  #ifdef CONFIG_RCU_BOOST  DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); @@ -438,7 +458,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);  static void invoke_rcu_callbacks_kthread(void);  static bool rcu_is_callbacks_kthread(void);  #ifdef CONFIG_RCU_BOOST -static void rcu_preempt_do_callbacks(void);  static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,  						 struct rcu_node *rnp);  #endif /* #ifdef CONFIG_RCU_BOOST */ @@ -454,7 +473,6 @@ static void print_cpu_stall_info_end(void);  static void zero_cpu_stall_ticks(struct rcu_data *rdp);  static void increment_cpu_stall_ticks(void);  static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); -static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);  static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);  static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);  static void rcu_init_one_nocb(struct rcu_node *rnp); | 
