Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	98
1 file changed, 58 insertions, 40 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 84fbee4686d3..7fd12039e512 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -182,7 +182,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 	WARN_ON_ONCE(rdp->mynode != rnp);
-	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
+	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
 
 	/*
 	 * Decide where to queue the newly blocked task.  In theory,
@@ -384,6 +384,50 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 }
 
 /*
+ * Preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+	current->rcu_read_lock_nesting++;
+	barrier();  /* critical section after entry code. */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+	struct task_struct *t = current;
+
+	if (t->rcu_read_lock_nesting != 1) {
+		--t->rcu_read_lock_nesting;
+	} else {
+		barrier();  /* critical section before exit code. */
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
+			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
+#ifdef CONFIG_PROVE_LOCKING
+	{
+		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
  * Advance a ->blkd_tasks-list pointer to the next entry, instead
  * returning NULL if at the end of the list.
  */
@@ -489,7 +533,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		rnp = t->rcu_blocked_node;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
-		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
+		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		empty_exp = sync_rcu_preempt_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -685,15 +729,6 @@ static void rcu_preempt_check_callbacks(void)
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
-#ifdef CONFIG_RCU_BOOST
-
-static void rcu_preempt_do_callbacks(void)
-{
-	rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
-}
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 /**
  * call_rcu() - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -1140,7 +1175,7 @@ static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
 	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
-	rcu_preempt_do_callbacks();
+	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 }
 
 static void rcu_cpu_kthread_setup(unsigned int cpu)
@@ -1607,7 +1642,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 	}
 
 	/* Unconditionally decrement: no need to wake ourselves up. */
@@ -1780,19 +1815,6 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 	swake_up_all(sq);
 }
 
-/*
- * Set the root rcu_node structure's ->need_future_gp field
- * based on the sum of those of all rcu_node structures.  This does
- * double-count the root rcu_node structure's requests, but this
- * is necessary to handle the possibility of a rcu_nocb_kthread()
- * having awakened during the time that the rcu_node structures
- * were being updated for the end of the previous grace period.
- */
-static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
-{
-	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
-}
-
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
 	return &rnp->nocb_gp_wq[rnp->completed & 0x1];
@@ -1966,7 +1988,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("WakeOvf"));
 		} else {
-			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
+			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
 					       TPS("WakeOvfIsDeferred"));
 		}
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
@@ -2048,7 +2070,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	needwake = rcu_start_future_gp(rnp, rdp, &c);
+	c = rcu_cbs_completed(rdp->rsp, rnp);
+	needwake = rcu_start_this_gp(rnp, rdp, c);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
@@ -2057,7 +2080,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 * Wait for the grace period.  Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
-	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
+	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
 		swait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
@@ -2065,9 +2088,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (likely(d))
 			break;
 		WARN_ON(signal_pending(current));
-		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait"));
 	}
-	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
+	trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait"));
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
 
@@ -2236,7 +2259,7 @@ static int rcu_nocb_kthread(void *arg)
 				cl++;
 			c++;
 			local_bh_enable();
-			cond_resched_rcu_qs();
+			cond_resched_tasks_rcu_qs();
 			list = next;
 		}
 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
@@ -2292,7 +2315,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 void __init rcu_init_nohz(void)
 {
 	int cpu;
-	bool need_rcu_nocb_mask = true;
+	bool need_rcu_nocb_mask = false;
 	struct rcu_state *rsp;
 
 #if defined(CONFIG_NO_HZ_FULL)
@@ -2315,7 +2338,7 @@ void __init rcu_init_nohz(void)
 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
 
 	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
 			    rcu_nocb_mask);
 	}
@@ -2495,10 +2518,6 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 {
 }
 
-static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
-{
-}
-
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
 	return NULL;
@@ -2587,8 +2606,7 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 }
 
 /*
- * Bind the grace-period kthread for the sysidle flavor of RCU to the
- * timekeeping CPU.
+ * Bind the RCU grace-period kthreads to the housekeeping CPU.
  */
 static void rcu_bind_gp_kthread(void)
 {
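The __rcu_read_unlock() code added above parks ->rcu_read_lock_nesting at INT_MIN while the
outermost unlock runs rcu_read_unlock_special(), so that any RCU reader entered from that slow
path sees a non-1 nesting count and takes the fast path instead of recursing. The stand-alone C
sketch below models only that counter logic in user space; every model_* name is hypothetical,
and the barrier()/READ_ONCE() ordering of the real kernel code is deliberately left out.

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-task RCU reader state. */
struct model_task {
	int nesting;		/* models ->rcu_read_lock_nesting */
	int unlock_special;	/* models ->rcu_read_unlock_special.s */
};

static void model_read_lock(struct model_task *t);
static void model_read_unlock(struct model_task *t);

static void model_unlock_special(struct model_task *t)
{
	/*
	 * The real rcu_read_unlock_special() may itself use RCU readers.
	 * Because the caller parked nesting at INT_MIN, the nested unlock
	 * below sees nesting != 1 and takes the fast path rather than
	 * recursing back into this function.
	 */
	model_read_lock(t);
	printf("special cleanup, nesting while parked = %d\n", t->nesting);
	model_read_unlock(t);
	t->unlock_special = 0;
}

static void model_read_lock(struct model_task *t)
{
	t->nesting++;			/* models current->rcu_read_lock_nesting++ */
}

static void model_read_unlock(struct model_task *t)
{
	if (t->nesting != 1) {
		--t->nesting;		/* nested (or parked) unlock: fast path */
	} else {
		t->nesting = INT_MIN;	/* park the counter for the outermost unlock */
		if (t->unlock_special)
			model_unlock_special(t);
		t->nesting = 0;		/* fully outside the critical section */
	}
}

int main(void)
{
	struct model_task t = { .nesting = 0, .unlock_special = 1 };

	model_read_lock(&t);		/* outermost reader */
	model_read_lock(&t);		/* nested reader: just bumps the counter */
	model_read_unlock(&t);		/* fast path: decrement only */
	model_read_unlock(&t);		/* outermost: runs the special cleanup */
	printf("final nesting = %d\n", t.nesting);
	return 0;
}

The CONFIG_PROVE_LOCKING check in the hunk warns if the counter is ever observed stuck in that
transient negative range, which would indicate an unbalanced unlock.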

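The rcu_nocb_wait_gp() hunks keep sleeping on a two-element wait-queue array indexed by the low
bit of the grace-period number (rnp->nocb_gp_wq[c & 0x1]), and rcu_nocb_gp_cleanup() wakes the
element selected by the grace period that just ended. The pthread-based sketch below is a
minimal user-space model of that parity scheme under stated assumptions: the names are
hypothetical, the kernel uses swait queues rather than condition variables, and signal handling
is ignored.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq[2] = { PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER };
static unsigned long completed;		/* models rnp->completed */

/* Wait until grace period "c" has completed (models the swait_event loop). */
static void wait_for_gp(unsigned long c)
{
	pthread_mutex_lock(&lock);
	while (completed < c)
		pthread_cond_wait(&wq[c & 0x1], &lock);
	pthread_mutex_unlock(&lock);
}

/* Retire the next grace period and wake its waiters (models swake_up_all()). */
static void end_gp(void)
{
	pthread_mutex_lock(&lock);
	completed++;
	pthread_cond_broadcast(&wq[completed & 0x1]);
	pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
	unsigned long c = (unsigned long)arg;

	wait_for_gp(c);
	printf("callbacks queued for GP %lu may now be invoked\n", c);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, (void *)1UL);	/* wait for GP 1 */
	end_gp();					/* retire GP 1 */
	pthread_join(t, NULL);
	return 0;
}

Indexing by the low bit keeps waiters for the grace period now ending separate from waiters for
the next one, which is why a two-element array suffices in the hunks above.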