Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c    2
-rw-r--r--  kernel/sched/fair.c   73
-rw-r--r--  kernel/sched/sched.h  11
3 files changed, 43 insertions(+), 43 deletions(-)
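At a glance, this patch moves the NOHZ tick-stopped state out of the per-CPU nohz_flags atomic word and into a plain rq->nohz_tick_stopped field, and folds the sd_llc busy/idle bookkeeping (set_cpu_sd_state_busy()/set_cpu_sd_state_idle()) into nohz_balance_exit_idle()/nohz_balance_enter_idle(). A minimal before/after sketch of the state tracking, using only identifiers that appear in the hunks below:

        /* Before: the tick-stopped bit shares an atomic flag word. */
        if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
                return;
        atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));     /* enter idle */
        atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu)); /* exit idle  */

        /* After: a plain runqueue field, written only by the owning CPU. */
        if (rq->nohz_tick_stopped)
                return;
        rq->nohz_tick_stopped = 1; /* enter idle */
        rq->nohz_tick_stopped = 0; /* exit idle  */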
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8a10a2ce30a4..c7faeb7bd03a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5861,7 +5861,7 @@ int sched_cpu_dying(unsigned int cpu)
calc_load_migrate(rq);
update_max_interval();
- nohz_balance_exit_idle(cpu);
+ nohz_balance_exit_idle(rq);
hrtick_clear(rq);
return 0;
}
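The core.c change is mechanical: sched_cpu_dying() already computes rq = cpu_rq(cpu), so it can hand the runqueue to nohz_balance_exit_idle() directly instead of passing the CPU number. An abridged sketch of the resulting caller, keeping only the lines this hunk touches plus their immediate context:

        int sched_cpu_dying(unsigned int cpu)
        {
                struct rq *rq = cpu_rq(cpu);

                /* ... teardown elided; unchanged by this patch ... */
                calc_load_migrate(rq);
                update_max_interval();
                nohz_balance_exit_idle(rq); /* was: nohz_balance_exit_idle(cpu) */
                hrtick_clear(rq);
                return 0;
        }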
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 85232dad89c9..494d5db9a6cd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9103,23 +9103,6 @@ static inline int find_new_ilb(void)
return nr_cpu_ids;
}
-static inline void set_cpu_sd_state_busy(void)
-{
- struct sched_domain *sd;
- int cpu = smp_processor_id();
-
- rcu_read_lock();
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
-
- if (!sd || !sd->nohz_idle)
- goto unlock;
- sd->nohz_idle = 0;
-
- atomic_inc(&sd->shared->nr_busy_cpus);
-unlock:
- rcu_read_unlock();
-}
-
/*
* Kick a CPU to do the nohz balancing, if it is time for it. We pick the
* nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
@@ -9175,8 +9158,7 @@ static void nohz_balancer_kick(struct rq *rq)
* We may be recently in ticked or tickless idle mode. At the first
* busy tick after returning from idle, we will update the busy stats.
*/
- set_cpu_sd_state_busy();
- nohz_balance_exit_idle(cpu);
+ nohz_balance_exit_idle(rq);
/*
* None are in tickless mode and hence no need for NOHZ idle load
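The busy-tick path previously had to keep two calls in step by hand. Since nohz_balance_exit_idle() now invokes set_cpu_sd_state_busy() itself (see the next hunk), the pair collapses to a single call:

        /* Before: two calls that had to stay ordered and paired. */
        set_cpu_sd_state_busy();
        nohz_balance_exit_idle(cpu);

        /* After: one call; set_cpu_sd_state_busy(rq->cpu) runs inside it,
         * and only when the tick had actually been stopped. */
        nohz_balance_exit_idle(rq);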
@@ -9240,27 +9222,39 @@ out:
kick_ilb(flags);
}
-void nohz_balance_exit_idle(unsigned int cpu)
+static void set_cpu_sd_state_busy(int cpu)
{
- unsigned int flags = atomic_read(nohz_flags(cpu));
+ struct sched_domain *sd;
- if (unlikely(flags & NOHZ_TICK_STOPPED)) {
- /*
- * Completely isolated CPUs don't ever set, so we must test.
- */
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- }
+ rcu_read_lock();
+ sd = rcu_dereference(per_cpu(sd_llc, cpu));
- atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
- }
+ if (!sd || !sd->nohz_idle)
+ goto unlock;
+ sd->nohz_idle = 0;
+
+ atomic_inc(&sd->shared->nr_busy_cpus);
+unlock:
+ rcu_read_unlock();
}
-void set_cpu_sd_state_idle(void)
+void nohz_balance_exit_idle(struct rq *rq)
+{
+ SCHED_WARN_ON(rq != this_rq());
+
+ if (likely(!rq->nohz_tick_stopped))
+ return;
+
+ rq->nohz_tick_stopped = 0;
+ cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+
+ set_cpu_sd_state_busy(rq->cpu);
+}
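Two properties of the new exit path are worth noting. SCHED_WARN_ON(rq != this_rq()) documents that only the owning CPU writes nohz_tick_stopped, which is what makes the plain (non-atomic) field safe; and the common busy-tick case shrinks from an atomic read of the shared flag word to a single plain load:

        /* Old fast path: atomic read of a shared flag word. */
        if (!(atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED))
                return;

        /* New fast path: one plain load of a CPU-local field. */
        if (likely(!rq->nohz_tick_stopped))
                return;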
+
+static void set_cpu_sd_state_idle(int cpu)
{
struct sched_domain *sd;
- int cpu = smp_processor_id();
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_llc, cpu));
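The diff context cuts off the rest of set_cpu_sd_state_idle(); apart from now taking cpu as a parameter, it is the mirror image of set_cpu_sd_state_busy() above. A sketch of the idle side, assuming it stays symmetric with the busy side shown in this patch:

        static void set_cpu_sd_state_idle(int cpu)
        {
                struct sched_domain *sd;

                rcu_read_lock();
                sd = rcu_dereference(per_cpu(sd_llc, cpu));

                if (!sd || sd->nohz_idle)
                        goto unlock;
                sd->nohz_idle = 1;

                /* one fewer busy CPU sharing this last-level cache */
                atomic_dec(&sd->shared->nr_busy_cpus);
        unlock:
                rcu_read_unlock();
        }

Together the pair keeps sd->shared->nr_busy_cpus tracking the number of non-tickless CPUs in the LLC domain.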
@@ -9280,6 +9274,10 @@ unlock:
*/
void nohz_balance_enter_idle(int cpu)
{
+ struct rq *rq = cpu_rq(cpu);
+
+ SCHED_WARN_ON(cpu != smp_processor_id());
+
/* If this CPU is going down, then nothing needs to be done: */
if (!cpu_active(cpu))
return;
@@ -9288,16 +9286,19 @@ void nohz_balance_enter_idle(int cpu)
if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
return;
- if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
+ if (rq->nohz_tick_stopped)
return;
/* If we're a completely isolated CPU, we don't play: */
- if (on_null_domain(cpu_rq(cpu)))
+ if (on_null_domain(rq))
return;
+ rq->nohz_tick_stopped = 1;
+
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
atomic_inc(&nohz.nr_cpus);
- atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+ set_cpu_sd_state_idle(cpu);
}
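Both halves are idempotent thanks to the nohz_tick_stopped check, so repeated ticks or repeated idle entries cannot unbalance nohz.nr_cpus or nr_busy_cpus. Illustrated as a call sequence on one CPU, assuming cpu == smp_processor_id() and rq == cpu_rq(cpu):

        nohz_balance_enter_idle(cpu); /* marks CPU idle, bumps nohz.nr_cpus */
        nohz_balance_enter_idle(cpu); /* no-op: rq->nohz_tick_stopped is set */
        nohz_balance_exit_idle(rq);   /* undoes the first call exactly once */
        nohz_balance_exit_idle(rq);   /* no-op: tick already marked running */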
#else
static inline void nohz_balancer_kick(struct rq *rq) { }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 21381d276709..818f22dbc7ea 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -764,6 +764,7 @@ struct rq {
unsigned long last_load_update_tick;
unsigned long last_blocked_load_update_tick;
#endif /* CONFIG_SMP */
+ unsigned int nohz_tick_stopped;
atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
@@ -2035,11 +2036,9 @@ extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);
#ifdef CONFIG_NO_HZ_COMMON
-#define NOHZ_TICK_STOPPED_BIT 0
-#define NOHZ_BALANCE_KICK_BIT 1
-#define NOHZ_STATS_KICK_BIT 2
+#define NOHZ_BALANCE_KICK_BIT 0
+#define NOHZ_STATS_KICK_BIT 1
-#define NOHZ_TICK_STOPPED BIT(NOHZ_TICK_STOPPED_BIT)
#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
@@ -2047,9 +2046,9 @@ extern void cfs_bandwidth_usage_dec(void);
#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
-extern void nohz_balance_exit_idle(unsigned int cpu);
+extern void nohz_balance_exit_idle(struct rq *rq);
#else
-static inline void nohz_balance_exit_idle(unsigned int cpu) { }
+static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif
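With NOHZ_TICK_STOPPED gone, the remaining kick bits shift down and nohz_flags() carries only cross-CPU kick requests, the one piece of state that still needs to be atomic because remote CPUs set it. A sketch of the remaining usage pattern, assuming a consumer that atomically clears what it acts on (the consumer code itself is outside this diff):

        /* remote CPU: request balance/stats work from an idle CPU */
        atomic_or(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK, nohz_flags(cpu));

        /* idle CPU: fetch and clear pending requests in one step */
        unsigned int flags = atomic_fetch_andnot(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK,
                                                 nohz_flags(cpu));
        if (flags & NOHZ_BALANCE_KICK) {
                /* ... run idle load balancing ... */
        }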