Diffstat (limited to 'kernel/sched/idle.c')
-rw-r--r--	kernel/sched/idle.c	| 65
1 file changed, 36 insertions(+), 29 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 31231925f1ec..2c85c86b455f 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -81,6 +81,25 @@ void __weak arch_cpu_idle(void)
 	cpu_idle_force_poll = 1;
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE
+DEFINE_STATIC_KEY_FALSE(arch_needs_tick_broadcast);
+
+static inline void cond_tick_broadcast_enter(void)
+{
+	if (static_branch_unlikely(&arch_needs_tick_broadcast))
+		tick_broadcast_enter();
+}
+
+static inline void cond_tick_broadcast_exit(void)
+{
+	if (static_branch_unlikely(&arch_needs_tick_broadcast))
+		tick_broadcast_exit();
+}
+#else
+static inline void cond_tick_broadcast_enter(void) { }
+static inline void cond_tick_broadcast_exit(void) { }
+#endif
+
 /**
  * default_idle_call - Default CPU idle routine.
  *
@@ -90,6 +109,7 @@ void __cpuidle default_idle_call(void)
 {
 	instrumentation_begin();
 	if (!current_clr_polling_and_test()) {
+		cond_tick_broadcast_enter();
 		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
 
@@ -99,6 +119,7 @@ void __cpuidle default_idle_call(void)
 
 		start_critical_timings();
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+		cond_tick_broadcast_exit();
 	}
 	local_irq_enable();
 	instrumentation_end();
@@ -151,19 +172,13 @@ static void cpuidle_idle_call(void)
 
 	/*
	 * Check if the idle task must be rescheduled. If it is the
-	 * case, exit the function after re-enabling the local irq.
+	 * case, exit the function after re-enabling the local IRQ.
 	 */
 	if (need_resched()) {
 		local_irq_enable();
 		return;
 	}
 
-	/*
-	 * The RCU framework needs to be told that we are entering an idle
-	 * section, so no more rcu read side critical sections and one more
-	 * step to the grace period
-	 */
-
 	if (cpuidle_not_available(drv, dev)) {
 		tick_nohz_idle_stop_tick();
 
@@ -223,7 +238,7 @@ exit_idle:
 	__current_set_polling();
 
 	/*
-	 * It is up to the idle functions to reenable local interrupts
+	 * It is up to the idle functions to re-enable local interrupts
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
@@ -256,7 +271,6 @@ static void do_idle(void)
 	tick_nohz_idle_enter();
 
 	while (!need_resched()) {
-		rmb();
 
 		/*
 		 * Interrupts shouldn't be re-enabled from that point on until
@@ -291,7 +305,6 @@ static void do_idle(void)
 
 		local_irq_disable();
 		if (cpu_is_offline(cpu)) {
-			tick_nohz_idle_stop_tick();
 			cpuhp_report_idle_dead();
 			arch_cpu_idle_dead();
 		}
@@ -300,7 +313,7 @@ static void do_idle(void)
 		rcu_nocb_flush_deferred_wakeup();
 
 		/*
-		 * In poll mode we reenable interrupts and spin. Also if we
+		 * In poll mode we re-enable interrupts and spin. Also if we
 		 * detected in the wakeup from idle path that the tick
 		 * broadcast device expired for us, we don't want to go deep
 		 * idle as we know that the IPI is going to arrive right away.
@@ -385,8 +398,8 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
 	cpuidle_use_deepest_state(latency_ns);
 
 	it.done = 0;
-	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
-	it.timer.function = idle_inject_timer_fn;
+	hrtimer_setup_on_stack(&it.timer, idle_inject_timer_fn, CLOCK_MONOTONIC,
+			       HRTIMER_MODE_REL_HARD);
 	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
 		      HRTIMER_MODE_REL_PINNED_HARD);
 
@@ -436,43 +449,38 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
 	resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
+	dl_server_update_idle_time(rq, prev);
+	scx_update_idle(rq, false, true);
 }
 
 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
 {
 	update_idle_core(rq);
+	scx_update_idle(rq, true, true);
 	schedstat_inc(rq->sched_goidle);
+	next->se.exec_start = rq_clock_task(rq);
 }
 
-#ifdef CONFIG_SMP
-static struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq)
 {
+	scx_update_idle(rq, true, false);
 	return rq->idle;
 }
-#endif
-
-struct task_struct *pick_next_task_idle(struct rq *rq)
-{
-	struct task_struct *next = rq->idle;
-
-	set_next_task_idle(rq, next, true);
-
-	return next;
-}
 
 /*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
-static void
+static bool
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	raw_spin_rq_unlock_irq(rq);
 	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_rq_lock_irq(rq);
+	return true;
 }
 
 /*
@@ -514,13 +522,12 @@ DEFINE_SCHED_CLASS(idle) = {
 
 	.wakeup_preempt		= wakeup_preempt_idle,
 
-	.pick_next_task		= pick_next_task_idle,
+	.pick_task		= pick_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 	.set_next_task		= set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_idle,
-	.pick_task		= pick_task_idle,
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
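Note: the first hunk gates tick_broadcast_enter()/tick_broadcast_exit() behind a static key, so architectures that never need tick broadcast pay only a NOP-patched branch in the idle path. A minimal sketch of how an architecture might opt in, assuming it selects CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE and that arch_needs_tick_broadcast is visible through a shared header; the initcall name below is illustrative and not part of this diff:

	#include <linux/init.h>
	#include <linux/jump_label.h>

	/* Declared to match the DEFINE_STATIC_KEY_FALSE() added in kernel/sched/idle.c. */
	extern struct static_key_false arch_needs_tick_broadcast;

	static int __init arch_tick_broadcast_init(void)
	{
		/*
		 * Flip the key at boot: cond_tick_broadcast_enter()/_exit()
		 * then really call tick_broadcast_enter()/_exit(). If the
		 * key stays false, both helpers reduce to a patched-out
		 * branch, keeping the common idle path cheap.
		 */
		static_branch_enable(&arch_needs_tick_broadcast);
		return 0;
	}
	early_initcall(arch_tick_broadcast_init);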