Diffstat (limited to 'drivers/acpi/processor_idle.c')
| -rw-r--r-- | drivers/acpi/processor_idle.c | 197 |
1 file changed, 93 insertions(+), 104 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7bf882fcd64b..89f2f08b2554 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -16,7 +16,6 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/sched.h>       /* need_resched() */
-#include <linux/sort.h>
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu.h>
@@ -25,6 +24,8 @@
 #include <acpi/processor.h>
 #include <linux/context_tracking.h>
 
+#include "internal.h"
+
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
  * available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -56,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
 };
 
 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+	if (cpuidle_get_driver() == &acpi_idle_driver)
+		arch_cpu_rescan_dead_smt_siblings();
+}
+
 static
 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
 
@@ -109,8 +116,8 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
 static void __cpuidle acpi_safe_halt(void)
 {
 	if (!tif_need_resched()) {
-		safe_halt();
-		local_irq_disable();
+		raw_safe_halt();
+		raw_local_irq_disable();
 	}
 }
 
@@ -147,7 +154,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 
 static void __lapic_timer_propagate_broadcast(void *arg)
 {
-	struct acpi_processor *pr = (struct acpi_processor *) arg;
+	struct acpi_processor *pr = arg;
 
 	if (pr->power.timer_broadcast_on_state < INT_MAX)
 		tick_broadcast_enable();
@@ -269,6 +276,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
 		 pr->power.states[ACPI_STATE_C3].address);
 
+	if (!pr->power.states[ACPI_STATE_C2].address &&
+	    !pr->power.states[ACPI_STATE_C3].address)
+		return -ENODEV;
+
 	return 0;
 }
 
@@ -386,25 +397,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 }
 
-static int acpi_cst_latency_cmp(const void *a, const void *b)
+static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
 {
-	const struct acpi_processor_cx *x = a, *y = b;
+	int i, j, k;
 
-	if (!(x->valid && y->valid))
-		return 0;
-	if (x->latency > y->latency)
-		return 1;
-	if (x->latency < y->latency)
-		return -1;
-	return 0;
-}
-static void acpi_cst_latency_swap(void *a, void *b, int n)
-{
-	struct acpi_processor_cx *x = a, *y = b;
+	for (i = 1; i < length; i++) {
+		if (!states[i].valid)
+			continue;
 
-	if (!(x->valid && y->valid))
-		return;
-	swap(x->latency, y->latency);
+		for (j = i - 1, k = i; j >= 0; j--) {
+			if (!states[j].valid)
+				continue;
+
+			if (states[j].latency > states[k].latency)
+				swap(states[j].latency, states[k].latency);
+
+			k = j;
+		}
+	}
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -449,10 +459,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 	if (buggy_latency) {
 		pr_notice("FW issue: working around C-state latencies out of order\n");
-		sort(&pr->power.states[1], max_cstate,
-		     sizeof(struct acpi_processor_cx),
-		     acpi_cst_latency_cmp,
-		     acpi_cst_latency_swap);
+		acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
 	}
 
 	lapic_timer_propagate_broadcast(pr);
@@ -462,10 +469,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
 {
-	unsigned int i;
 	int result;
 
-
 	/* NOTE: the idle thread may not be running while calling
 	 * this function */
 
@@ -482,17 +487,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
 	acpi_processor_get_power_info_default(pr);
 
 	pr->power.count = acpi_processor_power_verify(pr);
-
-	/*
-	 * if one state of type C2 or C3 is available, mark this
-	 * CPU as being "idle manageable"
-	 */
-	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
-		if (pr->power.states[i].valid) {
-			pr->power.count = i;
-			pr->flags.power = 1;
-		}
-	}
+	pr->flags.power = 1;
 
 	return 0;
 }
@@ -523,8 +518,11 @@ static int acpi_idle_bm_check(void)
 	return bm_status;
 }
 
-static void wait_for_freeze(void)
+static __cpuidle void io_idle(unsigned long addr)
 {
+	/* IO port based C-state */
+	inb(addr);
+
 #ifdef CONFIG_X86
 	/* No delay is needed if we are in guest */
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -569,9 +567,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
 	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
 		acpi_safe_halt();
 	} else {
-		/* IO port based C-state */
-		inb(cx->address);
-		wait_for_freeze();
+		io_idle(cx->address);
 	}
 
 	perf_lopwr_cb(false);
@@ -582,7 +578,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
  * @dev: the target CPU
  * @index: the index of suggested state
  */
-static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
@@ -591,23 +587,17 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 	while (1) {
 
 		if (cx->entry_method == ACPI_CSTATE_HALT)
-			safe_halt();
+			raw_safe_halt();
 		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
-			inb(cx->address);
-			wait_for_freeze();
+			io_idle(cx->address);
+		} else if (cx->entry_method == ACPI_CSTATE_FFH) {
+			acpi_processor_ffh_play_dead(cx);
 		} else
-			return -ENODEV;
-
-#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
-		cond_wakeup_cpu0();
-#endif
+			return;
 	}
-
-	/* Never reached */
-	return 0;
 }
 
-static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
+static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
 	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
 		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
@@ -642,6 +632,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
 	 */
 	bool dis_bm = pr->flags.bm_control;
 
+	instrumentation_begin();
+
 	/* If we can skip BM, demote to a safe state. */
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 		dis_bm = false;
@@ -663,11 +655,11 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
 		raw_spin_unlock(&c3_lock);
 	}
 
-	ct_idle_enter();
+	ct_cpuidle_enter();
 
 	acpi_idle_do_entry(cx);
 
-	ct_idle_exit();
+	ct_cpuidle_exit();
 
 	/* Re-enable bus master arbitration */
 	if (dis_bm) {
@@ -677,6 +669,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
 		raw_spin_unlock(&c3_lock);
 	}
 
+	instrumentation_end();
+
 	return index;
 }
 
@@ -738,18 +732,16 @@ static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
 	return 0;
 }
 
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
-					   struct cpuidle_device *dev)
+static void acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+					    struct cpuidle_device *dev)
 {
 	int i, count = ACPI_IDLE_STATE_START;
 	struct acpi_processor_cx *cx;
-	struct cpuidle_state *state;
 
 	if (max_cstate == 0)
 		max_cstate = 1;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
-		state = &acpi_idle_driver.states[count];
 		cx = &pr->power.states[i];
 
 		if (!cx->valid)
@@ -757,27 +749,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 
 		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 
-		if (lapic_timer_needs_broadcast(pr, cx))
-			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
-
-		if (cx->type == ACPI_STATE_C3) {
-			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
-			if (pr->flags.bm_check)
-				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
-		}
-
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
 			break;
 	}
-
-	if (!count)
-		return -EINVAL;
-
-	return 0;
 }
 
-static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+static void acpi_processor_setup_cstates(struct acpi_processor *pr)
 {
 	int i, count;
 	struct acpi_processor_cx *cx;
@@ -808,12 +786,12 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 		state->enter = acpi_idle_enter;
 
 		state->flags = 0;
-		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
-		    cx->type == ACPI_STATE_C3) {
-			state->enter_dead = acpi_idle_play_dead;
-			if (cx->type != ACPI_STATE_C3)
-				drv->safe_state_index = count;
-		}
+
+		state->enter_dead = acpi_idle_play_dead;
+
+		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2)
+			drv->safe_state_index = count;
+
 		/*
 		 * Halt-induced C1 is not good for ->enter_s2idle, because it
 		 * re-enables interrupts on exit. Moreover, C1 is generally not
@@ -824,17 +802,21 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
 			state->enter_s2idle = acpi_idle_enter_s2idle;
 
+		if (lapic_timer_needs_broadcast(pr, cx))
+			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+		if (cx->type == ACPI_STATE_C3) {
+			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+			if (pr->flags.bm_check)
+				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+		}
+
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
 			break;
 	}
 
 	drv->state_count = count;
-
-	if (!count)
-		return -EINVAL;
-
-	return 0;
 }
 
 static inline void acpi_processor_cstate_first_run_checks(void)
@@ -1004,11 +986,6 @@ end:
 	return ret;
 }
 
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
 /**
  * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
  *
@@ -1051,9 +1028,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
 	curr_level->composite_states[curr_level->composite_states_size++] = t;
 }
 
-static int flatten_lpi_states(struct acpi_processor *pr,
-			      struct acpi_lpi_states_array *curr_level,
-			      struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+				       unsigned int flat_state_cnt,
+				       struct acpi_lpi_states_array *curr_level,
+				       struct acpi_lpi_states_array *prev_level)
 {
 	int i, j, state_count = curr_level->size;
 	struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1093,7 +1071,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
 	}
 
 	kfree(curr_level->entries);
-	return 0;
+	return flat_state_cnt;
 }
 
 int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1108,6 +1086,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 	acpi_handle handle = pr->handle, pr_ahandle;
 	struct acpi_device *d = NULL;
 	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+	unsigned int state_count;
 
 	/* make sure our architecture has support */
 	ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1120,14 +1099,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 	if (!acpi_has_method(handle, "_LPI"))
 		return -EINVAL;
 
-	flat_state_cnt = 0;
 	prev = &info[0];
 	curr = &info[1];
 	handle = pr->handle;
 	ret = acpi_processor_evaluate_lpi(handle, prev);
 	if (ret)
 		return ret;
-	flatten_lpi_states(pr, prev, NULL);
+	state_count = flatten_lpi_states(pr, 0, prev, NULL);
 
 	status = acpi_get_parent(handle, &pr_ahandle);
 	while (ACPI_SUCCESS(status)) {
@@ -1149,18 +1127,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 			break;
 
 		/* flatten all the LPI states in this level of hierarchy */
-		flatten_lpi_states(pr, curr, prev);
+		state_count = flatten_lpi_states(pr, state_count, curr, prev);
 
 		tmp = prev, prev = curr, curr = tmp;
 
 		status = acpi_get_parent(handle, &pr_ahandle);
 	}
 
-	pr->power.count = flat_state_cnt;
 	/* reset the index after flattening */
-	for (i = 0; i < pr->power.count; i++)
+	for (i = 0; i < state_count; i++)
 		pr->power.lpi_states[i].index = i;
 
+	pr->power.count = state_count;
+
 	/* Tell driver that _LPI is supported. */
 	pr->flags.has_lpi = 1;
 	pr->flags.power = 1;
@@ -1217,8 +1196,9 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
 		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = lpi->wake_latency;
 		state->target_residency = lpi->min_residency;
-		if (lpi->arch_flags)
-			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+		state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
+		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
+			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
 		state->enter = acpi_idle_lpi_enter;
 		drv->safe_state_index = i;
 	}
@@ -1251,7 +1231,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 	if (pr->flags.has_lpi)
 		return acpi_processor_setup_lpi_states(pr);
 
-	return acpi_processor_setup_cstates(pr);
+	acpi_processor_setup_cstates(pr);
+	return 0;
 }
 
 /**
@@ -1271,7 +1252,8 @@ static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
 	if (pr->flags.has_lpi)
 		return acpi_processor_ffh_lpi_probe(pr->id);
 
-	return acpi_processor_setup_cpuidle_cx(pr, dev);
+	acpi_processor_setup_cpuidle_cx(pr, dev);
+	return 0;
 }
 
 static int acpi_processor_get_power_info(struct acpi_processor *pr)
@@ -1410,6 +1392,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
 		if (retval) {
 			if (acpi_processor_registered == 0)
 				cpuidle_unregister_driver(&acpi_idle_driver);
+
+			per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+			kfree(dev);
 			return retval;
 		}
 		acpi_processor_registered++;
@@ -1429,8 +1414,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
 		acpi_processor_registered--;
 		if (acpi_processor_registered == 0)
 			cpuidle_unregister_driver(&acpi_idle_driver);
+
+		kfree(dev);
 	}
 
 	pr->flags.power_setup_done = 0;
 	return 0;
 }
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
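
Note on the C-state latency workaround above: the new acpi_cst_latency_sort() replaces the sort()/acpi_cst_latency_cmp()/acpi_cst_latency_swap() combination with an insertion sort that reorders only the latency values of valid states and never compares against an invalid entry. The following is a minimal userspace sketch of that approach, assuming a simplified cx_stub structure and a swap_u32() macro as stand-ins for the kernel's struct acpi_processor_cx and swap() helper; it is an illustration, not the driver code itself.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct acpi_processor_cx. */
struct cx_stub {
	bool valid;
	unsigned int latency;
};

/* Stand-in for the kernel's swap() helper. */
#define swap_u32(a, b) do { unsigned int _t = (a); (a) = (b); (b) = _t; } while (0)

/* Insertion sort over latency values, skipping invalid entries entirely. */
static void cst_latency_sort(struct cx_stub *states, size_t length)
{
	int i, j, k;

	for (i = 1; i < (int)length; i++) {
		if (!states[i].valid)
			continue;

		/* Walk backwards; k tracks the most recent valid slot seen. */
		for (j = i - 1, k = i; j >= 0; j--) {
			if (!states[j].valid)
				continue;

			/* Keep the smaller latency in the earlier valid slot. */
			if (states[j].latency > states[k].latency)
				swap_u32(states[j].latency, states[k].latency);

			k = j;
		}
	}
}

int main(void)
{
	struct cx_stub s[] = {
		{ true, 100 }, { false, 0 }, { true, 10 }, { true, 50 },
	};
	size_t n = sizeof(s) / sizeof(s[0]);
	size_t i;

	cst_latency_sort(s, n);

	/* Valid latencies come out as 10, 50, 100; the invalid slot is untouched. */
	for (i = 0; i < n; i++)
		printf("valid=%d latency=%u\n", s[i].valid, s[i].latency);

	return 0;
}

Unlike the old sort() callback pair, which returned "equal" whenever either element was invalid and could therefore leave the array inconsistently ordered, this variant only ever compares two valid states, which is what the "FW issue" workaround in acpi_processor_power_verify() relies on.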
