Diffstat (limited to 'drivers/acpi/processor_throttling.c')
-rw-r--r--   drivers/acpi/processor_throttling.c   349
1 file changed, 161 insertions, 188 deletions
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index e7dd2c1fee79..f9c2bc1d4a3a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * processor_throttling.c - Throttling submodule of the ACPI processor driver
  *
@@ -5,46 +6,24 @@
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *			- Added processor hotplug support
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *			- Added processor hotplug support
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/cpufreq.h>
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
 #include <acpi/processor.h>
-
-#define PREFIX "ACPI: "
-
-#define ACPI_PROCESSOR_CLASS "processor"
-#define _COMPONENT ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("processor_throttling");
+#include <asm/io.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
 
 /* ignore_tpc:
  *  0 -> acpi processor driver doesn't ignore _TPC values
@@ -59,16 +38,22 @@ struct throttling_tstate {
 	int target_state;		/* target T-state */
 };
 
+struct acpi_processor_throttling_arg {
+	struct acpi_processor *pr;
+	int target_state;
+	bool force;
+};
+
 #define THROTTLING_PRECHANGE      (1)
 #define THROTTLING_POSTCHANGE     (2)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr,
-		int state, bool force);
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+					   int state, bool force, bool direct);
 
 static int acpi_processor_update_tsd_coord(void)
 {
-	int count, count_target;
+	int count_target;
 	int retval = 0;
 	unsigned int i, j;
 	cpumask_var_t covered_cpus;
@@ -125,7 +110,6 @@ static int acpi_processor_update_tsd_coord(void)
 
 		/* Validate the Domain info */
 		count_target = pdomain->num_processors;
-		count = 1;
 
 		for_each_possible_cpu(j) {
 			if (i == j)
@@ -158,7 +142,6 @@ static int acpi_processor_update_tsd_coord(void)
 
 			cpumask_set_cpu(j, covered_cpus);
 			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
-			count++;
 		}
 		for_each_possible_cpu(j) {
 			if (i == j)
@@ -211,19 +194,15 @@ err_ret:
  */
 void acpi_processor_throttling_init(void)
 {
-	if (acpi_processor_update_tsd_coord()) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Assume no T-state coordination\n"));
-	}
-
-	return;
+	if (acpi_processor_update_tsd_coord())
+		pr_debug("Assume no T-state coordination\n");
 }
 
 static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 {
 	struct throttling_tstate *p_tstate = data;
 	struct acpi_processor *pr;
-	unsigned int cpu ;
+	unsigned int cpu;
 	int target_state;
 	struct acpi_processor_limit *p_limit;
 	struct acpi_processor_throttling *p_throttling;
@@ -231,12 +210,13 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 	cpu = p_tstate->cpu;
 	pr = per_cpu(processors, cpu);
 	if (!pr) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
+		pr_debug("Invalid pr pointer\n");
 		return 0;
 	}
 	if (!pr->flags.throttling) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
-				"unsupported on CPU %d\n", cpu));
+		acpi_handle_debug(pr->handle,
+				  "Throttling control unsupported on CPU %d\n",
+				  cpu);
 		return 0;
 	}
 	target_state = p_tstate->target_state;
@@ -255,14 +235,13 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 		if (pr->throttling_platform_limit > target_state)
 			target_state = pr->throttling_platform_limit;
 		if (target_state >= p_throttling->state_count) {
-			printk(KERN_WARNING
-				"Exceed the limit of T-state \n");
+			pr_warn("Exceed the limit of T-state\n");
 			target_state = p_throttling->state_count - 1;
 		}
 		p_tstate->target_state = target_state;
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
-				"target T-state of CPU %d is T%d\n",
-				cpu, target_state));
+		acpi_handle_debug(pr->handle,
+				  "PreChange Event: target T-state of CPU %d is T%d\n",
+				  cpu, target_state);
 		break;
 	case THROTTLING_POSTCHANGE:
 		/*
@@ -270,13 +249,12 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 		 * T-state flag of acpi_processor_throttling.
 		 */
 		p_throttling->state = target_state;
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
-				"CPU %d is switched to T%d\n",
-				cpu, target_state));
+		acpi_handle_debug(pr->handle,
+				  "PostChange Event: CPU %d is switched to T%d\n",
+				  cpu, target_state);
 		break;
 	default:
-		printk(KERN_WARNING
-			"Unsupported Throttling notifier event\n");
+		pr_warn("Unsupported Throttling notifier event\n");
 		break;
 	}
 
@@ -299,9 +277,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 
 	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
 	if (ACPI_FAILURE(status)) {
-		if (status != AE_NOT_FOUND) {
-			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
-		}
+		if (status != AE_NOT_FOUND)
+			acpi_evaluation_failure_warn(pr->handle, "_TPC", status);
+
 		return -ENODEV;
 	}
 
@@ -376,11 +354,11 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
  *	3. TSD domain
  */
 void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
-					unsigned long action)
+					bool is_dead)
 {
 	int result = 0;
 
-	if (action == CPU_DEAD) {
+	if (is_dead) {
 		/* When one CPU is offline, the T-state throttling
 		 * will be invalidated.
 		 */
@@ -427,21 +405,21 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 	acpi_status status = 0;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *ptc = NULL;
-	union acpi_object obj = { 0 };
+	union acpi_object obj;
 	struct acpi_processor_throttling *throttling;
 
 	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
-		if (status != AE_NOT_FOUND) {
-			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
-		}
+		if (status != AE_NOT_FOUND)
+			acpi_evaluation_failure_warn(pr->handle, "_PTC", status);
+
 		return -ENODEV;
 	}
 
 	ptc = (union acpi_object *)buffer.pointer;
 	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE) ||
 	    (ptc->package.count != 2)) {
-		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
+		pr_err("Invalid _PTC data\n");
 		result = -EFAULT;
 		goto end;
 	}
@@ -455,8 +433,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 	if ((obj.type != ACPI_TYPE_BUFFER)
 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
 	    || (obj.buffer.pointer == NULL)) {
-		printk(KERN_ERR PREFIX
-		       "Invalid _PTC data (control_register)\n");
+		pr_err("Invalid _PTC data (control_register)\n");
 		result = -EFAULT;
 		goto end;
 	}
@@ -472,7 +449,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 	if ((obj.type != ACPI_TYPE_BUFFER)
 	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
 	    || (obj.buffer.pointer == NULL)) {
-		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
+		pr_err("Invalid _PTC data (status_register)\n");
 		result = -EFAULT;
 		goto end;
 	}
@@ -484,19 +461,19 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 
 	if ((throttling->control_register.bit_width +
 	     throttling->control_register.bit_offset) > 32) {
-		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
+		pr_err("Invalid _PTC control register\n");
 		result = -EFAULT;
 		goto end;
 	}
 
 	if ((throttling->status_register.bit_width +
 	     throttling->status_register.bit_offset) > 32) {
-		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
+		pr_err("Invalid _PTC status register\n");
 		result = -EFAULT;
 		goto end;
 	}
 
-      end:
+end:
 	kfree(buffer.pointer);
 
 	return result;
@@ -517,26 +494,27 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
 
 	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
-		if (status != AE_NOT_FOUND) {
-			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
-		}
+		if (status != AE_NOT_FOUND)
+			acpi_evaluation_failure_warn(pr->handle, "_TSS", status);
+
 		return -ENODEV;
 	}
 
 	tss = buffer.pointer;
 	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
-		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
+		pr_err("Invalid _TSS data\n");
 		result = -EFAULT;
 		goto end;
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
-			  tss->package.count));
+	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  tss->package.count);
 
 	pr->throttling.state_count = tss->package.count;
 	pr->throttling.states_tss =
-	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
-		    GFP_KERNEL);
+		kmalloc_array(tss->package.count,
+			      sizeof(struct acpi_processor_tx_tss),
+			      GFP_KERNEL);
 	if (!pr->throttling.states_tss) {
 		result = -ENOMEM;
 		goto end;
 	}
@@ -551,27 +529,27 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
 		state.length = sizeof(struct acpi_processor_tx_tss);
 		state.pointer = tx;
 
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
+		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);
 
 		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
 		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
+			acpi_handle_warn(pr->handle, "Invalid _TSS data: %s\n",
+					 acpi_format_exception(status));
 			result = -EFAULT;
 			kfree(pr->throttling.states_tss);
 			goto end;
 		}
 
 		if (!tx->freqpercentage) {
-			printk(KERN_ERR PREFIX
-			       "Invalid _TSS data: freq is zero\n");
+			pr_err("Invalid _TSS data: freq is zero\n");
 			result = -EFAULT;
 			kfree(pr->throttling.states_tss);
 			goto end;
 		}
 	}
 
-      end:
+end:
 	kfree(buffer.pointer);
 
 	return result;
@@ -596,21 +574,21 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 
 	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
-		if (status != AE_NOT_FOUND) {
-			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
-		}
+		if (status != AE_NOT_FOUND)
+			acpi_evaluation_failure_warn(pr->handle, "_TSD", status);
+
 		return -ENODEV;
 	}
 
 	tsd = buffer.pointer;
 	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
-		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+		pr_err("Invalid _TSD data\n");
 		result = -EFAULT;
 		goto end;
 	}
 
 	if (tsd->package.count != 1) {
-		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+		pr_err("Invalid _TSD data\n");
 		result = -EFAULT;
 		goto end;
 	}
@@ -623,19 +601,19 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
 	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+		pr_err("Invalid _TSD data\n");
 		result = -EFAULT;
 		goto end;
 	}
 
 	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
-		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
+		pr_err("Unknown _TSD:num_entries\n");
 		result = -EFAULT;
 		goto end;
 	}
 
 	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
-		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
+		pr_err("Unknown _TSD:revision\n");
 		result = -EFAULT;
 		goto end;
 	}
@@ -656,7 +634,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 	}
 
-      end:
+end:
 	kfree(buffer.pointer);
 	return result;
 }
@@ -677,6 +655,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
+	/*
+	 * We don't care about error returns - we just try to mark
+	 * these reserved so that nobody else is confused into thinking
+	 * that this region might be unused..
+	 *
+	 * (In particular, allocating the IO range for Cardbus)
+	 */
+	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
 	pr->throttling.state = 0;
 
 	duty_mask = pr->throttling.state_count - 1;
@@ -703,9 +690,9 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 
 	local_irq_enable();
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+	acpi_handle_debug(pr->handle,
			  "Throttling state is T%d (%d%% throttling applied)\n",
-			  state, pr->throttling.states[state].performance));
+			  state, pr->throttling.states[state].performance);
 
 	return 0;
 }
@@ -719,13 +706,12 @@ static int acpi_throttling_rdmsr(u64 *value)
 
 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
		!this_cpu_has(X86_FEATURE_ACPI)) {
-		printk(KERN_ERR PREFIX
-			"HARDWARE addr space,NOT supported yet\n");
+		pr_err("HARDWARE addr space,NOT supported yet\n");
 	} else {
 		msr_low = 0;
 		msr_high = 0;
 		rdmsr_safe(MSR_IA32_THERM_CONTROL,
-			(u32 *)&msr_low , (u32 *) &msr_high);
+			(u32 *)&msr_low, (u32 *) &msr_high);
 		msr = (msr_high << 32) | msr_low;
 		*value = (u64) msr;
 		ret = 0;
@@ -740,8 +726,7 @@ static int acpi_throttling_wrmsr(u64 value)
 
 	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
		!this_cpu_has(X86_FEATURE_ACPI)) {
-		printk(KERN_ERR PREFIX
-			"HARDWARE addr space,NOT supported yet\n");
+		pr_err("HARDWARE addr space,NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
@@ -753,15 +738,13 @@
 #else
 static int acpi_throttling_rdmsr(u64 *value)
 {
-	printk(KERN_ERR PREFIX
-		"HARDWARE addr space,NOT supported yet\n");
+	pr_err("HARDWARE addr space,NOT supported yet\n");
 	return -1;
 }
 
 static int acpi_throttling_wrmsr(u64 value)
 {
-	printk(KERN_ERR PREFIX
-		"HARDWARE addr space,NOT supported yet\n");
+	pr_err("HARDWARE addr space,NOT supported yet\n");
 	return -1;
 }
 #endif
@@ -792,7 +775,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
 		ret = acpi_throttling_rdmsr(value);
 		break;
 	default:
-		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+		pr_err("Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
 	}
 	return ret;
@@ -825,7 +808,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr,
 		ret = acpi_throttling_wrmsr(value);
 		break;
 	default:
-		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+		pr_err("Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
 	}
 	return ret;
@@ -880,10 +863,11 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
 		if (state == -1) {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				"Invalid throttling state, reset\n"));
+			acpi_handle_debug(pr->handle,
+					  "Invalid throttling state, reset\n");
 			state = 0;
-			ret = acpi_processor_set_throttling(pr, state, true);
+			ret = __acpi_processor_set_throttling(pr, state, true,
+							      true);
 			if (ret)
 				return ret;
 		}
@@ -893,36 +877,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	return 0;
 }
 
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static long __acpi_processor_get_throttling(void *data)
 {
-	cpumask_var_t saved_mask;
-	int ret;
+	struct acpi_processor *pr = data;
+
+	return pr->throttling.acpi_processor_get_throttling(pr);
+}
 
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
 	if (!pr)
 		return -EINVAL;
 
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-		return -ENOMEM;
-
 	/*
-	 * Migrate task to the cpu pointed by pr.
+	 * This is either called from the CPU hotplug callback of
+	 * processor_driver or via the ACPI probe function. In the latter
+	 * case the CPU is not guaranteed to be online. Both call sites are
+	 * protected against CPU hotplug.
 	 */
-	cpumask_copy(saved_mask, &current->cpus_allowed);
-	/* FIXME: use work_on_cpu() */
-	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-		/* Can't migrate to the target pr->id CPU. Exit */
-		free_cpumask_var(saved_mask);
+	if (!cpu_online(pr->id))
 		return -ENODEV;
-	}
-	ret = pr->throttling.acpi_processor_get_throttling(pr);
-	/* restore the previous state */
-	set_cpus_allowed_ptr(current, saved_mask);
-	free_cpumask_var(saved_mask);
 
-	return ret;
+	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
 }
 
 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -930,15 +909,15 @@
 	int i, step;
 
 	if (!pr->throttling.address) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
+		acpi_handle_debug(pr->handle, "No throttling register\n");
 		return -EINVAL;
 	} else if (!pr->throttling.duty_width) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
+		acpi_handle_debug(pr->handle, "No throttling states\n");
 		return -EINVAL;
 	}
 	/* TBD: Support duty_cycle values that span bit 4. */
 	else if ((pr->throttling.duty_offset
		  + pr->throttling.duty_width) > 4) {
-		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
+		pr_warn("duty_cycle spans bit 4\n");
 		return -EINVAL;
 	}
@@ -1024,10 +1003,10 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 
 	local_irq_enable();
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+	acpi_handle_debug(pr->handle,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
-			   throttling.states[state].performance / 10 : 0)));
+			   throttling.states[state].performance / 10 : 0));
 
 	return 0;
 }
@@ -1063,16 +1042,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr,
-				  int state, bool force)
+static long acpi_processor_throttling_fn(void *data)
+{
+	struct acpi_processor_throttling_arg *arg = data;
+	struct acpi_processor *pr = arg->pr;
+
+	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
+}
+
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+					   int state, bool force, bool direct)
 {
-	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
+	struct acpi_processor_throttling_arg arg;
 	struct throttling_tstate t_state;
-	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1083,14 +1070,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
-		free_cpumask_var(saved_mask);
-		return -ENOMEM;
-	}
-
 	if (cpu_is_offline(pr->id)) {
 		/*
 		 * the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1099,17 +1078,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 		return -ENODEV;
 	}
 
-	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpumask_and(online_throttling_cpus, cpu_online_mask,
-		    p_throttling->shared_cpu_map);
+
 	/*
	 * The throttling notifier will be called for every
	 * affected cpu in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
							&t_state);
@@ -1121,29 +1098,27 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
	 * it can be called only for the cpu pointed by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		/* FIXME: use work_on_cpu() */
-		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-			/* Can't migrate to the pr->id CPU. Exit */
-			ret = -ENODEV;
-			goto exit;
-		}
-		ret = p_throttling->acpi_processor_set_throttling(pr,
-						t_state.target_state, force);
+		arg.pr = pr;
+		arg.target_state = state;
+		arg.force = force;
+		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
+				  direct);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set T-state for every affected
		 * cpus.
		 */
-		for_each_cpu(i, online_throttling_cpus) {
+		for_each_cpu_and(i, cpu_online_mask,
+				 p_throttling->shared_cpu_map) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, we will report the
			 * error message and continue.
			 */
			if (!match_pr) {
-				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					"Invalid Pointer for CPU %d\n", i));
+				acpi_handle_debug(pr->handle,
					"Invalid Pointer for CPU %d\n", i);
				continue;
			}
			/*
@@ -1151,18 +1126,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
			 * we will report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
-				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					"Throttling Control is unsupported "
-					"on CPU %d\n", i));
+				acpi_handle_debug(pr->handle,
					"Throttling Control unsupported on CPU %d\n", i);
				continue;
			}
-			t_state.cpu = i;
-			/* FIXME: use work_on_cpu() */
-			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
-				continue;
-			ret = match_pr->throttling.
-			    acpi_processor_set_throttling(
-			    match_pr, t_state.target_state, force);
+
+			arg.pr = match_pr;
+			arg.target_state = state;
+			arg.force = force;
+			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
+					  &arg, direct);
		}
	}
	/*
@@ -1171,30 +1144,31 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
	 * affected cpu to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE
	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
							&t_state);
	}
-	/* restore the previous state */
-	/* FIXME: use work_on_cpu() */
-	set_cpus_allowed_ptr(current, saved_mask);
-exit:
-	free_cpumask_var(online_throttling_cpus);
-	free_cpumask_var(saved_mask);
+
	return ret;
 }
 
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
+				  bool force)
+{
+	return __acpi_processor_set_throttling(pr, state, force, false);
+}
+
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 {
	int result = 0;
	struct acpi_processor_throttling *pthrottling;
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+	acpi_handle_debug(pr->handle,
		"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
		pr->throttling.address, pr->throttling.duty_offset,
-		pr->throttling.duty_width));
+		pr->throttling.duty_width);
 
	/*
	 * Evaluate _PTC, _TSS and _TPC
@@ -1202,8 +1176,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
	 */
	if (acpi_processor_get_throttling_control(pr) ||
	    acpi_processor_get_throttling_states(pr) ||
-	    acpi_processor_get_platform_limit(pr))
-	{
+	    acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
@@ -1234,13 +1207,13 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
	 * used this part.
	 */
	if (errata.piix4.throttle) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			"Throttling not supported on PIIX4 A- or B-step\n"));
+		acpi_handle_debug(pr->handle,
			"Throttling not supported on PIIX4 A- or B-step\n");
		return 0;
	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
-			  pr->throttling.state_count));
+	acpi_handle_debug(pr->handle, "Found %d throttling states\n",
			  pr->throttling.state_count);
 
	pr->flags.throttling = 1;
 
@@ -1255,15 +1228,15 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
		goto end;
 
	if (pr->throttling.state) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+		acpi_handle_debug(pr->handle,
				  "Disabling throttling (was T%d)\n",
-				  pr->throttling.state));
+				  pr->throttling.state);
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}
 
-      end:
+end:
	if (result)
		pr->flags.throttling = 0;
