From fb318cbff40964999f303d50bcf541dd9ead6780 Mon Sep 17 00:00:00 2001
From: Lin Ming
Date: Wed, 18 Mar 2009 09:09:01 +0800
Subject: ACPI: cpufreq: use new bit register access function

> arch/x86/kernel/cpu/cpufreq/longhaul.c: In function 'longhaul_setstate':
> arch/x86/kernel/cpu/cpufreq/longhaul.c:308: error: implicit declaration of function 'acpi_set_register'

Signed-off-by: Lin Ming
Compile-tested-by: Stephen Rothwell
Signed-off-by: Len Brown
---
 arch/x86/kernel/cpu/cpufreq/longhaul.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index a4cff5d6e380..4e18d5119828 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -303,7 +303,7 @@ retry_loop:
 		outb(3, 0x22);
 	} else if ((pr != NULL) && pr->flags.bm_control) {
 		/* Disable bus master arbitration */
-		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
 	}

 	switch (longhaul_version) {
@@ -326,7 +326,7 @@ retry_loop:
 	case TYPE_POWERSAVER:
 		if (longhaul_flags & USE_ACPI_C3) {
 			/* Don't allow wakeup */
-			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+			acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
 			do_powersaver(cx->address, clock_ratio_index, dir);
 		} else {
 			do_powersaver(0, clock_ratio_index, dir);
@@ -339,7 +339,7 @@ retry_loop:
 		outb(0, 0x22);
 	} else if ((pr != NULL) && pr->flags.bm_control) {
 		/* Enable bus master arbitration */
-		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 	}
 	outb(pic2_mask,0xA1);	/* restore mask */
 	outb(pic1_mask,0x21);
-- cgit
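
For context, the implicit-declaration error above comes from an ACPICA rename of the fixed-hardware bit register accessors: acpi_set_register() became acpi_write_bit_register(), and the read-side counterpart acpi_get_register() became acpi_read_bit_register(); the ACPI_BITREG_* register IDs were not changed. The following is a minimal, kernel-style sketch of the new call, not part of the patch; set_arbiter_disabled() is a hypothetical helper used only for illustration.

    #include <acpi/acpi.h>	/* acpi_write_bit_register(), ACPI_BITREG_* */

    /*
     * Hypothetical helper mirroring what longhaul_setstate() does around a
     * frequency transition: disable or re-enable bus-master arbitration via
     * the renamed ACPICA accessor.  Returns the ACPICA status so the caller
     * can check for failure.
     */
    static acpi_status set_arbiter_disabled(int disabled)
    {
            return acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE,
                                           disabled ? 1 : 0);
    }
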
From a59d1637eb0e0a37ee0e5c92800c60abe3624e24 Mon Sep 17 00:00:00 2001
From: "Pallipadi, Venkatesh"
Date: Thu, 19 Mar 2009 14:41:40 -0700
Subject: ACPI: cap off P-state transition latency from buggy BIOSes

Some BIOSes report very high frequency-transition latencies which are
plainly wrong on CPUs that can change frequency using the native MSR
interface. One such system is the IBM T42 (2327-8ZU), as reported by
Owen Taylor and Rik van Riel.

The cpufreq_ondemand driver uses this transition latency to come up with
a reasonable sampling interval for measuring CPU usage. With such a high
latency value, the ondemand sampling interval ends up being very long
(0.5 sec in this particular case), resulting in a performance impact due
to slow response to increasing frequency.

Fix it by capping the transition latency at 20uS for native MSR based
frequency transitions.

mjg: We've confirmed that this also helps on the X31

Signed-off-by: Venkatesh Pallipadi
Acked-by: Matthew Garrett
Signed-off-by: Len Brown
---
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4b1c319d30c3..89c676d6caf7 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -680,6 +680,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		    perf->states[i].transition_latency * 1000;
 	}

+	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
+	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
+	    policy->cpuinfo.transition_latency > 20 * 1000) {
+		static int print_once;
+		policy->cpuinfo.transition_latency = 20 * 1000;
+		if (!print_once) {
+			print_once = 1;
+			printk(KERN_INFO "Capping off P-state transition latency"
+				" at 20 uS\n");
+		}
+	}
+
 	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
 	for (i=0; i<perf->state_count; i++) {
-- cgit
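
To make the numbers in the changelog concrete: the ondemand governor of this era derives its sampling interval from the reported transition latency, scaled by a fixed multiplier (LATENCY_MULTIPLIER, 1000, defined in include/linux/cpufreq.h at the time). The sketch below is a back-of-the-envelope user-space calculation, not kernel code; the 500 us "buggy" latency is an assumption chosen to be consistent with the 0.5 sec interval quoted above, and the exact governor constants may differ slightly.

    #include <stdio.h>

    int main(void)
    {
            /* assumed governor constant (LATENCY_MULTIPLIER in cpufreq.h) */
            unsigned int latency_multiplier = 1000;
            unsigned int buggy_latency_ns  = 500 * 1000;	/* ~what a T42-like BIOS reports */
            unsigned int capped_latency_ns = 20 * 1000;	/* cap introduced by this patch */

            /* the governor works in microseconds */
            printf("sampling interval, buggy BIOS: %u us (~0.5 s)\n",
                   (buggy_latency_ns / 1000) * latency_multiplier);
            printf("sampling interval, capped:     %u us (~20 ms)\n",
                   (capped_latency_ns / 1000) * latency_multiplier);
            return 0;
    }

With the cap in place, the derived sampling interval drops from roughly half a second to tens of milliseconds, which is why the governor responds to load much more quickly.
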
From 5b4c0b6fffb91b07a6f85dabbdfbd5abab61d9db Mon Sep 17 00:00:00 2001
From: Zhang Rui
Date: Wed, 1 Apr 2009 01:49:42 -0400
Subject: ACPI: update comment

update ACPI Development Discussion List

Signed-off-by: Zhang Rui
Signed-off-by: Len Brown
---
 arch/x86/kernel/acpi/boot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7678f10c4568..2481ec3e83b1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1493,7 +1493,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)

 /*
  * If your system is blacklisted here, but you find that acpi=force
- * works for you, please contact acpi-devel@sourceforge.net
+ * works for you, please contact linux-acpi@vger.kernel.org
  */
 static struct dmi_system_id __initdata acpi_dmi_table[] = {
 	/*
-- cgit
From 7237d3de78ff89ec2e18eae5fe962d063024fef5 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Mon, 30 Mar 2009 13:55:30 -0800
Subject: x86, ACPI: add support for x2apic ACPI extensions

All logical processors with APIC ID values of 255 and greater will have
their APIC reported through the Processor X2APIC structure (type-9 entry
type), and all logical processors with APIC ID less than 255 will have
their APIC reported through the legacy Processor Local APIC structure
(type-0 entry type) only. The same rule applies to NMI structure
reporting.

The Processor X2APIC Affinity structure provides the association between
the X2APIC ID of a logical processor and the proximity domain to which
the logical processor belongs.

For OSPM, Processor IDs outside the 0-254 range are to be declared as
Device() objects in the ACPI namespace.

Signed-off-by: Suresh Siddha
Signed-off-by: Len Brown
---
 arch/x86/kernel/acpi/boot.c | 63 ++++++++++++++++++++++++++++++++++++++++++---
 arch/x86/mm/srat_64.c       | 30 +++++++++++++++++++++
 2 files changed, 89 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7678f10c4568..565e70c7ca93 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -259,6 +259,35 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 	generic_processor_info(id, ver);
 }

+static int __init
+acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+{
+	struct acpi_madt_local_x2apic *processor = NULL;
+
+	processor = (struct acpi_madt_local_x2apic *)header;
+
+	if (BAD_MADT_ENTRY(processor, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+#ifdef CONFIG_X86_X2APIC
+	/*
+	 * We need to register disabled CPU as well to permit
+	 * counting disabled CPUs. This allows us to size
+	 * cpus_possible_map more accurately, to permit
+	 * to not preallocating memory for all NR_CPUS
+	 * when we use CPU hotplug.
+	 */
+	acpi_register_lapic(processor->local_apic_id,	/* APIC ID */
+			    processor->lapic_flags & ACPI_MADT_ENABLED);
+#else
+	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+#endif
+
+	return 0;
+}
+
 static int __init
 acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -318,6 +347,25 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 	return 0;
 }

+static int __init
+acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
+		      const unsigned long end)
+{
+	struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
+
+	x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
+
+	if (BAD_MADT_ENTRY(x2apic_nmi, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	if (x2apic_nmi->lint != 1)
+		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+
+	return 0;
+}
+
 static int __init
 acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -823,6 +871,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
 static int __init acpi_parse_madt_lapic_entries(void)
 {
 	int count;
+	int x2count = 0;

 	if (!cpu_has_apic)
 		return -ENODEV;
@@ -846,22 +895,28 @@ static int __init acpi_parse_madt_lapic_entries(void)
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
				      acpi_parse_sapic, MAX_APICS);

-	if (!count)
+	if (!count) {
+		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+						acpi_parse_x2apic, MAX_APICS);
 		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
					      acpi_parse_lapic, MAX_APICS);
-	if (!count) {
+	}
+	if (!count && !x2count) {
 		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
 		/* TBD: Cleanup to allow fallback to MPS */
 		return -ENODEV;
-	} else if (count < 0) {
+	} else if (count < 0 || x2count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
 		return count;
 	}

+	x2count =
+	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
+				  acpi_parse_x2apic_nmi, 0);
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
				      acpi_parse_lapic_nmi, 0);
-	if (count < 0) {
+	if (count < 0 || x2count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
 		return count;
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 09737c8af074..13d56f5b1349 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -115,6 +115,36 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 	reserve_early(phys, phys + length, "ACPI SLIT");
 }

+/* Callback for Proximity Domain -> x2APIC mapping */
+void __init
+acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
+{
+	int pxm, node;
+	int apic_id;
+
+	if (srat_disabled())
+		return;
+	if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
+		bad_srat();
+		return;
+	}
+	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+		return;
+	pxm = pa->proximity_domain;
+	node = setup_node(pxm);
+	if (node < 0) {
+		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
+		bad_srat();
+		return;
+	}
+
+	apic_id = pa->apic_id;
+	apicid_to_node[apic_id] = node;
+	acpi_numa = 1;
+	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
+	       pxm, apic_id, node);
+}
+
 /* Callback for Proximity Domain -> LAPIC mapping */
 void __init
 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
-- cgit
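
The split described in the changelog above (APIC IDs 0-254 reported via type-0 Local APIC entries, 255 and greater via type-9 Local x2APIC entries) can be summed up with a small stand-alone sketch. This is illustration only, assuming the entry-type rule exactly as stated in the changelog; expected_madt_entry() is a hypothetical helper and does not exist in the patch or in the kernel.

    #include <stdio.h>

    enum madt_cpu_entry {
            MADT_TYPE_LOCAL_APIC   = 0,	/* legacy Processor Local APIC  */
            MADT_TYPE_LOCAL_X2APIC = 9,	/* Processor Local x2APIC       */
    };

    /*
     * Which MADT entry type is expected to describe a CPU with the given
     * APIC ID: 8-bit IDs (0-254) still use the legacy entry, while 255 is
     * the xAPIC broadcast ID, so anything at or above it must come from an
     * x2APIC entry.
     */
    static enum madt_cpu_entry expected_madt_entry(unsigned int apic_id)
    {
            return (apic_id >= 255) ? MADT_TYPE_LOCAL_X2APIC
                                    : MADT_TYPE_LOCAL_APIC;
    }

    int main(void)
    {
            printf("APIC ID 24  -> MADT entry type %d\n", expected_madt_entry(24));
            printf("APIC ID 300 -> MADT entry type %d\n", expected_madt_entry(300));
            return 0;
    }

This is also why acpi_parse_madt_lapic_entries() above parses both entry types and only complains when neither kind is present: a firmware table may legitimately contain a mix of the two.
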