Diffstat (limited to 'arch/x86/kernel/acpi')
-rw-r--r--  arch/x86/kernel/acpi/boot.c          | 11
-rw-r--r--  arch/x86/kernel/acpi/cppc.c          | 14
-rw-r--r--  arch/x86/kernel/acpi/cstate.c        | 21
-rw-r--r--  arch/x86/kernel/acpi/madt_playdead.S |  1
-rw-r--r--  arch/x86/kernel/acpi/madt_wakeup.c   | 75
-rw-r--r--  arch/x86/kernel/acpi/sleep.c         |  1
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S     |  1
7 files changed, 54 insertions(+), 70 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index dae6a73be40e..9fa321a95eb3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -23,6 +23,8 @@
 #include <linux/serial_core.h>
 #include <linux/pgtable.h>
 
+#include <xen/xen.h>
+
 #include <asm/e820/api.h>
 #include <asm/irqdomain.h>
 #include <asm/pci_x86.h>
@@ -1729,6 +1731,15 @@ int __init acpi_mps_check(void)
 {
 #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
 /* mptable code is not built-in*/
+
+	/*
+	 * Xen disables ACPI in PV DomU guests but it still emulates APIC and
+	 * supports SMP. Returning early here ensures that APIC is not disabled
+	 * unnecessarily and the guest is not limited to a single vCPU.
+	 */
+	if (xen_pv_domain() && !xen_initial_domain())
+		return 0;
+
 	if (acpi_disabled || acpi_noirq) {
 		pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
 		return 1;
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index d745dd586303..7047124490f6 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -4,6 +4,8 @@
  * Copyright (c) 2016, Intel Corporation.
  */
 
+#include <linux/bitfield.h>
+
 #include <acpi/cppc_acpi.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -47,7 +49,7 @@ int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
 {
 	int err;
 
-	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
+	err = rdmsrq_safe_on_cpu(cpunum, reg->address, val);
 	if (!err) {
 		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
 				       reg->bit_offset);
@@ -63,7 +65,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 	u64 rd_val;
 	int err;
 
-	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
+	err = rdmsrq_safe_on_cpu(cpunum, reg->address, &rd_val);
 	if (!err) {
 		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
 				       reg->bit_offset);
@@ -72,7 +74,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
 		val &= mask;
 		rd_val &= ~mask;
 		rd_val |= val;
-		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
+		err = wrmsrq_safe_on_cpu(cpunum, reg->address, rd_val);
 	}
 	return err;
 }
@@ -145,11 +147,11 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
 	int ret;
 
 	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
+		ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
 		if (ret)
 			goto out;
 
-		val = AMD_CPPC_HIGHEST_PERF(val);
+		val = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, val);
 	} else {
 		ret = cppc_get_highest_perf(cpu, &val);
 		if (ret)
@@ -270,7 +272,7 @@ int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
 	}
 
 	/* detect if running on heterogeneous design */
-	if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) {
+	if (cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES)) {
 		switch (core_type) {
 		case TOPO_CPU_TYPE_UNKNOWN:
 			pr_warn("Undefined core type found for cpu %d\n", cpu);
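The cpc_read_ffh()/cpc_write_ffh() hunks above keep their GENMASK_ULL()-based read-modify-write sequence, and the amd_get_highest_perf() hunk swaps the open-coded AMD_CPPC_HIGHEST_PERF() shift macro for FIELD_GET(). A minimal userspace sketch of that masking logic follows; genmask_ull() is a simplified stand-in for the kernel macro, and the simulated MSR value and field geometry are invented for illustration.

/*
 * Sketch of the bitfield handling in cpc_read_ffh()/cpc_write_ffh().
 * genmask_ull() is a simplified stand-in for the kernel's GENMASK_ULL();
 * the MSR is simulated with a plain u64.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Build a mask of ones covering bits h..l inclusive (h >= l, h <= 63). */
static u64 genmask_ull(unsigned int h, unsigned int l)
{
	return ((~0ULL) >> (63 - h)) & ((~0ULL) << l);
}

int main(void)
{
	u64 msr = 0x00000000ffaa5500ULL;	/* simulated raw MSR content */
	unsigned int bit_offset = 8, bit_width = 8;	/* invented geometry */
	u64 mask = genmask_ull(bit_offset + bit_width - 1, bit_offset);

	/* Read path: isolate the field, then shift it down (cpc_read_ffh). */
	u64 field = (msr & mask) >> bit_offset;
	printf("read field: %#llx\n", (unsigned long long)field);

	/* Write path: shift the new value up, clear the old field, merge
	 * (the val <<= / &= / |= sequence in cpc_write_ffh). */
	u64 val = 0x7fULL;
	val <<= bit_offset;
	val &= mask;
	msr &= ~mask;
	msr |= val;
	printf("msr after write: %#llx\n", (unsigned long long)msr);
	return 0;
}

FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, val) performs the same isolate-and-shift as the read path, with the shift distance derived from the mask at compile time instead of being spelled out by hand.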
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 5854f0b8f0f1..8698d66563ed 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -13,9 +13,11 @@
 #include <linux/sched.h>
 
 #include <acpi/processor.h>
-#include <asm/cpuid.h>
+#include <asm/cpu_device_id.h>
+#include <asm/cpuid/api.h>
 #include <asm/mwait.h>
 #include <asm/special_insns.h>
+#include <asm/smp.h>
 
 /*
  * Initialize bm_flags based on the CPU cache properties
@@ -47,12 +49,11 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 	/*
 	 * On all recent Intel platforms, ARB_DISABLE is a nop.
 	 * So, set bm_control to zero to indicate that ARB_DISABLE
-	 * is not required while entering C3 type state on
-	 * P4, Core and beyond CPUs
+	 * is not required while entering C3 type state.
 	 */
 	if (c->x86_vendor == X86_VENDOR_INTEL &&
-	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
-		flags->bm_control = 0;
+	    (c->x86 > 15 || (c->x86_vfm >= INTEL_CORE2_MEROM && c->x86_vfm <= INTEL_FAM6_LAST)))
+		flags->bm_control = 0;
 
 	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
 		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
@@ -205,6 +206,16 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
+void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx)
+{
+	unsigned int cpu = smp_processor_id();
+	struct cstate_entry *percpu_entry;
+
+	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+	mwait_play_dead(percpu_entry->states[cx->index].eax);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_play_dead);
+
 void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/acpi/madt_playdead.S b/arch/x86/kernel/acpi/madt_playdead.S
index 4e498d28cdc8..aefb9cb583ad 100644
--- a/arch/x86/kernel/acpi/madt_playdead.S
+++ b/arch/x86/kernel/acpi/madt_playdead.S
@@ -14,6 +14,7 @@
  *	rsi: PGD of the identity mapping
  */
 SYM_FUNC_START(asm_acpi_mp_play_dead)
+	ANNOTATE_NOENDBR
 	/* Turn off global entries. Following CR3 write will flush them. */
 	movq	%cr4, %rdx
 	andq	$~(X86_CR4_PGE), %rdx
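The reworked bm_control test above compares c->x86_vfm against a range instead of checking family and model separately. x86_vfm packs vendor, family, and model into a single integer (the layout used by <asm/cpu_device_id.h>), so two integer comparisons cover an entire span of models. A sketch under that assumption; the bit positions mirror the kernel's layout, while the sample model numbers are illustrative.

/*
 * Sketch of the VENDOR-FAMILY-MODEL packing behind x86_vfm range checks.
 * Bit positions mirror <asm/cpu_device_id.h>; sample values are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define VFM_MODEL_BIT	0	/* bits  0-7:  model  */
#define VFM_FAMILY_BIT	8	/* bits  8-15: family */
#define VFM_VENDOR_BIT	16	/* bits 16-23: vendor */

static uint32_t vfm_make(uint8_t vendor, uint8_t family, uint8_t model)
{
	return ((uint32_t)vendor << VFM_VENDOR_BIT) |
	       ((uint32_t)family << VFM_FAMILY_BIT) |
	       ((uint32_t)model << VFM_MODEL_BIT);
}

int main(void)
{
	/* X86_VENDOR_INTEL is 0 in the kernel; family 6 model 0x0f is Merom. */
	uint32_t merom = vfm_make(0, 6, 0x0f);		/* ~INTEL_CORE2_MEROM */
	uint32_t last_fam6 = vfm_make(0, 6, 0xff);	/* ~INTEL_FAM6_LAST */
	uint32_t cpu = vfm_make(0, 6, 0x9a);		/* some family 6 part */

	/* One ordered pair of comparisons replaces per-model checks. */
	if (cpu >= merom && cpu <= last_fam6)
		printf("Core2 Merom or later, still within family 6\n");
	return 0;
}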
diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
index d5ef6215583b..6d7603511f52 100644
--- a/arch/x86/kernel/acpi/madt_wakeup.c
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -70,58 +70,6 @@ static void __init free_pgt_page(void *pgt, void *dummy)
 	return memblock_free(pgt, PAGE_SIZE);
 }
 
-/*
- * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
- * the same place as in the kernel page tables. asm_acpi_mp_play_dead() switches
- * to the identity mapping and the function has be present at the same spot in
- * the virtual address space before and after switching page tables.
- */
-static int __init init_transition_pgtable(pgd_t *pgd)
-{
-	pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
-	unsigned long vaddr, paddr;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	vaddr = (unsigned long)asm_acpi_mp_play_dead;
-	pgd += pgd_index(vaddr);
-	if (!pgd_present(*pgd)) {
-		p4d = (p4d_t *)alloc_pgt_page(NULL);
-		if (!p4d)
-			return -ENOMEM;
-		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
-	}
-	p4d = p4d_offset(pgd, vaddr);
-	if (!p4d_present(*p4d)) {
-		pud = (pud_t *)alloc_pgt_page(NULL);
-		if (!pud)
-			return -ENOMEM;
-		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
-	}
-	pud = pud_offset(p4d, vaddr);
-	if (!pud_present(*pud)) {
-		pmd = (pmd_t *)alloc_pgt_page(NULL);
-		if (!pmd)
-			return -ENOMEM;
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-	}
-	pmd = pmd_offset(pud, vaddr);
-	if (!pmd_present(*pmd)) {
-		pte = (pte_t *)alloc_pgt_page(NULL);
-		if (!pte)
-			return -ENOMEM;
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
-	}
-	pte = pte_offset_kernel(pmd, vaddr);
-
-	paddr = __pa(vaddr);
-	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
-
-	return 0;
-}
-
 static int __init acpi_mp_setup_reset(u64 reset_vector)
 {
 	struct x86_mapping_info info = {
@@ -130,6 +78,7 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
 		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 		.kernpg_flag	= _KERNPG_TABLE_NOENC,
 	};
+	unsigned long mstart, mend;
 	pgd_t *pgd;
 
 	pgd = alloc_pgt_page(NULL);
@@ -137,8 +86,6 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
 		return -ENOMEM;
 
 	for (int i = 0; i < nr_pfn_mapped; i++) {
-		unsigned long mstart, mend;
-
 		mstart = pfn_mapped[i].start << PAGE_SHIFT;
 		mend = pfn_mapped[i].end << PAGE_SHIFT;
 		if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
@@ -147,14 +94,24 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
 		}
 	}
 
-	if (kernel_ident_mapping_init(&info, pgd,
-				      PAGE_ALIGN_DOWN(reset_vector),
-				      PAGE_ALIGN(reset_vector + 1))) {
+	mstart = PAGE_ALIGN_DOWN(reset_vector);
+	mend = mstart + PAGE_SIZE;
+	if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
 		kernel_ident_mapping_free(&info, pgd);
 		return -ENOMEM;
 	}
 
-	if (init_transition_pgtable(pgd)) {
+	/*
+	 * Make sure asm_acpi_mp_play_dead() is present in the identity mapping
+	 * at the same place as in the kernel page tables.
+	 * asm_acpi_mp_play_dead() switches to the identity mapping and the
+	 * function must be present at the same spot in the virtual address
+	 * space before and after switching page tables.
+	 */
+	info.offset = __START_KERNEL_map - phys_base;
+	mstart = PAGE_ALIGN_DOWN(__pa(asm_acpi_mp_play_dead));
+	mend = mstart + PAGE_SIZE;
+	if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
 		kernel_ident_mapping_free(&info, pgd);
 		return -ENOMEM;
 	}
@@ -169,7 +126,7 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
 	return 0;
 }
 
-static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
+static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip, unsigned int cpu)
 {
 	if (!acpi_mp_wake_mailbox_paddr) {
 		pr_warn_once("No MADT mailbox: cannot bringup secondary CPUs. Booting with kexec?\n");
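The code replacing init_transition_pgtable() above reuses kernel_ident_mapping_init() with a non-zero info.offset: the helper installs mappings at vaddr = paddr + offset, and choosing offset = __START_KERNEL_map - phys_base places the page holding asm_acpi_mp_play_dead() at its usual kernel-text virtual address rather than at vaddr == paddr. A sketch of that address arithmetic, using illustrative constants rather than values from a real boot.

/*
 * Sketch of the info.offset trick. kernel_ident_mapping_init() maps
 * the physical range at vaddr = paddr + offset (offset is 0 for a
 * plain identity mapping). The phys_base and function address below
 * are invented; only __START_KERNEL_map is the real x86-64 constant.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start_kernel_map = 0xffffffff80000000ULL; /* __START_KERNEL_map */
	uint64_t phys_base = 0x1000000ULL;	/* where the kernel landed */
	uint64_t func_paddr = 0x1234000ULL;	/* page of asm_acpi_mp_play_dead */

	/* Chosen so that paddr + offset equals the kernel-text address,
	 * mirroring va = pa - phys_base + __START_KERNEL_map. */
	uint64_t offset = start_kernel_map - phys_base;
	uint64_t vaddr = func_paddr + offset;

	printf("physical page %#llx mapped at %#llx\n",
	       (unsigned long long)func_paddr, (unsigned long long)vaddr);
	return 0;
}

With the offset applied for just that one page, the trampoline's code is reachable at the same virtual address both before and after the CR3 switch, which is exactly what the deleted init_transition_pgtable() built by hand, one page-table level at a time.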
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 6dfecb27b846..91fa262f0e30 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
 #include <asm/hypervisor.h>
+#include <asm/msr.h>
 #include <asm/smp.h>
 
 #include <linux/ftrace.h>
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index b200a193beeb..04f561f75e99 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,6 +17,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
SYM_FUNC_START(wakeup_long64)
+	ANNOTATE_NOENDBR
 	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
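wakeup_long64 above opens by comparing saved_magic with the 64-bit constant 0x123456789abcdef0 and refuses to continue on a mismatch, guarding the resume path against a stale or corrupted wakeup image. A simplified userspace model of that cookie handshake; the helper names are invented, and only the constant comes from the code above.

/*
 * Model of the saved_magic handshake: the suspend path stores a known
 * cookie, and the resume trampoline only proceeds if it reads the same
 * value back. Simplified sketch, not the kernel code path.
 */
#include <stdint.h>
#include <stdio.h>

#define WAKEUP_MAGIC 0x123456789abcdef0ULL	/* constant from wakeup_64.S */

static uint64_t saved_magic;

static void prepare_suspend(void)
{
	saved_magic = WAKEUP_MAGIC;	/* written before entering sleep */
}

static int wakeup_check(void)
{
	/* mirrors the movq/cmpq sequence in wakeup_long64 */
	return saved_magic == WAKEUP_MAGIC ? 0 : -1;
}

int main(void)
{
	prepare_suspend();
	printf("resume %s\n", wakeup_check() == 0 ? "ok" : "blocked");
	return 0;
}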