Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/Kconfig          |    1
-rw-r--r--  arch/x86/xen/apic.c           |   29
-rw-r--r--  arch/x86/xen/debugfs.c        |    2
-rw-r--r--  arch/x86/xen/debugfs.h        |    7
-rw-r--r--  arch/x86/xen/enlighten.c      |  135
-rw-r--r--  arch/x86/xen/enlighten_hvm.c  |   21
-rw-r--r--  arch/x86/xen/enlighten_pv.c   |   37
-rw-r--r--  arch/x86/xen/enlighten_pvh.c  |  112
-rw-r--r--  arch/x86/xen/irq.c            |    2
-rw-r--r--  arch/x86/xen/mmu.c            |    3
-rw-r--r--  arch/x86/xen/mmu.h            |   28
-rw-r--r--  arch/x86/xen/mmu_hvm.c        |    2
-rw-r--r--  arch/x86/xen/mmu_pv.c         |  118
-rw-r--r--  arch/x86/xen/multicalls.c     |  133
-rw-r--r--  arch/x86/xen/multicalls.h     |   69
-rw-r--r--  arch/x86/xen/p2m.c            |  123
-rw-r--r--  arch/x86/xen/pmu.c            |    1
-rw-r--r--  arch/x86/xen/pmu.h            |   22
-rw-r--r--  arch/x86/xen/setup.c          |  248
-rw-r--r--  arch/x86/xen/smp.c            |   15
-rw-r--r--  arch/x86/xen/smp.h            |   49
-rw-r--r--  arch/x86/xen/smp_hvm.c        |    2
-rw-r--r--  arch/x86/xen/smp_pv.c         |   73
-rw-r--r--  arch/x86/xen/spinlock.c       |   20
-rw-r--r--  arch/x86/xen/suspend.c        |    2
-rw-r--r--  arch/x86/xen/time.c           |    2
-rw-r--r--  arch/x86/xen/vga.c            |    1
-rw-r--r--  arch/x86/xen/xen-asm.S        |   60
-rw-r--r--  arch/x86/xen/xen-head.S       |  106
-rw-r--r--  arch/x86/xen/xen-ops.h        |  180
30 files changed, 1090 insertions, 513 deletions
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index a65fc2ae15b4..77e788e928cd 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -81,7 +81,6 @@ config XEN_PVH
bool "Xen PVH guest support"
depends on XEN && XEN_PVHVM && ACPI
select PVH
- def_bool n
help
Support for running as a Xen PVH guest.
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 9dd5490b3318..bb0f3f368446 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -10,8 +10,6 @@
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include "xen-ops.h"
-#include "pmu.h"
-#include "smp.h"
static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
{
@@ -33,12 +31,6 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
return 0xfd;
}
-static u32 xen_set_apic_id(u32 x)
-{
- WARN_ON(1);
- return x;
-}
-
static u32 xen_get_apic_id(u32 x)
{
return ((x)>>24) & 0xFFu;
@@ -49,20 +41,20 @@ static u32 xen_apic_read(u32 reg)
struct xen_platform_op op = {
.cmd = XENPF_get_cpuinfo,
.interface_version = XENPF_INTERFACE_VERSION,
- .u.pcpu_info.xen_cpuid = 0,
};
- int ret;
-
- /* Shouldn't need this as APIC is turned off for PV, and we only
- * get called on the bootup processor. But just in case. */
- if (!xen_initial_domain() || smp_processor_id())
- return 0;
+ int ret, cpu;
if (reg == APIC_LVR)
return 0x14;
if (reg != APIC_ID)
return 0;
+ cpu = smp_processor_id();
+ if (!xen_initial_domain())
+ return cpu ? cpuid_to_apicid[cpu] << 24 : 0;
+
+ op.u.pcpu_info.xen_cpuid = cpu;
+
ret = HYPERVISOR_platform_op(&op);
if (ret)
op.u.pcpu_info.apic_id = BAD_APICID;
@@ -110,11 +102,6 @@ static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
return xen_pv_domain();
}
-static u32 xen_phys_pkg_id(u32 initial_apic_id, int index_msb)
-{
- return initial_apic_id >> index_msb;
-}
-
static u32 xen_cpu_present_to_apicid(int cpu)
{
if (cpu_present(cpu))
@@ -133,11 +120,9 @@ static struct apic xen_pv_apic __ro_after_init = {
.disable_esr = 0,
.cpu_present_to_apicid = xen_cpu_present_to_apicid,
- .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */
.max_apic_id = UINT_MAX,
.get_apic_id = xen_get_apic_id,
- .set_apic_id = xen_set_apic_id,
.calc_dest_apicid = apic_flat_calc_apicid,
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index 532410998684..b8c9f2a7d9b6 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -3,7 +3,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include "debugfs.h"
+#include "xen-ops.h"
static struct dentry *d_xen_debug;
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
deleted file mode 100644
index 6b813ad1091c..000000000000
--- a/arch/x86/xen/debugfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_DEBUGFS_H
-#define _XEN_DEBUGFS_H
-
-struct dentry * __init xen_init_debugfs(void);
-
-#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 3c61bb98c10e..43dcd8c7badc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-#include <linux/memblock.h>
-#endif
#include <linux/console.h>
#include <linux/cpu.h>
+#include <linux/instrumentation.h>
#include <linux/kexec.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/panic_notifier.h>
@@ -22,10 +21,9 @@
#include <asm/setup.h>
#include "xen-ops.h"
-#include "smp.h"
-#include "pmu.h"
-EXPORT_SYMBOL_GPL(hypercall_page);
+DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
+EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
/*
* Pointer to the xen_vcpu_info structure or
@@ -72,6 +70,67 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+static __ref void xen_get_vendor(void)
+{
+ init_cpu_devs();
+ cpu_detect(&boot_cpu_data);
+ get_cpu_vendor(&boot_cpu_data);
+}
+
+void xen_hypercall_setfunc(void)
+{
+ if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
+ return;
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
+ static_call_update(xen_hypercall, xen_hypercall_amd);
+ else
+ static_call_update(xen_hypercall, xen_hypercall_intel);
+}
+
+/*
+ * Evaluate processor vendor in order to select the correct hypercall
+ * function for HVM/PVH guests.
+ * Might be called very early in boot before vendor has been set by
+ * early_cpu_init().
+ */
+noinstr void *__xen_hypercall_setfunc(void)
+{
+ void (*func)(void);
+
+ /*
+ * Xen is supported only on CPUs with CPUID, so testing for
+ * X86_FEATURE_CPUID is a test for early_cpu_init() having been
+ * run.
+ *
+ * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
+ * dependency chain: it is being called via the xen_hypercall static
+ * call when running as a PVH or HVM guest. Hypercalls need to be
+ * noinstr due to PV guests using hypercalls in noinstr code. So we
+ * can safely tag the function body as "instrumentation ok", since
+ * the PV guest requirement is not of interest here (xen_get_vendor()
+ * calls noinstr functions, and static_call_update_early() might do
+ * so, too).
+ */
+ instrumentation_begin();
+
+ if (!boot_cpu_has(X86_FEATURE_CPUID))
+ xen_get_vendor();
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
+ func = xen_hypercall_amd;
+ else
+ func = xen_hypercall_intel;
+
+ static_call_update_early(xen_hypercall, func);
+
+ instrumentation_end();
+
+ return func;
+}
+
static int xen_cpu_up_online(unsigned int cpu)
{
xen_init_lock_cpu(cpu);
@@ -350,3 +409,67 @@ void xen_arch_unregister_cpu(int num)
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif
+
+/* Amount of extra memory space we add to the e820 ranges */
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
+void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
+{
+ unsigned int i;
+
+ /*
+ * No need to check for zero size, should happen rarely and will only
+ * write a new entry regarded to be unused due to zero size.
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ /* Add new region. */
+ if (xen_extra_mem[i].n_pfns == 0) {
+ xen_extra_mem[i].start_pfn = start_pfn;
+ xen_extra_mem[i].n_pfns = n_pfns;
+ break;
+ }
+ /* Append to existing region. */
+ if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
+ start_pfn) {
+ xen_extra_mem[i].n_pfns += n_pfns;
+ break;
+ }
+ }
+ if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+ printk(KERN_WARNING "Warning: not enough extra memory regions\n");
+
+ memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
+}
+
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+ unsigned int i;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ /* Must be set strictly before calling xen_free_unpopulated_pages(). */
+ *res = &iomem_resource;
+
+ /*
+ * Initialize with pages from the extra memory regions (see
+ * arch/x86/xen/setup.c).
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ unsigned int j;
+
+ for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+ struct page *pg =
+ pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+ xen_free_unpopulated_pages(1, &pg);
+ }
+
+ /* Zero so region is not also added to the balloon driver. */
+ xen_extra_mem[i].n_pfns = 0;
+ }
+
+ return 0;
+}
+#endif
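Aside (not part of the patch): the hunk above routes all hypercalls through a static call that is resolved once per boot, keyed on the CPU vendor. A minimal userspace sketch of that "pick an implementation once, then cache it" pattern, with made-up names and an ordinary function pointer standing in for the kernel's static-call machinery:

/*
 * Illustrative sketch only; not kernel code. AMD and Hygon guests use the
 * VMMCALL-based hypercall stub, everything else uses the VMCALL-based one.
 */
#include <stdio.h>
#include <string.h>

static void hypercall_amd(void)   { puts("vmmcall-based path"); }
static void hypercall_intel(void) { puts("vmcall-based path"); }

static void (*hypercall_fn)(void);

static void pick_hypercall(const char *vendor)
{
	if (!strcmp(vendor, "AuthenticAMD") || !strcmp(vendor, "HygonGenuine"))
		hypercall_fn = hypercall_amd;
	else
		hypercall_fn = hypercall_intel;
}

int main(void)
{
	pick_hypercall("GenuineIntel");
	hypercall_fn();	/* later call sites all go through the cached pointer */
	return 0;
}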
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 3f8c34707c50..fe57ff85d004 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -28,8 +28,6 @@
#include <asm/xen/page.h>
#include "xen-ops.h"
-#include "mmu.h"
-#include "smp.h"
static unsigned long shared_info_pfn;
@@ -108,15 +106,8 @@ static void __init init_hvm_pv_info(void)
/* PVH set up hypercall page in xen_prepare_pvh(). */
if (xen_pvh_domain())
pv_info.name = "Xen PVH";
- else {
- u64 pfn;
- uint32_t msr;
-
+ else
pv_info.name = "Xen HVM";
- msr = cpuid_ebx(base + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
- }
xen_setup_features();
@@ -148,7 +139,9 @@ static void xen_hvm_shutdown(void)
if (kexec_in_progress)
xen_reboot(SHUTDOWN_soft_reset);
}
+#endif
+#ifdef CONFIG_CRASH_DUMP
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
native_machine_crash_shutdown(regs);
@@ -168,7 +161,7 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
*/
xen_uninit_lock_cpu(cpu);
- if (cpu_acpi_id(cpu) != U32_MAX)
+ if (cpu_acpi_id(cpu) != CPU_ACPIID_INVALID)
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
else
per_cpu(xen_vcpu_id, cpu) = cpu;
@@ -236,6 +229,8 @@ static void __init xen_hvm_guest_init(void)
#ifdef CONFIG_KEXEC_CORE
machine_ops.shutdown = xen_hvm_shutdown;
+#endif
+#ifdef CONFIG_CRASH_DUMP
machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
@@ -298,6 +293,10 @@ static uint32_t __init xen_platform_hvm(void)
if (xen_pv_domain())
return 0;
+ /* Set correct hypercall function. */
+ if (xen_domain)
+ xen_hypercall_setfunc();
+
if (xen_pvh_domain() && nopv) {
/* Guest booting via the Xen-PVH boot entry goes here */
pr_info("\"nopv\" parameter is ignored in PVH guest\n");
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index aeb33e0a3f76..5e57835e999d 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -49,6 +49,7 @@
#include <xen/hvc-console.h>
#include <xen/acpi.h>
+#include <asm/cpuid.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
@@ -85,10 +86,6 @@
#endif
#include "xen-ops.h"
-#include "mmu.h"
-#include "smp.h"
-#include "multicalls.h"
-#include "pmu.h"
#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
@@ -175,7 +172,7 @@ static void __init xen_set_mtrr_data(void)
/* Only overwrite MTRR state if any MTRR could be got from Xen. */
if (reg)
- mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
+ guest_force_mtrr_state(var, reg, MTRR_TYPE_UNCACHABLE);
#endif
}
@@ -199,7 +196,10 @@ static void __init xen_pv_init_platform(void)
if (xen_initial_domain())
xen_set_mtrr_data();
else
- mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
+ guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
+
+ /* Adjust nr_cpu_ids before "enumeration" happens */
+ xen_smp_count_cpus();
}
static void __init xen_pv_guest_late_init(void)
@@ -216,14 +216,22 @@ static __read_mostly unsigned int cpuid_leaf5_edx_val;
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
unsigned int *cx, unsigned int *dx)
{
- unsigned maskebx = ~0;
+ unsigned int maskebx = ~0;
+ unsigned int or_ebx = 0;
/*
* Mask out inconvenient features, to try and disable as many
* unsupported kernel subsystems as possible.
*/
switch (*ax) {
- case CPUID_MWAIT_LEAF:
+ case 0x1:
+ /* Replace initial APIC ID in bits 24-31 of EBX. */
+ /* See xen_pv_smp_config() for related topology preparations. */
+ maskebx = 0x00ffffff;
+ or_ebx = smp_processor_id() << 24;
+ break;
+
+ case CPUID_LEAF_MWAIT:
/* Synthesize the values.. */
*ax = 0;
*bx = 0;
@@ -245,6 +253,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
: "0" (*ax), "2" (*cx));
*bx &= maskebx;
+ *bx |= or_ebx;
}
static bool __init xen_check_mwait(void)
@@ -292,7 +301,7 @@ static bool __init xen_check_mwait(void)
* ecx and edx. The hypercall provides only partial information.
*/
- ax = CPUID_MWAIT_LEAF;
+ ax = CPUID_LEAF_MWAIT;
bx = 0;
cx = 0;
dx = 0;
@@ -1024,6 +1033,10 @@ static u64 xen_do_read_msr(unsigned int msr, int *err)
switch (msr) {
case MSR_IA32_APICBASE:
val &= ~X2APIC_ENABLE;
+ if (smp_processor_id() == 0)
+ val |= MSR_IA32_APICBASE_BSP;
+ else
+ val &= ~MSR_IA32_APICBASE_BSP;
break;
}
return val;
@@ -1149,8 +1162,6 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
.write_cr4 = xen_write_cr4,
- .wbinvd = pv_native_wbinvd,
-
.read_msr = xen_read_msr,
.write_msr = xen_write_msr,
@@ -1329,6 +1340,9 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
xen_domain_type = XEN_PV_DOMAIN;
xen_start_flags = xen_start_info->flags;
+ /* Interrupts are guaranteed to be off initially. */
+ early_boot_irqs_disabled = true;
+ static_call_update_early(xen_hypercall, xen_hypercall_pv);
xen_setup_features();
@@ -1419,7 +1433,6 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
local_irq_disable();
- early_boot_irqs_disabled = true;
xen_raw_console_write("mapping kernel into physical memory\n");
xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index ada3868c02c2..0e3d930bcb89 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,12 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/mm.h>
#include <xen/hvc-console.h>
+#include <xen/acpi.h>
+#include <asm/bootparam.h>
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
+#include <asm/setup.h>
#include <xen/xen.h>
#include <asm/xen/interface.h>
@@ -25,21 +29,111 @@
bool __ro_after_init xen_pvh;
EXPORT_SYMBOL_GPL(xen_pvh);
-void __init xen_pvh_init(struct boot_params *boot_params)
+#ifdef CONFIG_XEN_DOM0
+int xen_pvh_setup_gsi(int gsi, int trigger, int polarity)
+{
+ int ret;
+ struct physdev_setup_gsi setup_gsi;
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (trigger == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (ret == -EEXIST) {
+ xen_raw_printk("Already setup the GSI :%d\n", gsi);
+ ret = 0;
+ } else if (ret)
+ xen_raw_printk("Fail to setup GSI (%d)!\n", gsi);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_pvh_setup_gsi);
+#endif
+
+/*
+ * Reserve e820 UNUSABLE regions to inflate the memory balloon.
+ *
+ * On PVH dom0 the host memory map is used, RAM regions available to dom0 are
+ * located as the same place as in the native memory map, but since dom0 gets
+ * less memory than the total amount of host RAM the ranges that can't be
+ * populated are converted from RAM -> UNUSABLE. Use such regions (up to the
+ * ratio signaled in EXTRA_MEM_RATIO) in order to inflate the balloon driver at
+ * boot. Doing so prevents the guest (even if just temporary) from using holes
+ * in the memory map in order to map grants or foreign addresses, and
+ * hopefully limits the risk of a clash with a device MMIO region. Ideally the
+ * hypervisor should notify us which memory ranges are suitable for creating
+ * foreign mappings, but that's not yet implemented.
+ */
+static void __init pvh_reserve_extra_memory(void)
+{
+ struct boot_params *bootp = &boot_params;
+ unsigned int i, ram_pages = 0, extra_pages;
+
+ for (i = 0; i < bootp->e820_entries; i++) {
+ struct boot_e820_entry *e = &bootp->e820_table[i];
+
+ if (e->type != E820_TYPE_RAM)
+ continue;
+ ram_pages += PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr);
+ }
+
+ /* Max amount of extra memory. */
+ extra_pages = EXTRA_MEM_RATIO * ram_pages;
+
+ /*
+ * Convert UNUSABLE ranges to RAM and reserve them for foreign mapping
+ * purposes.
+ */
+ for (i = 0; i < bootp->e820_entries && extra_pages; i++) {
+ struct boot_e820_entry *e = &bootp->e820_table[i];
+ unsigned long pages;
+
+ if (e->type != E820_TYPE_UNUSABLE)
+ continue;
+
+ pages = min(extra_pages,
+ PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr));
+
+ if (pages != (PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr))) {
+ struct boot_e820_entry *next;
+
+ if (bootp->e820_entries ==
+ ARRAY_SIZE(bootp->e820_table))
+ /* No space left to split - skip region. */
+ continue;
+
+ /* Split entry. */
+ next = e + 1;
+ memmove(next, e,
+ (bootp->e820_entries - i) * sizeof(*e));
+ bootp->e820_entries++;
+ next->addr = PAGE_ALIGN(e->addr) + PFN_PHYS(pages);
+ e->size = next->addr - e->addr;
+ next->size -= e->size;
+ }
+ e->type = E820_TYPE_RAM;
+ extra_pages -= pages;
+
+ xen_add_extra_mem(PFN_UP(e->addr), pages);
+ }
+}
+
+static void __init pvh_arch_setup(void)
{
- u32 msr;
- u64 pfn;
+ pvh_reserve_extra_memory();
+
+ if (xen_initial_domain())
+ xen_add_preferred_consoles();
+}
+void __init xen_pvh_init(struct boot_params *boot_params)
+{
xen_pvh = 1;
xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
- msr = cpuid_ebx(xen_cpuid_base() + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
- if (xen_initial_domain())
- x86_init.oem.arch_setup = xen_add_preferred_consoles;
+ x86_init.oem.arch_setup = pvh_arch_setup;
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 6092fea7d651..39982f955cfe 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -45,7 +45,7 @@ static const typeof(pv_ops) xen_irq_ops __initconst = {
/* Initial interrupt flag handling only called while interrupts off. */
.save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0),
.irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),
- .irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG),
+ .irq_enable = __PV_IS_CALLEE_SAVE(BUG_func),
.safe_halt = xen_safe_halt,
.halt = xen_halt,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 60e9c37fd79f..c4c479373249 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -5,8 +5,7 @@
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
-#include "multicalls.h"
-#include "mmu.h"
+#include "xen-ops.h"
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
deleted file mode 100644
index 6e4c6bd62203..000000000000
--- a/arch/x86/xen/mmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_MMU_H
-
-#include <linux/linkage.h>
-#include <asm/page.h>
-
-enum pt_level {
- PT_PGD,
- PT_P4D,
- PT_PUD,
- PT_PMD,
- PT_PTE
-};
-
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-
-void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
-
-pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
-void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep, pte_t pte);
-
-unsigned long xen_read_cr2_direct(void);
-
-extern void xen_init_mmu_ops(void);
-extern void xen_hvm_init_mmu_ops(void);
-#endif /* _XEN_MMU_H */
diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
index 509bdee3ab90..337955652202 100644
--- a/arch/x86/xen/mmu_hvm.c
+++ b/arch/x86/xen/mmu_hvm.c
@@ -5,7 +5,7 @@
#include <xen/interface/xen.h>
#include <xen/hvm.h>
-#include "mmu.h"
+#include "xen-ops.h"
#ifdef CONFIG_PROC_VMCORE
/*
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index b6830554ff69..d078de2c952b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -34,7 +34,7 @@
* would need to validate the whole pagetable before going on.
* Naturally, this is quite slow. The solution is to "pin" a
* pagetable, which enforces all the constraints on the pagetable even
- * when it is not actively in use. This menas that Xen can be assured
+ * when it is not actively in use. This means that Xen can be assured
* that it is still valid when you do load it into %cr3, and doesn't
* need to revalidate it.
*
@@ -82,9 +82,7 @@
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
-#include "multicalls.h"
-#include "mmu.h"
-#include "debugfs.h"
+#include "xen-ops.h"
/*
* Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
@@ -113,6 +111,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
*/
static DEFINE_SPINLOCK(xen_reservation_lock);
+/* Protected by xen_reservation_lock. */
+#define MIN_CONTIG_ORDER 9 /* 2MB */
+static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
+static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
+static unsigned long *discontig_frames __refdata = discontig_frames_early;
+static bool discontig_frames_dyn;
+
+static int alloc_discontig_frames(unsigned int order)
+{
+ unsigned long *new_array, *old_array;
+ unsigned int old_order;
+ unsigned long flags;
+
+ BUG_ON(order < MIN_CONTIG_ORDER);
+ BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
+
+ new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
+ order - MIN_CONTIG_ORDER);
+ if (!new_array)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&xen_reservation_lock, flags);
+
+ old_order = discontig_frames_order;
+
+ if (order > discontig_frames_order || !discontig_frames_dyn) {
+ if (!discontig_frames_dyn)
+ old_array = NULL;
+ else
+ old_array = discontig_frames;
+
+ discontig_frames = new_array;
+ discontig_frames_order = order;
+ discontig_frames_dyn = true;
+ } else {
+ old_array = new_array;
+ }
+
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+ free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
+
+ return 0;
+}
+
/*
* Note about cr3 (pagetable base) values:
*
@@ -128,7 +171,7 @@ static DEFINE_SPINLOCK(xen_reservation_lock);
* looking at another vcpu's cr3 value, it should use this variable.
*/
DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
-DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
+static DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
static phys_addr_t xen_pt_base, xen_pt_size __initdata;
@@ -305,16 +348,17 @@ static void xen_set_pte(pte_t *ptep, pte_t pteval)
__xen_set_pte(ptep, pteval);
}
-pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
return *ptep;
}
-void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep, pte_t pte)
+static void xen_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
{
struct mmu_update u;
@@ -666,7 +710,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
spinlock_t *ptl = NULL;
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
ptl = ptlock_ptr(page_ptdesc(page));
spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif
@@ -782,6 +826,7 @@ void xen_mm_pin_all(void)
{
struct page *page;
+ spin_lock(&init_mm.page_table_lock);
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
@@ -792,6 +837,7 @@ void xen_mm_pin_all(void)
}
spin_unlock(&pgd_lock);
+ spin_unlock(&init_mm.page_table_lock);
}
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
@@ -813,6 +859,9 @@ static void __init xen_after_bootmem(void)
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
+
+ if (alloc_discontig_frames(MIN_CONTIG_ORDER))
+ BUG();
}
static void xen_unpin_page(struct mm_struct *mm, struct page *page,
@@ -888,6 +937,7 @@ void xen_mm_unpin_all(void)
{
struct page *page;
+ spin_lock(&init_mm.page_table_lock);
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
@@ -899,6 +949,7 @@ void xen_mm_unpin_all(void)
}
spin_unlock(&pgd_lock);
+ spin_unlock(&init_mm.page_table_lock);
}
static void xen_enter_mmap(struct mm_struct *mm)
@@ -913,7 +964,7 @@ static void drop_mm_ref_this_cpu(void *info)
struct mm_struct *mm = info;
if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
- leave_mm(smp_processor_id());
+ leave_mm();
/*
* If this cpu still has a stale cr3 reference, then make sure
@@ -1059,7 +1110,7 @@ static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
pte_t *pte_tbl;
int i;
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PMD_SIZE);
return;
@@ -1082,7 +1133,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
pmd_t *pmd_tbl;
int i;
- if (pud_large(*pud)) {
+ if (pud_leaf(*pud)) {
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PUD_SIZE);
return;
@@ -1104,7 +1155,7 @@ static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
pud_t *pud_tbl;
int i;
- if (p4d_large(*p4d)) {
+ if (p4d_leaf(*p4d)) {
pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, P4D_SIZE);
return;
@@ -1554,7 +1605,8 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
+ if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) &&
+ !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(XEN_LAZY_MMU);
@@ -1582,7 +1634,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
if (pinned) {
xen_mc_batch();
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
__set_pfn_prot(pfn, PAGE_KERNEL);
@@ -1863,7 +1915,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
if (!pud_present(pud))
return 0;
pa = pud_val(pud) & PTE_PFN_MASK;
- if (pud_large(pud))
+ if (pud_leaf(pud))
return pa + (vaddr & ~PUD_MASK);
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
@@ -1871,7 +1923,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
if (!pmd_present(pmd))
return 0;
pa = pmd_val(pmd) & PTE_PFN_MASK;
- if (pmd_large(pmd))
+ if (pmd_leaf(pmd))
return pa + (vaddr & ~PMD_MASK);
pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
@@ -2019,10 +2071,7 @@ void __init xen_reserve_special_pages(void)
void __init xen_pt_check_e820(void)
{
- if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
- xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
- BUG();
- }
+ xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
}
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
@@ -2202,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
memset(dummy_mapping, 0xff, PAGE_SIZE);
}
-/* Protected by xen_reservation_lock. */
-#define MAX_CONTIG_ORDER 9 /* 2MB */
-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
unsigned long *in_frames,
@@ -2322,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
- unsigned long *in_frames = discontig_frames, out_frame;
+ unsigned long *in_frames, out_frame;
unsigned long flags;
int success;
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
- if (unlikely(order > MAX_CONTIG_ORDER))
- return -ENOMEM;
+ if (unlikely(order > discontig_frames_order)) {
+ if (!discontig_frames_dyn)
+ return -ENOMEM;
+
+ if (alloc_discontig_frames(order))
+ return -ENOMEM;
+ }
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
+ in_frames = discontig_frames;
+
/* 1. Zap current PTEs, remembering MFNs. */
xen_zap_pfn_range(vstart, order, in_frames, NULL);
@@ -2357,12 +2409,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
- unsigned long *out_frames = discontig_frames, in_frame;
+ unsigned long *out_frames, in_frame;
unsigned long flags;
int success;
unsigned long vstart;
- if (unlikely(order > MAX_CONTIG_ORDER))
+ if (unlikely(order > discontig_frames_order))
return;
vstart = (unsigned long)phys_to_virt(pstart);
@@ -2370,6 +2422,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
spin_lock_irqsave(&xen_reservation_lock, flags);
+ out_frames = discontig_frames;
+
/* 1. Find start MFN of contiguous extent. */
in_frame = virt_to_mfn((void *)vstart);
@@ -2520,7 +2574,7 @@ out:
}
EXPORT_SYMBOL_GPL(xen_remap_pfn);
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_VMCORE_INFO
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 07054572297f..10c660fae8b3 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -23,26 +23,21 @@
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
#include <asm/xen/hypercall.h>
-#include "multicalls.h"
-#include "debugfs.h"
+#include "xen-ops.h"
#define MC_BATCH 32
-#define MC_DEBUG 0
-
#define MC_ARGS (MC_BATCH * 16)
struct mc_buffer {
unsigned mcidx, argidx, cbidx;
struct multicall_entry entries[MC_BATCH];
-#if MC_DEBUG
- struct multicall_entry debug[MC_BATCH];
- void *caller[MC_BATCH];
-#endif
unsigned char args[MC_ARGS];
struct callback {
void (*fn)(void *);
@@ -50,13 +45,103 @@ struct mc_buffer {
} callbacks[MC_BATCH];
};
+struct mc_debug_data {
+ struct multicall_entry entries[MC_BATCH];
+ void *caller[MC_BATCH];
+ size_t argsz[MC_BATCH];
+ unsigned long *args[MC_BATCH];
+};
+
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
+static struct mc_debug_data mc_debug_data_early __initdata;
+static DEFINE_PER_CPU(struct mc_debug_data *, mc_debug_data) =
+ &mc_debug_data_early;
+static struct mc_debug_data __percpu *mc_debug_data_ptr;
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
+static struct static_key mc_debug __ro_after_init;
+static bool mc_debug_enabled __initdata;
+
+static int __init xen_parse_mc_debug(char *arg)
+{
+ mc_debug_enabled = true;
+ static_key_slow_inc(&mc_debug);
+
+ return 0;
+}
+early_param("xen_mc_debug", xen_parse_mc_debug);
+
+void mc_percpu_init(unsigned int cpu)
+{
+ per_cpu(mc_debug_data, cpu) = per_cpu_ptr(mc_debug_data_ptr, cpu);
+}
+
+static int __init mc_debug_enable(void)
+{
+ unsigned long flags;
+
+ if (!mc_debug_enabled)
+ return 0;
+
+ mc_debug_data_ptr = alloc_percpu(struct mc_debug_data);
+ if (!mc_debug_data_ptr) {
+ pr_err("xen_mc_debug inactive\n");
+ static_key_slow_dec(&mc_debug);
+ return -ENOMEM;
+ }
+
+ /* Be careful when switching to percpu debug data. */
+ local_irq_save(flags);
+ xen_mc_flush();
+ mc_percpu_init(0);
+ local_irq_restore(flags);
+
+ pr_info("xen_mc_debug active\n");
+
+ return 0;
+}
+early_initcall(mc_debug_enable);
+
+/* Number of parameters of hypercalls used via multicalls. */
+static const uint8_t hpcpars[] = {
+ [__HYPERVISOR_mmu_update] = 4,
+ [__HYPERVISOR_stack_switch] = 2,
+ [__HYPERVISOR_fpu_taskswitch] = 1,
+ [__HYPERVISOR_update_descriptor] = 2,
+ [__HYPERVISOR_update_va_mapping] = 3,
+ [__HYPERVISOR_mmuext_op] = 4,
+};
+
+static void print_debug_data(struct mc_buffer *b, struct mc_debug_data *mcdb,
+ int idx)
+{
+ unsigned int arg;
+ unsigned int opidx = mcdb->entries[idx].op & 0xff;
+ unsigned int pars = 0;
+
+ pr_err(" call %2d: op=%lu result=%ld caller=%pS ", idx + 1,
+ mcdb->entries[idx].op, b->entries[idx].result,
+ mcdb->caller[idx]);
+ if (opidx < ARRAY_SIZE(hpcpars))
+ pars = hpcpars[opidx];
+ if (pars) {
+ pr_cont("pars=");
+ for (arg = 0; arg < pars; arg++)
+ pr_cont("%lx ", mcdb->entries[idx].args[arg]);
+ }
+ if (mcdb->argsz[idx]) {
+ pr_cont("args=");
+ for (arg = 0; arg < mcdb->argsz[idx] / 8; arg++)
+ pr_cont("%lx ", mcdb->args[idx][arg]);
+ }
+ pr_cont("\n");
+}
+
void xen_mc_flush(void)
{
struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_entry *mc;
+ struct mc_debug_data *mcdb = NULL;
int ret = 0;
unsigned long flags;
int i;
@@ -69,10 +154,11 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
-#if MC_DEBUG
- memcpy(b->debug, b->entries,
- b->mcidx * sizeof(struct multicall_entry));
-#endif
+ if (static_key_false(&mc_debug)) {
+ mcdb = __this_cpu_read(mc_debug_data);
+ memcpy(mcdb->entries, b->entries,
+ b->mcidx * sizeof(struct multicall_entry));
+ }
switch (b->mcidx) {
case 0:
@@ -103,21 +189,14 @@ void xen_mc_flush(void)
pr_err("%d of %d multicall(s) failed: cpu %d\n",
ret, b->mcidx, smp_processor_id());
for (i = 0; i < b->mcidx; i++) {
- if (b->entries[i].result < 0) {
-#if MC_DEBUG
- pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
- i + 1,
- b->debug[i].op,
- b->debug[i].args[0],
- b->entries[i].result,
- b->caller[i]);
-#else
+ if (static_key_false(&mc_debug)) {
+ print_debug_data(b, mcdb, i);
+ } else if (b->entries[i].result < 0) {
pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
i + 1,
b->entries[i].op,
b->entries[i].args[0],
b->entries[i].result);
-#endif
}
}
}
@@ -155,9 +234,13 @@ struct multicall_space __xen_mc_entry(size_t args)
}
ret.mc = &b->entries[b->mcidx];
-#if MC_DEBUG
- b->caller[b->mcidx] = __builtin_return_address(0);
-#endif
+ if (static_key_false(&mc_debug)) {
+ struct mc_debug_data *mcdb = __this_cpu_read(mc_debug_data);
+
+ mcdb->caller[b->mcidx] = __builtin_return_address(0);
+ mcdb->argsz[b->mcidx] = args;
+ mcdb->args[b->mcidx] = (unsigned long *)(&b->args[argidx]);
+ }
b->mcidx++;
ret.args = &b->args[argidx];
b->argidx = argidx + args;
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
deleted file mode 100644
index c3867b585e0d..000000000000
--- a/arch/x86/xen/multicalls.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_MULTICALLS_H
-#define _XEN_MULTICALLS_H
-
-#include <trace/events/xen.h>
-
-#include "xen-ops.h"
-
-/* Multicalls */
-struct multicall_space
-{
- struct multicall_entry *mc;
- void *args;
-};
-
-/* Allocate room for a multicall and its args */
-struct multicall_space __xen_mc_entry(size_t args);
-
-DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
-
-/* Call to start a batch of multiple __xen_mc_entry()s. Must be
- paired with xen_mc_issue() */
-static inline void xen_mc_batch(void)
-{
- unsigned long flags;
-
- /* need to disable interrupts until this entry is complete */
- local_irq_save(flags);
- trace_xen_mc_batch(xen_get_lazy_mode());
- __this_cpu_write(xen_mc_irq_flags, flags);
-}
-
-static inline struct multicall_space xen_mc_entry(size_t args)
-{
- xen_mc_batch();
- return __xen_mc_entry(args);
-}
-
-/* Flush all pending multicalls */
-void xen_mc_flush(void);
-
-/* Issue a multicall if we're not in a lazy mode */
-static inline void xen_mc_issue(unsigned mode)
-{
- trace_xen_mc_issue(mode);
-
- if ((xen_get_lazy_mode() & mode) == 0)
- xen_mc_flush();
-
- /* restore flags saved in xen_mc_batch */
- local_irq_restore(this_cpu_read(xen_mc_irq_flags));
-}
-
-/* Set up a callback to be called when the current batch is flushed */
-void xen_mc_callback(void (*fn)(void *), void *data);
-
-/*
- * Try to extend the arguments of the previous multicall command. The
- * previous command's op must match. If it does, then it attempts to
- * extend the argument space allocated to the multicall entry by
- * arg_size bytes.
- *
- * The returned multicall_space will return with mc pointing to the
- * command on success, or NULL on failure, and args pointing to the
- * newly allocated space.
- */
-struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
-
-#endif /* _XEN_MULTICALLS_H */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 9bdc3b656b2c..56914e21e303 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -70,6 +70,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/acpi.h>
#include <asm/cache.h>
#include <asm/setup.h>
@@ -80,8 +81,8 @@
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
+#include <xen/hvc-console.h>
-#include "multicalls.h"
#include "xen-ops.h"
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
@@ -177,13 +178,7 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
static void * __ref alloc_p2m_page(void)
{
if (unlikely(!slab_is_available())) {
- void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-
- if (!ptr)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
-
- return ptr;
+ return memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
return (void *)__get_free_page(GFP_KERNEL);
@@ -555,7 +550,6 @@ int xen_alloc_p2m_entry(unsigned long pfn)
/* Separately check the mid mfn level */
unsigned long missing_mfn;
unsigned long mid_mfn_mfn;
- unsigned long old_mfn;
mid_mfn = alloc_p2m_page();
if (!mid_mfn)
@@ -565,12 +559,12 @@ int xen_alloc_p2m_entry(unsigned long pfn)
missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
mid_mfn_mfn = virt_to_mfn(mid_mfn);
- old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
- if (old_mfn != missing_mfn) {
- free_p2m_page(mid_mfn);
- mid_mfn = mfn_to_virt(old_mfn);
- } else {
+ /* try_cmpxchg() updates missing_mfn on failure. */
+ if (try_cmpxchg(top_mfn_p, &missing_mfn, mid_mfn_mfn)) {
p2m_top_mfn_p[topidx] = mid_mfn;
+ } else {
+ free_p2m_page(mid_mfn);
+ mid_mfn = mfn_to_virt(missing_mfn);
}
}
} else {
@@ -731,7 +725,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
- unmap[0].host_addr = map_ops[i].host_addr,
+ unmap[0].host_addr = map_ops[i].host_addr;
unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
@@ -741,7 +735,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error;
- unmap[1].host_addr = kmap_ops[i].host_addr,
+ unmap[1].host_addr = kmap_ops[i].host_addr;
unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = INVALID_GRANT_HANDLE;
if (kmap_ops[i].flags & GNTMAP_device_map)
@@ -794,9 +788,104 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
}
+/* Remapped non-RAM areas */
+#define NR_NONRAM_REMAP 4
+static struct nonram_remap {
+ phys_addr_t maddr;
+ phys_addr_t paddr;
+ size_t size;
+} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
+static unsigned int nr_nonram_remap __ro_after_init;
+
+/*
+ * Do the real remapping of non-RAM regions as specified in the
+ * xen_nonram_remap[] array.
+ * In case of an error just crash the system.
+ */
+void __init xen_do_remap_nonram(void)
+{
+ unsigned int i;
+ unsigned int remapped = 0;
+ const struct nonram_remap *remap = xen_nonram_remap;
+ unsigned long pfn, mfn, end_pfn;
+
+ for (i = 0; i < nr_nonram_remap; i++) {
+ end_pfn = PFN_UP(remap->paddr + remap->size);
+ pfn = PFN_DOWN(remap->paddr);
+ mfn = PFN_DOWN(remap->maddr);
+ while (pfn < end_pfn) {
+ if (!set_phys_to_machine(pfn, mfn))
+ panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
+ pfn, mfn);
+
+ pfn++;
+ mfn++;
+ remapped++;
+ }
+
+ remap++;
+ }
+
+ pr_info("Remapped %u non-RAM page(s)\n", remapped);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
+ * regions into account.
+ * Any attempt to map an area crossing a remap boundary will produce a
+ * WARN() splat.
+ * phys is related to remap->maddr on input and will be rebased to remap->paddr.
+ */
+static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
+ acpi_size size)
+{
+ unsigned int i;
+ const struct nonram_remap *remap = xen_nonram_remap;
+
+ for (i = 0; i < nr_nonram_remap; i++) {
+ if (phys + size > remap->maddr &&
+ phys < remap->maddr + remap->size) {
+ WARN_ON(phys < remap->maddr ||
+ phys + size > remap->maddr + remap->size);
+ phys += remap->paddr - remap->maddr;
+ break;
+ }
+ }
+
+ return x86_acpi_os_ioremap(phys, size);
+}
+#endif /* CONFIG_ACPI */
+
+/*
+ * Add a new non-RAM remap entry.
+ * In case of no free entry found, just crash the system.
+ */
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+ unsigned long size)
+{
+ BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));
+
+ if (nr_nonram_remap == NR_NONRAM_REMAP) {
+ xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
+ BUG();
+ }
+
+#ifdef CONFIG_ACPI
+ /* Switch to the Xen acpi_os_ioremap() variant. */
+ if (nr_nonram_remap == 0)
+ acpi_os_ioremap = xen_acpi_os_ioremap;
+#endif
+
+ xen_nonram_remap[nr_nonram_remap].maddr = maddr;
+ xen_nonram_remap[nr_nonram_remap].paddr = paddr;
+ xen_nonram_remap[nr_nonram_remap].size = size;
+
+ nr_nonram_remap++;
+}
+
#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
-#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
static const char * const type_name[] = {
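Aside (not part of the patch): the xen_acpi_os_ioremap() hunk above rebases an address from its original machine-address range into the physical range the data was moved to. A self-contained sketch of that lookup-and-rebase step, with an invented remap table purely for illustration:

/* Illustrative sketch only; not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct nonram_remap {
	uint64_t maddr;	/* original machine address of the range */
	uint64_t paddr;	/* physical address the range was moved to */
	uint64_t size;
};

static const struct nonram_remap remap_tab[] = {
	{ .maddr = 0x000f0000, .paddr = 0x40000000, .size = 0x10000 },
};

/* Rebase addr into paddr-space if it falls inside a remapped range. */
static uint64_t rebase(uint64_t addr, uint64_t size)
{
	for (size_t i = 0; i < sizeof(remap_tab) / sizeof(remap_tab[0]); i++) {
		const struct nonram_remap *r = &remap_tab[i];

		if (addr + size > r->maddr && addr < r->maddr + r->size)
			return addr + r->paddr - r->maddr;
	}
	return addr;	/* untouched ranges map through unchanged */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)rebase(0x000f1234, 0x100));
	return 0;
}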
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 246d67dab510..f06987b0efc3 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -10,7 +10,6 @@
#include <xen/interface/xenpmu.h>
#include "xen-ops.h"
-#include "pmu.h"
/* x86_pmu.handle_irq definition */
#include "../events/perf_event.h"
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
deleted file mode 100644
index 65c58894fc79..000000000000
--- a/arch/x86/xen/pmu.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __XEN_PMU_H
-#define __XEN_PMU_H
-
-#include <xen/interface/xenpmu.h>
-
-extern bool is_xen_pmu;
-
-irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
-#ifdef CONFIG_XEN_HAVE_VPMU
-void xen_pmu_init(int cpu);
-void xen_pmu_finish(int cpu);
-#else
-static inline void xen_pmu_init(int cpu) {}
-static inline void xen_pmu_finish(int cpu) {}
-#endif
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
-int pmu_apic_update(uint32_t reg);
-unsigned long long xen_read_pmc(int counter);
-
-#endif /* __XEN_PMU_H */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b3e37961065a..c3db71d96c43 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -15,12 +15,12 @@
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
+#include <linux/acpi.h>
#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
-#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
@@ -34,13 +34,9 @@
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "mmu.h"
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
-/* Amount of extra memory space we add to the e820 ranges */
-struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
-
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
@@ -50,6 +46,9 @@ bool xen_pv_pci_possible;
/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;
+/* Number of initially usable memory pages. */
+static unsigned long ini_nr_pages __initdata;
+
/*
* Buffer used to remap identity mapped pages. We only need the virtual space.
* The physical page behind this address is remapped as needed to different
@@ -64,18 +63,6 @@ static struct {
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
-/*
- * The maximum amount of extra memory compared to the base size. The
- * main scaling factor is the size of struct page. At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define EXTRA_MEM_RATIO (10)
-
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
static void __init xen_parse_512gb(void)
@@ -96,35 +83,6 @@ static void __init xen_parse_512gb(void)
xen_512gb_limit = val;
}
-static void __init xen_add_extra_mem(unsigned long start_pfn,
- unsigned long n_pfns)
-{
- int i;
-
- /*
- * No need to check for zero size, should happen rarely and will only
- * write a new entry regarded to be unused due to zero size.
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
- /* Add new region. */
- if (xen_extra_mem[i].n_pfns == 0) {
- xen_extra_mem[i].start_pfn = start_pfn;
- xen_extra_mem[i].n_pfns = n_pfns;
- break;
- }
- /* Append to existing region. */
- if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
- start_pfn) {
- xen_extra_mem[i].n_pfns += n_pfns;
- break;
- }
- }
- if (i == XEN_EXTRA_MEM_MAX_REGIONS)
- printk(KERN_WARNING "Warning: not enough extra memory regions\n");
-
- memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
-}
-
static void __init xen_del_extra_mem(unsigned long start_pfn,
unsigned long n_pfns)
{
@@ -257,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long nr_pages)
+ unsigned long end_pfn)
{
unsigned long pfn, end;
int ret;
@@ -265,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN_ON(start_pfn > end_pfn);
/* Release pages first. */
- end = min(end_pfn, nr_pages);
+ end = min(end_pfn, ini_nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -386,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
* to Xen and not remapped.
*/
static unsigned long __init xen_set_identity_and_remap_chunk(
- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
- unsigned long remap_pfn)
+ unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{
unsigned long pfn;
unsigned long i = 0;
unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0)
- remap_pfn = nr_pages;
+ remap_pfn = ini_nr_pages;
while (i < n) {
unsigned long cur_pfn = start_pfn + i;
@@ -403,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */
- if (cur_pfn >= nr_pages) {
+ if (cur_pfn >= ini_nr_pages) {
/* Identity map remaining pages */
set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
- if (cur_pfn + size > nr_pages)
- size = nr_pages - cur_pfn;
+ if (cur_pfn + size > ini_nr_pages)
+ size = ini_nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn);
if (!remap_range_size) {
pr_warn("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages);
+ cur_pfn + left);
break;
}
/* Adjust size to fit in current e820 RAM region */
@@ -442,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
}
static unsigned long __init xen_count_remap_pages(
- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+ unsigned long start_pfn, unsigned long end_pfn,
unsigned long remap_pages)
{
- if (start_pfn >= nr_pages)
+ if (start_pfn >= ini_nr_pages)
return remap_pages;
- return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+ return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
}
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+static unsigned long __init xen_foreach_remap_area(
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
- unsigned long nr_pages, unsigned long last_val))
+ unsigned long last_val))
{
phys_addr_t start = 0;
unsigned long ret_val = 0;
@@ -481,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn)
- ret_val = func(start_pfn, end_pfn, nr_pages,
- ret_val);
+ ret_val = func(start_pfn, end_pfn, ret_val);
start = end;
}
}
@@ -539,6 +495,8 @@ void __init xen_remap_memory(void)
set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
pr_info("Remapped %ld page(s)\n", remapped);
+
+ xen_do_remap_nonram();
}
static unsigned long __init xen_get_pages_limit(void)
@@ -612,7 +570,7 @@ static void __init xen_ignore_unusable(void)
}
}
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
struct e820_entry *entry;
unsigned mapcnt;
@@ -670,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
}
/*
+ * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
+ * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
+ * The adaption of the P2M must be deferred until page allocation is possible.
+ */
+static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
+{
+ struct e820_entry *entry;
+ unsigned int mapcnt;
+ phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
+ phys_addr_t swap_addr, swap_size, entry_end;
+
+ swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
+ swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
+ entry = xen_e820_table.entries;
+
+ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
+ entry_end = entry->addr + entry->size;
+ if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
+ entry_end - swap_size >= mem_end) {
+ /* Reduce RAM entry by needed space (whole pages). */
+ entry->size -= swap_size;
+
+ /* Add new entry at the end of E820 map. */
+ entry = xen_e820_table.entries +
+ xen_e820_table.nr_entries;
+ xen_e820_table.nr_entries++;
+
+ /* Fill new entry (keep size and page offset). */
+ entry->type = swap_entry->type;
+ entry->addr = entry_end - swap_size +
+ swap_addr - swap_entry->addr;
+ entry->size = swap_entry->size;
+
+ /* Convert old entry to RAM, align to pages. */
+ swap_entry->type = E820_TYPE_RAM;
+ swap_entry->addr = swap_addr;
+ swap_entry->size = swap_size;
+
+ /* Remember PFN<->MFN relation for P2M update. */
+ xen_add_remap_nonram(swap_addr, entry_end - swap_size,
+ swap_size);
+
+ /* Order E820 table and merge entries. */
+ e820__update_table(&xen_e820_table);
+
+ return;
+ }
+
+ entry++;
+ }
+
+ xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
+ BUG();
+}
+
+/*
+ * Look for non-RAM memory types in a specific guest physical area and move
+ * those away if possible (ACPI NVS only for now).
+ */
+static void __init xen_e820_resolve_conflicts(phys_addr_t start,
+ phys_addr_t size)
+{
+ struct e820_entry *entry;
+ unsigned int mapcnt;
+ phys_addr_t end;
+
+ if (!size)
+ return;
+
+ end = start + size;
+ entry = xen_e820_table.entries;
+
+ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
+ if (entry->addr >= end)
+ return;
+
+ if (entry->addr + entry->size > start &&
+ entry->type == E820_TYPE_NVS)
+ xen_e820_swap_entry_with_ram(entry);
+
+ entry++;
+ }
+}
+
+/*
+ * Check for an area in physical memory to be usable for non-movable purposes.
+ * An area is considered to usable if the used E820 map lists it to be RAM or
+ * some other type which can be moved to higher PFNs while keeping the MFNs.
+ * In case the area is not usable, crash the system with an error message.
+ */
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+ const char *component)
+{
+ xen_e820_resolve_conflicts(start, size);
+
+ if (!xen_is_e820_reserved(start, size))
+ return;
+
+ xen_raw_console_write("Xen hypervisor allocated ");
+ xen_raw_console_write(component);
+ xen_raw_console_write(" memory conflicts with E820 map\n");
+ BUG();
+}
+
+/*
* Like memcpy, but with physical addresses for dest and src.
*/
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
@@ -728,20 +791,20 @@ static void __init xen_reserve_xen_mfnlist(void)
**/
char * __init xen_memory_setup(void)
{
- unsigned long max_pfn, pfn_s, n_pfns;
+ unsigned long pfn_s, n_pfns;
phys_addr_t mem_end, addr, size, chunk_size;
u32 type;
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
+ unsigned long maxmem_pages;
int i;
int op;
xen_parse_512gb();
- max_pfn = xen_get_pages_limit();
- max_pfn = min(max_pfn, xen_start_info->nr_pages);
- mem_end = PFN_PHYS(max_pfn);
+ ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
+ mem_end = PFN_PHYS(ini_nr_pages);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
@@ -791,13 +854,35 @@ char * __init xen_memory_setup(void)
/* Make sure the Xen-supplied memory map is well-ordered. */
e820__update_table(&xen_e820_table);
+ /*
+ * Check whether the kernel itself conflicts with the target E820 map.
+ * Failing now is better than running into weird problems later due
+ * to relocating (and even reusing) pages with kernel text or data.
+ */
+ xen_chk_is_e820_usable(__pa_symbol(_text),
+ __pa_symbol(_end) - __pa_symbol(_text),
+ "kernel");
+
+ /*
+ * Check for a conflict of the xen_start_info memory with the target
+ * E820 map.
+ */
+ xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
+ "xen_start_info");
+
+ /*
+ * Check for a conflict of the hypervisor supplied page tables with
+ * the target E820 map.
+ */
+ xen_pt_check_e820();
+
max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */
- max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+ max_pages += xen_foreach_remap_area(xen_count_remap_pages);
- if (max_pages > max_pfn)
- extra_pages += max_pages - max_pfn;
+ if (max_pages > ini_nr_pages)
+ extra_pages += max_pages - ini_nr_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -806,8 +891,8 @@ char * __init xen_memory_setup(void)
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
- extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
- extra_pages, max_pages - max_pfn);
+ maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
+ extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
i = 0;
addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size;
@@ -863,23 +948,6 @@ char * __init xen_memory_setup(void)
e820__update_table(e820_table);
- /*
- * Check whether the kernel itself conflicts with the target E820 map.
- * Failing now is better than running into weird problems later due
- * to relocating (and even reusing) pages with kernel text or data.
- */
- if (xen_is_e820_reserved(__pa_symbol(_text),
- __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
- xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
- BUG();
- }
-
- /*
- * Check for a conflict of the hypervisor supplied page tables with
- * the target E820 map.
- */
- xen_pt_check_e820();
-
xen_reserve_xen_mfnlist();
/* Check for a conflict of the initrd with the target E820 map. */
@@ -907,7 +975,7 @@ char * __init xen_memory_setup(void)
* Set identity map on non-RAM pages and prepare remapping the
* underlying RAM.
*/
- xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+ xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4b0d6fff88de..05f92c812ac8 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -9,7 +9,6 @@
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "smp.h"
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
@@ -65,6 +64,8 @@ int xen_smp_intr_init(unsigned int cpu)
char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+ if (!resched_name)
+ goto fail_mem;
per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu,
@@ -77,6 +78,8 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_resched_irq, cpu).irq = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu,
@@ -90,6 +93,9 @@ int xen_smp_intr_init(unsigned int cpu)
if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+ if (!debug_name)
+ goto fail_mem;
+
per_cpu(xen_debug_irq, cpu).name = debug_name;
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt,
@@ -101,6 +107,9 @@ int xen_smp_intr_init(unsigned int cpu)
}
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
+
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu,
@@ -114,6 +123,8 @@ int xen_smp_intr_init(unsigned int cpu)
return 0;
+ fail_mem:
+ rc = -ENOMEM;
fail:
xen_smp_intr_free(cpu);
return rc;
@@ -123,8 +134,6 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)
{
if (xen_hvm_domain())
native_smp_cpus_done(max_cpus);
- else
- calculate_max_logical_packages();
}
void xen_smp_send_reschedule(int cpu)
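Each kasprintf() in xen_smp_intr_init() now gains the same NULL check feeding the new fail_mem label, which sets rc = -ENOMEM and falls into the existing xen_smp_intr_free() cleanup. A small self-contained program showing the same shape, with invented userspace stand-ins for the kernel helpers:

	/* Standalone illustration of the allocation-failure pattern added above. */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int intr_init_example(int cpu)
	{
		char *resched_name = NULL, *callfunc_name = NULL;
		char buf[32];
		int rc;

		snprintf(buf, sizeof(buf), "resched%d", cpu);
		resched_name = strdup(buf);		/* stands in for kasprintf() */
		if (!resched_name)
			goto fail_mem;

		snprintf(buf, sizeof(buf), "callfunc%d", cpu);
		callfunc_name = strdup(buf);
		if (!callfunc_name)
			goto fail_mem;

		printf("allocated %s and %s\n", resched_name, callfunc_name);
		free(resched_name);
		free(callfunc_name);
		return 0;

	fail_mem:
		rc = -ENOMEM;
		free(resched_name);			/* free(NULL) is a no-op; mirrors xen_smp_intr_free() */
		free(callfunc_name);
		return rc;
	}

	int main(void)
	{
		return intr_init_example(0) ? EXIT_FAILURE : EXIT_SUCCESS;
	}
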
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
deleted file mode 100644
index c20cbb14c82b..000000000000
--- a/arch/x86/xen/smp.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_SMP_H
-
-#ifdef CONFIG_SMP
-
-void asm_cpu_bringup_and_idle(void);
-asmlinkage void cpu_bringup_and_idle(void);
-
-extern void xen_send_IPI_mask(const struct cpumask *mask,
- int vector);
-extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector);
-extern void xen_send_IPI_allbutself(int vector);
-extern void xen_send_IPI_all(int vector);
-extern void xen_send_IPI_self(int vector);
-
-extern int xen_smp_intr_init(unsigned int cpu);
-extern void xen_smp_intr_free(unsigned int cpu);
-int xen_smp_intr_init_pv(unsigned int cpu);
-void xen_smp_intr_free_pv(unsigned int cpu);
-
-void xen_smp_cpus_done(unsigned int max_cpus);
-
-void xen_smp_send_reschedule(int cpu);
-void xen_smp_send_call_function_ipi(const struct cpumask *mask);
-void xen_smp_send_call_function_single_ipi(int cpu);
-
-void __noreturn xen_cpu_bringup_again(unsigned long stack);
-
-struct xen_common_irq {
- int irq;
- char *name;
-};
-#else /* CONFIG_SMP */
-
-static inline int xen_smp_intr_init(unsigned int cpu)
-{
- return 0;
-}
-static inline void xen_smp_intr_free(unsigned int cpu) {}
-
-static inline int xen_smp_intr_init_pv(unsigned int cpu)
-{
- return 0;
-}
-static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
-#endif /* CONFIG_SMP */
-
-#endif
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index ac95d1981cc0..485c1d8804f7 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -5,8 +5,6 @@
#include <xen/events.h>
#include "xen-ops.h"
-#include "smp.h"
-
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index a0f07bbfcd6e..6863d3da7dec 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -29,6 +29,7 @@
#include <asm/idtentry.h>
#include <asm/desc.h>
#include <asm/cpu.h>
+#include <asm/apic.h>
#include <asm/io_apic.h>
#include <xen/interface/xen.h>
@@ -45,9 +46,6 @@
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "mmu.h"
-#include "smp.h"
-#include "pmu.h"
cpumask_var_t xen_cpu_initialized_map;
@@ -73,7 +71,6 @@ static void cpu_bringup(void)
}
cpu = smp_processor_id();
smp_store_cpu_info(cpu);
- cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
speculative_store_bypass_ht_init();
@@ -149,39 +146,16 @@ int xen_smp_intr_init_pv(unsigned int cpu)
return rc;
}
-static void __init _get_smp_config(unsigned int early)
+static void __init xen_pv_smp_config(void)
{
- int i, rc;
- unsigned int subtract = 0;
-
- if (early)
- return;
-
- num_processors = 0;
- disabled_cpus = 0;
- for (i = 0; i < nr_cpu_ids; i++) {
- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
- if (rc >= 0) {
- num_processors++;
- set_cpu_possible(i, true);
- } else {
- set_cpu_possible(i, false);
- set_cpu_present(i, false);
- subtract++;
- }
- }
-#ifdef CONFIG_HOTPLUG_CPU
- /* This is akin to using 'nr_cpus' on the Linux command line.
- * Which is OK as when we use 'dom0_max_vcpus=X' we can only
- * have up to X, while nr_cpu_ids is greater than X. This
- * normally is not a problem, except when CPU hotplugging
- * is involved and then there might be more than X CPUs
- * in the guest - which will not work as there is no
- * hypercall to expand the max number of VCPUs an already
- * running guest has. So cap it up to X. */
- if (subtract)
- set_nr_cpu_ids(nr_cpu_ids - subtract);
-#endif
+ u32 apicid = 0;
+ int i;
+
+ topology_register_boot_apic(apicid);
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ topology_register_apic(apicid++, CPU_ACPIID_INVALID, true);
+
/* Pretend to be a proper enumerated system */
smp_found_config = 1;
}
@@ -224,8 +198,6 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
smp_prepare_cpus_common();
- cpu_data(0).x86_max_cores = 1;
-
speculative_store_bypass_ht_init();
xen_pmu_init(0);
@@ -333,6 +305,7 @@ static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
return rc;
xen_pmu_init(cpu);
+ mc_percpu_init(cpu);
/*
* Why is this a BUG? If the hypercall fails then everything can be
@@ -434,6 +407,20 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+void __init xen_smp_count_cpus(void)
+{
+ unsigned int cpus;
+
+ for (cpus = 0; cpus < nr_cpu_ids; cpus++) {
+ if (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpus, NULL) < 0)
+ break;
+ }
+
+	pr_info("Xen PV: Detected %u vCPUs\n", cpus);
+ if (cpus < nr_cpu_ids)
+ set_nr_cpu_ids(cpus);
+}
+
static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
@@ -458,6 +445,12 @@ void __init xen_smp_init(void)
smp_ops = xen_smp_ops;
/* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = _get_smp_config;
+ x86_init.mpparse.find_mptable = x86_init_noop;
+ x86_init.mpparse.early_parse_smp_cfg = x86_init_noop;
+
+	/* XEN/PV Dom0 has halfway sane topology information via CPUID/MADT */
+ if (xen_initial_domain())
+ x86_init.mpparse.parse_smp_cfg = x86_init_noop;
+ else
+ x86_init.mpparse.parse_smp_cfg = xen_pv_smp_config;
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 5c6fc16e4b92..8e4efe0fb6f9 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -18,7 +18,6 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
-static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
{
@@ -68,7 +67,7 @@ void xen_init_lock_cpu(int cpu)
int irq;
char *name;
- if (!xen_pvspin)
+ if (nopvspin)
return;
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
@@ -95,7 +94,7 @@ void xen_uninit_lock_cpu(int cpu)
{
int irq;
- if (!xen_pvspin)
+ if (nopvspin)
return;
kfree(per_cpu(irq_name, cpu));
@@ -125,10 +124,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
void __init xen_init_spinlocks(void)
{
/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
- if (num_possible_cpus() == 1 || nopvspin)
- xen_pvspin = false;
+ if (num_possible_cpus() == 1)
+ nopvspin = true;
- if (!xen_pvspin) {
+ if (nopvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
static_branch_disable(&virt_spin_lock_key);
return;
@@ -143,12 +142,3 @@ void __init xen_init_spinlocks(void)
pv_ops.lock.kick = xen_qlock_kick;
pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
-
-static __init int xen_parse_nopvspin(char *arg)
-{
- pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
- xen_pvspin = false;
- return 0;
-}
-early_param("xen_nopvspin", xen_parse_nopvspin);
-
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 1d83152c761b..77a6ea1c60e4 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -15,8 +15,6 @@
#include <asm/fixmap.h>
#include "xen-ops.h"
-#include "mmu.h"
-#include "pmu.h"
static DEFINE_PER_CPU(u64, spec_ctrl);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 52fa5609b7f6..96521b1874ac 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -30,7 +30,7 @@
#include "xen-ops.h"
/* Minimum amount of time until next clock event fires */
-#define TIMER_SLOP 100000
+#define TIMER_SLOP 1
static u64 xen_sched_clock_offset __read_mostly;
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
index d97adab8420f..f7547807b0bd 100644
--- a/arch/x86/xen/vga.c
+++ b/arch/x86/xen/vga.c
@@ -2,7 +2,6 @@
#include <linux/screen_info.h>
#include <linux/init.h>
-#include <asm/bootparam.h>
#include <asm/setup.h>
#include <xen/interface/xen.h>
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 1a9cd18dfbd3..b518f36d1ca2 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -20,15 +20,38 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <../entry/calling.h>
.pushsection .noinstr.text, "ax"
/*
+ * PV hypercall interface to the hypervisor.
+ *
+ * Called via inline asm(), so %rcx and %r11 (clobbered by SYSCALL) must be preserved.
+ *
+ * Input:
+ * %eax: hypercall number
+ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
+ * Output: %rax
+ */
+SYM_FUNC_START(xen_hypercall_pv)
+ ANNOTATE_NOENDBR
+ push %rcx
+ push %r11
+ UNWIND_HINT_SAVE
+ syscall
+ UNWIND_HINT_RESTORE
+ pop %r11
+ pop %rcx
+ RET
+SYM_FUNC_END(xen_hypercall_pv)
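With the hypercall page gone (see the xen-head.S changes below), PV callers enter the hypervisor through xen_hypercall_pv. A minimal sketch of what a two-argument caller could look like under the register convention documented above; this is an illustration only, not the kernel's actual hypercall macro, which also has to cope with alternatives patching:

	/* Illustrative caller; the real wrappers live in asm/xen/hypercall.h. */
	static inline long xen_hypercall2_sketch(unsigned int nr,
						 unsigned long a1, unsigned long a2)
	{
		register unsigned long _a1 asm("rdi") = a1;
		register unsigned long _a2 asm("rsi") = a2;
		long ret;

		/* No %rcx/%r11 clobbers needed: xen_hypercall_pv saves them around SYSCALL. */
		asm volatile("call xen_hypercall_pv"
			     : "=a" (ret), "+r" (_a1), "+r" (_a2)
			     : "0" ((unsigned long)nr)
			     : "memory");
		return ret;
	}
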
+
+/*
* Disabling events is simply a matter of making the event mask
* non-zero.
*/
SYM_FUNC_START(xen_irq_disable_direct)
- movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+ movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
RET
SYM_FUNC_END(xen_irq_disable_direct)
@@ -69,7 +92,7 @@ SYM_FUNC_END(check_events)
SYM_FUNC_START(xen_irq_enable_direct)
FRAME_BEGIN
/* Unmask events */
- movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+ movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
/*
* Preempt here doesn't matter because that will deal with any
@@ -78,7 +101,7 @@ SYM_FUNC_START(xen_irq_enable_direct)
*/
/* Test for pending */
- testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+ testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_pending)
jz 1f
call check_events
@@ -97,7 +120,7 @@ SYM_FUNC_END(xen_irq_enable_direct)
* x86 use opposite senses (mask vs enable).
*/
SYM_FUNC_START(xen_save_fl_direct)
- testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+ testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
setz %ah
addb %ah, %ah
RET
@@ -113,7 +136,7 @@ SYM_FUNC_END(xen_read_cr2);
SYM_FUNC_START(xen_read_cr2_direct)
FRAME_BEGIN
- _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
+ _ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
FRAME_END
RET
SYM_FUNC_END(xen_read_cr2_direct);
@@ -176,7 +199,6 @@ SYM_CODE_START(xen_early_idt_handler_array)
SYM_CODE_END(xen_early_idt_handler_array)
__FINIT
-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
* Xen64 iret frame:
*
@@ -186,17 +208,28 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
* cs
* rip <-- standard iret frame
*
- * flags
+ * flags <-- xen_iret must push from here on
*
- * rcx }
- * r11 }<-- pushed by hypercall page
- * rsp->rax }
+ * rcx
+ * r11
+ * rsp->rax
*/
+.macro xen_hypercall_iret
+ pushq $0 /* Flags */
+ push %rcx
+ push %r11
+ push %rax
+ mov $__HYPERVISOR_iret, %eax
+ syscall /* Do the IRET. */
+#ifdef CONFIG_MITIGATION_SLS
+ int3
+#endif
+.endm
+
SYM_CODE_START(xen_iret)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
- pushq $0
- jmp hypercall_iret
+ xen_hypercall_iret
SYM_CODE_END(xen_iret)
/*
@@ -301,8 +334,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat)
ENDBR
lea 16(%rsp), %rsp /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
- pushq $0
- jmp hypercall_iret
+ xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index a0ea285878db..894edf8d6d62 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -6,9 +6,11 @@
#include <linux/elfnote.h>
#include <linux/init.h>
+#include <linux/instrumentation.h>
#include <asm/boot.h>
#include <asm/asm.h>
+#include <asm/frame.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
@@ -20,28 +22,6 @@
#include <xen/interface/xen-mca.h>
#include <asm/xen/interface.h>
-.pushsection .noinstr.text, "ax"
- .balign PAGE_SIZE
-SYM_CODE_START(hypercall_page)
- .rept (PAGE_SIZE / 32)
- UNWIND_HINT_FUNC
- ANNOTATE_NOENDBR
- ANNOTATE_UNRET_SAFE
- ret
- /*
- * Xen will write the hypercall page, and sort out ENDBR.
- */
- .skip 31, 0xcc
- .endr
-
-#define HYPERCALL(n) \
- .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
- .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
-#include <asm/xen-hypercalls.h>
-#undef HYPERCALL
-SYM_CODE_END(hypercall_page)
-.popsection
-
#ifdef CONFIG_XEN_PV
__INIT
SYM_CODE_START(startup_xen)
@@ -49,7 +29,7 @@ SYM_CODE_START(startup_xen)
ANNOTATE_NOENDBR
cld
- leaq (__end_init_task - PTREGS_SIZE)(%rip), %rsp
+ leaq __top_init_kernel_stack(%rip), %rsp
/* Set up %gs.
*
@@ -87,6 +67,82 @@ SYM_CODE_END(xen_cpu_bringup_again)
#endif
#endif
+ .pushsection .noinstr.text, "ax"
+/*
+ * Xen hypercall interface to the hypervisor.
+ *
+ * Input:
+ * %eax: hypercall number
+ * 32-bit:
+ * %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall
+ * 64-bit:
+ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
+ * Output: %[er]ax
+ */
+SYM_FUNC_START(xen_hypercall_hvm)
+ ENDBR
+ FRAME_BEGIN
+ /* Save all relevant registers (caller save and arguments). */
+#ifdef CONFIG_X86_32
+ push %eax
+ push %ebx
+ push %ecx
+ push %edx
+ push %esi
+ push %edi
+#else
+ push %rax
+ push %rcx
+ push %rdx
+ push %rdi
+ push %rsi
+ push %r11
+ push %r10
+ push %r9
+ push %r8
+#endif
+ /* Set the vendor specific function. */
+ call __xen_hypercall_setfunc
+	/* Set ZF = 1 if AMD, then restore the saved registers. */
+#ifdef CONFIG_X86_32
+ lea xen_hypercall_amd, %ebx
+ cmp %eax, %ebx
+ pop %edi
+ pop %esi
+ pop %edx
+ pop %ecx
+ pop %ebx
+ pop %eax
+#else
+ lea xen_hypercall_amd(%rip), %rcx
+ cmp %rax, %rcx
+ pop %r8
+ pop %r9
+ pop %r10
+ pop %r11
+ pop %rsi
+ pop %rdi
+ pop %rdx
+ pop %rcx
+ pop %rax
+#endif
+ FRAME_END
+ /* Use correct hypercall function. */
+ jz xen_hypercall_amd
+ jmp xen_hypercall_intel
+SYM_FUNC_END(xen_hypercall_hvm)
+
+SYM_FUNC_START(xen_hypercall_amd)
+ vmmcall
+ RET
+SYM_FUNC_END(xen_hypercall_amd)
+
+SYM_FUNC_START(xen_hypercall_intel)
+ vmcall
+ RET
+SYM_FUNC_END(xen_hypercall_intel)
+ .popsection
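The ZF trick above relies on __xen_hypercall_setfunc() handing back the address of the vendor-specific entry point so xen_hypercall_hvm can compare it against xen_hypercall_amd. The selector itself is defined in enlighten.c and is not part of this diff; a rough sketch of what it plausibly does:

	/* Hypothetical sketch of the vendor selection; the real __xen_hypercall_setfunc()
	 * is outside this diff. */
	static void (*xen_hypercall_func)(void);

	void *__xen_hypercall_setfunc(void)
	{
		if (!xen_hypercall_func) {
			if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
			    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
				xen_hypercall_func = xen_hypercall_amd;
			else
				xen_hypercall_func = xen_hypercall_intel;
		}

		return xen_hypercall_func;
	}
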
+
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
@@ -94,7 +150,8 @@ SYM_CODE_END(xen_cpu_bringup_again)
ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __START_KERNEL_map)
/* Map the p2m table to a 512GB-aligned user address. */
ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD))
- ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .globl xen_elfnote_entry;
+ xen_elfnote_entry: _ASM_PTR xen_elfnote_entry_value - .)
ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables")
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
@@ -115,7 +172,6 @@ SYM_CODE_END(xen_cpu_bringup_again)
#else
# define FEATURES_DOM0 0
#endif
- ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
.long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0)
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a87ab36889e7..63c13a2ccf55 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -5,8 +5,15 @@
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/irqreturn.h>
+#include <linux/linkage.h>
+
+#include <xen/interface/xenpmu.h>
#include <xen/xen-ops.h>
+#include <asm/page.h>
+
+#include <trace/events/xen.h>
+
/* These are code, but not functions. Defined in entry.S */
extern const char xen_failsafe_callback[];
@@ -23,14 +30,11 @@ void xen_copy_trap_info(struct trap_info *traps);
DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
-DECLARE_PER_CPU(unsigned long, xen_current_cr3);
extern struct start_info *xen_start_info;
extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
-extern bool xen_fifo_events;
-
void xen_setup_mfn_list_list(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
@@ -43,8 +47,12 @@ void xen_mm_unpin_all(void);
#ifdef CONFIG_X86_64
void __init xen_relocate_p2m(void);
#endif
+void __init xen_do_remap_nonram(void);
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+ unsigned long size);
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+ const char *component);
unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
void __init xen_inv_extra_mem(void);
void __init xen_remap_memory(void);
@@ -163,4 +171,168 @@ void xen_hvm_post_suspend(int suspend_cancelled);
static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
#endif
+/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO (10)
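A quick sanity check on that ratio, assuming 4 KiB pages and a 64-byte struct page (both sizes are assumptions and vary with configuration):

	/* Back-of-the-envelope check for EXTRA_MEM_RATIO; the sizes are assumed,
	 * not taken from this diff. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096, struct_page = 64, ratio = 10;

		/* Fraction of base memory consumed by page structs for the extra memory. */
		double overhead = (double)(ratio * struct_page) / page_size;

		printf("struct page overhead: %.1f%% of base memory\n", overhead * 100.0);
		return 0;	/* prints 15.6%, leaving the base memory usable */
	}
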
+
+void xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns);
+
+struct dentry * __init xen_init_debugfs(void);
+
+enum pt_level {
+ PT_PGD,
+ PT_P4D,
+ PT_PUD,
+ PT_PMD,
+ PT_PTE
+};
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+unsigned long xen_read_cr2_direct(void);
+void xen_init_mmu_ops(void);
+void xen_hvm_init_mmu_ops(void);
+
+/* Multicalls */
+struct multicall_space
+{
+ struct multicall_entry *mc;
+ void *args;
+};
+
+/* Allocate room for a multicall and its args */
+struct multicall_space __xen_mc_entry(size_t args);
+
+DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
+
+/* Call to start a batch of multiple __xen_mc_entry()s. Must be
+ paired with xen_mc_issue() */
+static inline void xen_mc_batch(void)
+{
+ unsigned long flags;
+
+ /* need to disable interrupts until this entry is complete */
+ local_irq_save(flags);
+ trace_xen_mc_batch(xen_get_lazy_mode());
+ __this_cpu_write(xen_mc_irq_flags, flags);
+}
+
+static inline struct multicall_space xen_mc_entry(size_t args)
+{
+ xen_mc_batch();
+ return __xen_mc_entry(args);
+}
+
+/* Flush all pending multicalls */
+void xen_mc_flush(void);
+
+/* Issue a multicall if we're not in a lazy mode */
+static inline void xen_mc_issue(unsigned mode)
+{
+ trace_xen_mc_issue(mode);
+
+ if ((xen_get_lazy_mode() & mode) == 0)
+ xen_mc_flush();
+
+ /* restore flags saved in xen_mc_batch */
+ local_irq_restore(this_cpu_read(xen_mc_irq_flags));
+}
+
+/* Set up a callback to be called when the current batch is flushed */
+void xen_mc_callback(void (*fn)(void *), void *data);
+
+/*
+ * Try to extend the arguments of the previous multicall command. The
+ * previous command's op must match. If it does, then it attempts to
+ * extend the argument space allocated to the multicall entry by
+ * arg_size bytes.
+ *
+ * On success, the returned multicall_space has mc pointing to the
+ * command and args pointing to the newly allocated argument space; on
+ * failure, mc is NULL.
+ */
+struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
+
+/* Do percpu data initialization for multicalls. */
+void mc_percpu_init(unsigned int cpu);
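The multicall declarations collected here follow the usual batch/entry/issue flow. A short sketch of how a caller typically strings them together; MULTI_update_va_mapping() and XEN_LAZY_MMU are taken from existing callers elsewhere in the tree and are assumptions as far as this diff is concerned:

	/* Illustrative use of the batching API in a kernel context. */
	static void example_set_pte(unsigned long va, pte_t pteval)
	{
		struct multicall_space mcs;

		xen_mc_batch();			/* disable IRQs and start a batch */

		mcs = xen_mc_entry(0);		/* no extra argument space needed */
		MULTI_update_va_mapping(mcs.mc, va, pteval, UVMF_INVLPG);

		xen_mc_issue(XEN_LAZY_MMU);	/* flush now unless in lazy MMU mode */
	}
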
+
+extern bool is_xen_pmu;
+
+irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
+#ifdef CONFIG_XEN_HAVE_VPMU
+void xen_pmu_init(int cpu);
+void xen_pmu_finish(int cpu);
+#else
+static inline void xen_pmu_init(int cpu) {}
+static inline void xen_pmu_finish(int cpu) {}
+#endif
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
+int pmu_apic_update(uint32_t reg);
+unsigned long long xen_read_pmc(int counter);
+
+#ifdef CONFIG_SMP
+
+void asm_cpu_bringup_and_idle(void);
+asmlinkage void cpu_bringup_and_idle(void);
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+int xen_smp_intr_init_pv(unsigned int cpu);
+void xen_smp_intr_free_pv(unsigned int cpu);
+
+void xen_smp_count_cpus(void);
+void xen_smp_cpus_done(unsigned int max_cpus);
+
+void xen_smp_send_reschedule(int cpu);
+void xen_smp_send_call_function_ipi(const struct cpumask *mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
+
+void __noreturn xen_cpu_bringup_again(unsigned long stack);
+
+struct xen_common_irq {
+ int irq;
+ char *name;
+};
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+ return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+
+static inline int xen_smp_intr_init_pv(unsigned int cpu)
+{
+ return 0;
+}
+static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
+static inline void xen_smp_count_cpus(void) { }
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_XEN_PV
+void xen_hypercall_pv(void);
+#endif
+void xen_hypercall_hvm(void);
+void xen_hypercall_amd(void);
+void xen_hypercall_intel(void);
+void xen_hypercall_setfunc(void);
+void *__xen_hypercall_setfunc(void);
+
#endif /* XEN_OPS_H */