Diffstat (limited to 'arch/arm/xen/enlighten.c')
-rw-r--r--   arch/arm/xen/enlighten.c   612
1 file changed, 440 insertions(+), 172 deletions(-)
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f71c37edca26..8655bc3d3634 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
@@ -15,21 +16,34 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include <asm/system_misc.h>
+#include <asm/efi.h>
 #include <linux/interrupt.h>
 #include <linux/irqreturn.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
+#include <linux/cpuidle.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/console.h>
+#include <linux/pvclock_gtod.h>
+#include <linux/reboot.h>
+#include <linux/time64.h>
+#include <linux/timekeeping.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/acpi.h>
+#include <linux/virtio_anchor.h>
 #include <linux/mm.h>
 
-struct start_info _xen_start_info;
+static struct start_info _xen_start_info;
 struct start_info *xen_start_info = &_xen_start_info;
-EXPORT_SYMBOL_GPL(xen_start_info);
+EXPORT_SYMBOL(xen_start_info);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
+EXPORT_SYMBOL(xen_domain_type);
 
 struct shared_info xen_dummy_shared_info;
 struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
@@ -37,205 +51,418 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 static struct vcpu_info __percpu *xen_vcpu_info;
 
+/* Linux <-> Xen vCPU id mapping */
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
+EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
+
 /* These are unused until we support booting "pre-ballooned" */
 unsigned long xen_released_pages;
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
-/* TODO: to be removed */
-__read_mostly int xen_have_vector_callback;
-EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+static __read_mostly unsigned int xen_events_irq;
+static __read_mostly phys_addr_t xen_grant_frames;
 
-int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
-EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
+#define GRANT_TABLE_INDEX 0
+#define EXT_REGION_INDEX  1
 
-static __read_mostly int xen_events_irq = -1;
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
 
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
-                            unsigned int domid)
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+                               int nr, struct page **pages)
 {
-        int rc;
-        struct xen_add_to_physmap_range xatp = {
-                .domid = DOMID_SELF,
-                .foreign_domid = domid,
-                .size = 1,
-                .space = XENMAPSPACE_gmfn_foreign,
-        };
-        xen_ulong_t idx = fgmfn;
-        xen_pfn_t gpfn = lpfn;
-        int err = 0;
-
-        set_xen_guest_handle(xatp.idxs, &idx);
-        set_xen_guest_handle(xatp.gpfns, &gpfn);
-        set_xen_guest_handle(xatp.errs, &err);
-
-        rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-        if (rc || err) {
-                pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
-                        rc, err, lpfn, fgmfn);
-                return 1;
-        }
-        return 0;
+        return xen_xlate_unmap_gfn_range(vma, nr, pages);
 }
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
-struct remap_data {
-        xen_pfn_t fgmfn; /* foreign domain's gmfn */
-        pgprot_t prot;
-        domid_t domid;
-        struct vm_area_struct *vma;
-        int index;
-        struct page **pages;
-        struct xen_remap_mfn_info *info;
-};
-
-static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-                        void *data)
+static void xen_read_wallclock(struct timespec64 *ts)
 {
-        struct remap_data *info = data;
-        struct page *page = info->pages[info->index++];
-        unsigned long pfn = page_to_pfn(page);
-        pte_t pte = pfn_pte(pfn, info->prot);
-
-        if (map_foreign_page(pfn, info->fgmfn, info->domid))
-                return -EFAULT;
-        set_pte_at(info->vma->vm_mm, addr, ptep, pte);
-
-        return 0;
+        u32 version;
+        struct timespec64 now, ts_monotonic;
+        struct shared_info *s = HYPERVISOR_shared_info;
+        struct pvclock_wall_clock *wall_clock = &(s->wc);
+
+        /* get wallclock at system boot */
+        do {
+                version = wall_clock->version;
+                rmb();          /* fetch version before time */
+                now.tv_sec = ((uint64_t)wall_clock->sec_hi << 32) | wall_clock->sec;
+                now.tv_nsec = wall_clock->nsec;
+                rmb();          /* fetch time before checking version */
+        } while ((wall_clock->version & 1) || (version != wall_clock->version));
+
+        /* time since system boot */
+        ktime_get_ts64(&ts_monotonic);
+        *ts = timespec64_add(now, ts_monotonic);
 }
 
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
-                               unsigned long addr,
-                               xen_pfn_t mfn, int nr,
-                               pgprot_t prot, unsigned domid,
-                               struct page **pages)
+static int xen_pvclock_gtod_notify(struct notifier_block *nb,
+                                   unsigned long was_set, void *priv)
 {
-        int err;
-        struct remap_data data;
+        /* Protected by the calling core code serialization */
+        static struct timespec64 next_sync;
 
-        /* TBD: Batching, current sole caller only does page at a time */
-        if (nr > 1)
-                return -EINVAL;
-
-        data.fgmfn = mfn;
-        data.prot = prot;
-        data.domid = domid;
-        data.vma = vma;
-        data.index = 0;
-        data.pages = pages;
-        err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
-                                  remap_pte_fn, &data);
-        return err;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
-                               int nr, struct page **pages)
-{
-        int i;
+        struct xen_platform_op op;
+        struct timespec64 now, system_time;
+        struct timekeeper *tk = priv;
 
-        for (i = 0; i < nr; i++) {
-                struct xen_remove_from_physmap xrp;
-                unsigned long rc, pfn;
+        now.tv_sec = tk->xtime_sec;
+        now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
+        system_time = timespec64_add(now, tk->wall_to_monotonic);
 
-                pfn = page_to_pfn(pages[i]);
+        /*
+         * We only take the expensive HV call when the clock was set
+         * or when the 11 minutes RTC synchronization time elapsed.
+         */
+        if (!was_set && timespec64_compare(&now, &next_sync) < 0)
+                return NOTIFY_OK;
+
+        op.cmd = XENPF_settime64;
+        op.u.settime64.mbz = 0;
+        op.u.settime64.secs = now.tv_sec;
+        op.u.settime64.nsecs = now.tv_nsec;
+        op.u.settime64.system_time = timespec64_to_ns(&system_time);
+        (void)HYPERVISOR_platform_op(&op);
+
+        /*
+         * Move the next drift compensation time 11 minutes
+         * ahead. That's emulating the sync_cmos_clock() update for
+         * the hardware RTC.
+         */
+        next_sync = now;
+        next_sync.tv_sec += 11 * 60;
 
-                xrp.domid = DOMID_SELF;
-                xrp.gpfn = pfn;
-                rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
-                if (rc) {
-                        pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
-                                pfn, rc);
-                        return rc;
-                }
-        }
-        return 0;
+        return NOTIFY_OK;
 }
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static void __init xen_percpu_init(void *unused)
+static struct notifier_block xen_pvclock_gtod_notifier = {
+        .notifier_call = xen_pvclock_gtod_notify,
+};
+
+static int xen_starting_cpu(unsigned int cpu)
 {
         struct vcpu_register_vcpu_info info;
         struct vcpu_info *vcpup;
         int err;
-        int cpu = get_cpu();
+
+        /*
+         * VCPUOP_register_vcpu_info cannot be called twice for the same
+         * vcpu, so if vcpu_info is already registered, just get out. This
+         * can happen with cpu-hotplug.
+         */
+        if (per_cpu(xen_vcpu, cpu) != NULL)
+                goto after_register_vcpu_info;
 
         pr_info("Xen: initializing cpu%d\n", cpu);
         vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-        info.mfn = __pa(vcpup) >> PAGE_SHIFT;
-        info.offset = offset_in_page(vcpup);
+        info.mfn = percpu_to_gfn(vcpup);
+        info.offset = xen_offset_in_page(vcpup);
 
-        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
+                                 &info);
         BUG_ON(err);
         per_cpu(xen_vcpu, cpu) = vcpup;
 
+after_register_vcpu_info:
         enable_percpu_irq(xen_events_irq, 0);
+        return 0;
 }
 
-static void xen_restart(char str, const char *cmd)
+static int xen_dying_cpu(unsigned int cpu)
 {
-        struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
+        disable_percpu_irq(xen_events_irq);
+        return 0;
+}
+
+void xen_reboot(int reason)
+{
+        struct sched_shutdown r = { .reason = reason };
         int rc;
+
         rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-        if (rc)
-                BUG();
+        BUG_ON(rc);
+}
+
+static int xen_restart(struct notifier_block *nb, unsigned long action,
+                       void *data)
+{
+        xen_reboot(SHUTDOWN_reboot);
+
+        return NOTIFY_DONE;
 }
 
+static struct notifier_block xen_restart_nb = {
+        .notifier_call = xen_restart,
+        .priority = 192,
+};
+
 static void xen_power_off(void)
 {
-        struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
-        int rc;
-        rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-        if (rc)
-                BUG();
+        xen_reboot(SHUTDOWN_poweroff);
+}
+
+static irqreturn_t xen_arm_callback(int irq, void *arg)
+{
+        xen_evtchn_do_upcall();
+        return IRQ_HANDLED;
+}
+
+static __initdata struct {
+        const char *compat;
+        const char *prefix;
+        const char *version;
+        bool found;
+} hyper_node = {"xen,xen", "xen,xen-", NULL, false};
+
+static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
+                                      int depth, void *data)
+{
+        const void *s = NULL;
+        int len;
+
+        if (depth != 1 || strcmp(uname, "hypervisor") != 0)
+                return 0;
+
+        if (of_flat_dt_is_compatible(node, hyper_node.compat))
+                hyper_node.found = true;
+
+        s = of_get_flat_dt_prop(node, "compatible", &len);
+        if (strlen(hyper_node.prefix) + 3 < len &&
+            !strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
+                hyper_node.version = s + strlen(hyper_node.prefix);
+
+        /*
+         * Check if Xen supports EFI by checking whether there is the
+         * "/hypervisor/uefi" node in DT. If so, runtime services are available
+         * through proxy functions (e.g. in case of Xen dom0 EFI implementation
+         * they call special hypercall which executes relevant EFI functions)
+         * and that is why they are always enabled.
+         */
+        if (IS_ENABLED(CONFIG_XEN_EFI)) {
+                if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
+                    !efi_runtime_disabled())
+                        set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+        }
+
+        return 0;
 }
 
 /*
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
  */
-#define GRANT_TABLE_PHYSADDR 0
-static int __init xen_guest_init(void)
+void __init xen_early_init(void)
 {
-        struct xen_add_to_physmap xatp;
-        static struct shared_info *shared_info_page = 0;
-        struct device_node *node;
-        int len;
-        const char *s = NULL;
-        const char *version = NULL;
-        const char *xen_prefix = "xen,xen-";
-        struct resource res;
-
-        node = of_find_compatible_node(NULL, NULL, "xen,xen");
-        if (!node) {
+        of_scan_flat_dt(fdt_find_hyper_node, NULL);
+        if (!hyper_node.found) {
                 pr_debug("No Xen support\n");
-                return 0;
+                return;
         }
-        s = of_get_property(node, "compatible", &len);
-        if (strlen(xen_prefix) + 3 < len &&
-                        !strncmp(xen_prefix, s, strlen(xen_prefix)))
-                version = s + strlen(xen_prefix);
-        if (version == NULL) {
+
+        if (hyper_node.version == NULL) {
                 pr_debug("Xen version not found\n");
-                return 0;
+                return;
         }
-        if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
-                return 0;
-        xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
-        xen_events_irq = irq_of_parse_and_map(node, 0);
-        pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-                        version, xen_events_irq, xen_hvm_resume_frames);
+
+        pr_info("Xen %s support found\n", hyper_node.version);
+
         xen_domain_type = XEN_HVM_DOMAIN;
 
         xen_setup_features();
+
         if (xen_feature(XENFEAT_dom0))
-                xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
+                xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
+
+        if (!console_set_on_cmdline && !xen_initial_domain())
+                add_preferred_console("hvc", 0, NULL);
+}
+
+static void __init xen_acpi_guest_init(void)
+{
+#ifdef CONFIG_ACPI
+        struct xen_hvm_param a;
+        int interrupt, trigger, polarity;
+
+        a.domid = DOMID_SELF;
+        a.index = HVM_PARAM_CALLBACK_IRQ;
+
+        if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
+            || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
+                xen_events_irq = 0;
+                return;
+        }
+
+        interrupt = a.value & 0xff;
+        trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
+                                         : ACPI_LEVEL_SENSITIVE;
+        polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
+                                          : ACPI_ACTIVE_HIGH;
+        xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
+#endif
+}
+
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+/*
+ * A type-less specific Xen resource which contains extended regions
+ * (unused regions of guest physical address space provided by the hypervisor).
+ */
+static struct resource xen_resource = {
+        .name = "Xen unused space",
+};
+
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+        struct device_node *np;
+        struct resource *regs, *tmp_res;
+        uint64_t min_gpaddr = -1, max_gpaddr = 0;
+        unsigned int i, nr_reg = 0;
+        int rc;
+
+        if (!xen_domain())
+                return -ENODEV;
+
+        if (!acpi_disabled)
+                return -ENODEV;
+
+        np = of_find_compatible_node(NULL, NULL, "xen,xen");
+        if (WARN_ON(!np))
+                return -ENODEV;
+
+        /* Skip region 0 which is reserved for grant table space */
+        while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
+                nr_reg++;
+
+        if (!nr_reg) {
+                pr_err("No extended regions are found\n");
+                of_node_put(np);
+                return -EINVAL;
+        }
+
+        regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
+        if (!regs) {
+                of_node_put(np);
+                return -ENOMEM;
+        }
+
+        /*
+         * Create resource from extended regions provided by the hypervisor to be
+         * used as unused address space for Xen scratch pages.
+         */
+        for (i = 0; i < nr_reg; i++) {
+                rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]);
+                if (rc)
+                        goto err;
+
+                if (max_gpaddr < regs[i].end)
+                        max_gpaddr = regs[i].end;
+                if (min_gpaddr > regs[i].start)
+                        min_gpaddr = regs[i].start;
+        }
+
+        xen_resource.start = min_gpaddr;
+        xen_resource.end = max_gpaddr;
+
+        /*
+         * Mark holes between extended regions as unavailable. The rest of that
+         * address space will be available for the allocation.
+         */
+        for (i = 1; i < nr_reg; i++) {
+                resource_size_t start, end;
+
+                /* There is an overlap between regions */
+                if (regs[i - 1].end + 1 > regs[i].start) {
+                        rc = -EINVAL;
+                        goto err;
+                }
+
+                /* There is no hole between regions */
+                if (regs[i - 1].end + 1 == regs[i].start)
+                        continue;
+
+                start = regs[i - 1].end + 1;
+                end = regs[i].start - 1;
+
+                tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+                if (!tmp_res) {
+                        rc = -ENOMEM;
+                        goto err;
+                }
+
+                tmp_res->name = "Unavailable space";
+                tmp_res->start = start;
+                tmp_res->end = end;
+
+                rc = insert_resource(&xen_resource, tmp_res);
+                if (rc) {
+                        pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
+                        kfree(tmp_res);
+                        goto err;
+                }
+        }
+
+        *res = &xen_resource;
+
+err:
+        of_node_put(np);
+        kfree(regs);
+        return rc;
+}
+#endif
+
+static void __init xen_dt_guest_init(void)
+{
+        struct device_node *xen_node;
+        struct resource res;
+
+        xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
+        if (!xen_node) {
+                pr_err("Xen support was detected before, but it has disappeared\n");
+                return;
+        }
+
+        xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+
+        if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
+                pr_err("Xen grant table region is not found\n");
+                of_node_put(xen_node);
+                return;
+        }
+        of_node_put(xen_node);
+        xen_grant_frames = res.start;
+}
+
+static int __init xen_guest_init(void)
+{
+        struct xen_add_to_physmap xatp;
+        struct shared_info *shared_info_page = NULL;
+        int rc, cpu;
+
+        if (!xen_domain())
+                return 0;
+
+        if (IS_ENABLED(CONFIG_XEN_VIRTIO))
+                virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
+
+        if (!acpi_disabled)
+                xen_acpi_guest_init();
         else
-                xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+                xen_dt_guest_init();
+
+        if (!xen_events_irq) {
+                pr_err("Xen event channel interrupt not found\n");
+                return -ENODEV;
+        }
+
+        /*
+         * The fdt parsing codes have set EFI_RUNTIME_SERVICES if Xen EFI
+         * parameters are found. Force enable runtime services.
+         */
+        if (efi_enabled(EFI_RUNTIME_SERVICES))
+                xen_efi_runtime_setup();
+
+        shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);
 
-        if (!shared_info_page)
-                shared_info_page = (struct shared_info *)
-                        get_zeroed_page(GFP_KERNEL);
         if (!shared_info_page) {
                 pr_err("not enough memory\n");
                 return -ENOMEM;
@@ -243,7 +470,7 @@ static int __init xen_guest_init(void)
         xatp.domid = DOMID_SELF;
         xatp.idx = 0;
         xatp.space = XENMAPSPACE_shared_info;
-        xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+        xatp.gpfn = virt_to_gfn(shared_info_page);
 
         if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                 BUG();
@@ -258,53 +485,91 @@ static int __init xen_guest_init(void)
          * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
          */
         xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
-                                       sizeof(struct vcpu_info));
+                                       1 << fls(sizeof(struct vcpu_info) - 1));
         if (xen_vcpu_info == NULL)
                 return -ENOMEM;
 
+        /* Direct vCPU id mapping for ARM guests. */
+        for_each_possible_cpu(cpu)
+                per_cpu(xen_vcpu_id, cpu) = cpu;
+
+        if (!xen_grant_frames) {
+                xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
+                rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
+                                                   &xen_auto_xlat_grant_frames.vaddr,
+                                                   xen_auto_xlat_grant_frames.count);
+        } else
+                rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
+        if (rc) {
+                free_percpu(xen_vcpu_info);
+                return rc;
+        }
         gnttab_init();
-        if (!xen_initial_domain())
-                xenbus_probe(NULL);
 
-        return 0;
-}
-core_initcall(xen_guest_init);
+        /*
+         * Making sure board specific code will not set up ops for
+         * cpu idle and cpu freq.
+         */
+        disable_cpuidle();
+        disable_cpufreq();
 
-static int __init xen_pm_init(void)
-{
-        pm_power_off = xen_power_off;
-        arm_pm_restart = xen_restart;
+        xen_init_IRQ();
 
-        return 0;
+        if (request_percpu_irq(xen_events_irq, xen_arm_callback,
+                               "events", &xen_vcpu)) {
+                pr_err("Error request IRQ %d\n", xen_events_irq);
+                return -EINVAL;
+        }
+
+        if (xen_initial_domain())
+                pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
+
+        return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
+                                 "arm/xen:starting", xen_starting_cpu,
+                                 xen_dying_cpu);
 }
-subsys_initcall(xen_pm_init);
+early_initcall(xen_guest_init);
 
-static irqreturn_t xen_arm_callback(int irq, void *arg)
+static int xen_starting_runstate_cpu(unsigned int cpu)
 {
-        xen_hvm_evtchn_do_upcall();
-        return IRQ_HANDLED;
+        xen_setup_runstate_info(cpu);
+        return 0;
 }
 
-static int __init xen_init_events(void)
+static int __init xen_late_init(void)
 {
-        if (!xen_domain() || xen_events_irq < 0)
+        if (!xen_domain())
                 return -ENODEV;
 
-        xen_init_IRQ();
-
-        if (request_percpu_irq(xen_events_irq, xen_arm_callback,
-                        "events", &xen_vcpu)) {
-                pr_err("Error requesting IRQ %d\n", xen_events_irq);
-                return -EINVAL;
+        register_platform_power_off(xen_power_off);
+        register_restart_handler(&xen_restart_nb);
+        if (!xen_initial_domain()) {
+                struct timespec64 ts;
+                xen_read_wallclock(&ts);
+                do_settimeofday64(&ts);
         }
 
-        on_each_cpu(xen_percpu_init, NULL, 0);
+        if (xen_kernel_unmapped_at_usr())
+                return 0;
+
+        xen_time_setup_guest();
 
-        return 0;
+        return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+                                 "arm/xen_runstate:starting",
                                 xen_starting_runstate_cpu, NULL);
 }
-postcore_initcall(xen_init_events);
+late_initcall(xen_late_init);
+
+
+/* empty stubs */
+void xen_arch_pre_suspend(void) { }
+void xen_arch_post_suspend(int suspend_cancelled) { }
+void xen_timer_resume(void) { }
+void xen_arch_resume(void) { }
+void xen_arch_suspend(void) { }
+
-/* In the hypervisor.S file. */
+/* In the hypercall.S file. */
 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
@@ -314,5 +579,8 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
-EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
+EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
+EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
+EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
 EXPORT_SYMBOL_GPL(privcmd_call);
