Diffstat (limited to 'drivers/hv/hv.c')
 -rw-r--r--  drivers/hv/hv.c | 568 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 415 insertions(+), 153 deletions(-)
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index e83507f49676..c100f04b3581 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -8,6 +8,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -17,12 +18,15 @@
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
+#include <linux/set_memory.h>
#include "hyperv_vmbus.h"
/* The one and only */
struct hv_context hv_context;
+EXPORT_SYMBOL_FOR_MODULES(hv_context, "mshv_vtl");
/*
* hv_init - Main initialization routine.
@@ -38,80 +42,136 @@ int hv_init(void)
}
/*
- * Functions for allocating and freeing memory with size and
- * alignment HV_HYP_PAGE_SIZE. These functions are needed because
- * the guest page size may not be the same as the Hyper-V page
- * size. We depend upon kmalloc() aligning power-of-two size
- * allocations to the allocation size boundary, so that the
- * allocated memory appears to Hyper-V as a page of the size
- * it expects.
- */
-
-void *hv_alloc_hyperv_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
-
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL);
- else
- return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- else
- return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-
-void hv_free_hyperv_page(unsigned long addr)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- free_page(addr);
- else
- kfree((void *)addr);
-}
-
-/*
* hv_post_message - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
*/
int hv_post_message(union hv_connection_id connection_id,
- enum hv_message_type message_type,
- void *payload, size_t payload_size)
+ enum hv_message_type message_type,
+ void *payload, size_t payload_size)
{
struct hv_input_post_message *aligned_msg;
- struct hv_per_cpu_context *hv_cpu;
+ unsigned long flags;
u64 status;
if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
return -EMSGSIZE;
- hv_cpu = get_cpu_ptr(hv_context.cpu_context);
- aligned_msg = hv_cpu->post_msg_page;
+ local_irq_save(flags);
+
+ /*
+ * A TDX VM with the paravisor must use the decrypted post_msg_page: see
+ * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
+ * can use the encrypted hyperv_pcpu_input_arg because it copies the
+ * input into the GHCB page, which has been decrypted by the paravisor.
+ */
+ if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
+ aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
+ else
+ aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
aligned_msg->connectionid = connection_id;
aligned_msg->reserved = 0;
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
+ if (ms_hyperv.paravisor_present && !vmbus_is_confidential()) {
+ /*
+ * If the VMBus isn't confidential, use the CoCo-specific
+ * mechanism to communicate with the hypervisor.
+ */
+ if (hv_isolation_type_tdx())
+ status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
+ virt_to_phys(aligned_msg), 0);
+ else if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = HV_STATUS_INVALID_PARAMETER;
+ } else {
+ u64 control = HVCALL_POST_MESSAGE;
+
+ control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
+		/*
+		 * With no paravisor, this hypercall goes to the hypervisor.
+		 * In the Confidential VMBus case, it traps to the paravisor
+		 * instead.
+		 */
+ status = hv_do_hypercall(control, aligned_msg, NULL);
+ }
- /* Preemption must remain disabled until after the hypercall
- * so some other thread can't get scheduled onto this cpu and
- * corrupt the per-cpu post_msg_page
- */
- put_cpu_ptr(hv_cpu);
+ local_irq_restore(flags);
return hv_result(status);
}
+EXPORT_SYMBOL_FOR_MODULES(hv_post_message, "mshv_vtl");
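Editor's note: a minimal caller sketch for hv_post_message(), modeled on the VMBus connect path (vmbus_post_msg()); the connection ID and the message type value 1 are illustrative assumptions, not part of this patch.

	static int example_post(void *payload, size_t payload_size)
	{
		union hv_connection_id conn_id;

		conn_id.asu32 = 0;
		conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;	/* assumed ID */

		/*
		 * The return value is an HV_STATUS_* code (0 == success);
		 * real callers retry on HV_STATUS_INSUFFICIENT_MEMORY.
		 */
		return hv_post_message(conn_id, 1, payload, payload_size);
	}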
+
+static int hv_alloc_page(void **page, bool decrypt, const char *note)
+{
+ int ret = 0;
+
+	/*
+	 * After the page changes its encryption status, its contents might
+	 * appear scrambled on some hardware. Zeroing in get_zeroed_page()
+	 * would therefore be wasted, so zero the page explicitly exactly
+	 * once, after any encryption change.
+	 *
+	 * By default, the page is allocated encrypted in a CoCo VM.
+	 */
+ *page = (void *)__get_free_page(GFP_KERNEL);
+ if (!*page)
+ return -ENOMEM;
+
+ if (decrypt)
+ ret = set_memory_decrypted((unsigned long)*page, 1);
+ if (ret)
+ goto failed;
+
+ memset(*page, 0, PAGE_SIZE);
+ return 0;
+
+failed:
+ /*
+ * Report the failure but don't put the page back on the free list as
+ * its encryption status is unknown.
+ */
+ pr_err("allocation failed for %s page, error %d, decrypted %d\n",
+ note, ret, decrypt);
+ *page = NULL;
+ return ret;
+}
+
+static int hv_free_page(void **page, bool encrypt, const char *note)
+{
+ int ret = 0;
+
+ if (!*page)
+ return 0;
+
+ if (encrypt)
+ ret = set_memory_encrypted((unsigned long)*page, 1);
+
+	/*
+	 * On failure, the page is leaked: something has gone wrong, and it
+	 * is better to lose a page whose encryption status is unknown than
+	 * to return it to the allocator.
+	 */
+ if (ret)
+ pr_err("deallocation failed for %s page, error %d, encrypt %d\n",
+ note, ret, encrypt);
+ else
+ free_page((unsigned long)*page);
+
+ *page = NULL;
+
+ return ret;
+}
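Editor's note: a sketch of the intended pairing of the two helpers above, assuming the page is shared with the hypervisor in between; the function name is illustrative.

	static int example_share_page(void)
	{
		const bool decrypt = !vmbus_is_confidential();
		void *page;
		int ret;

		ret = hv_alloc_page(&page, decrypt, "example");
		if (ret)
			return ret;

		/* ... hand the decrypted page to the hypervisor and use it ... */

		/*
		 * Re-encrypt before freeing; on failure hv_free_page() leaks
		 * the page rather than freeing memory whose encryption
		 * status is unknown.
		 */
		return hv_free_page(&page, decrypt, "example");
	}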
int hv_synic_alloc(void)
{
- int cpu;
+ int cpu, ret = -ENOMEM;
struct hv_per_cpu_context *hv_cpu;
+ const bool decrypt = !vmbus_is_confidential();
/*
* First, zero all per-cpu memory areas so hv_synic_free() can
@@ -125,7 +185,7 @@ int hv_synic_alloc(void)
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
- if (hv_context.hv_numa_map == NULL) {
+ if (!hv_context.hv_numa_map) {
pr_err("Unable to allocate NUMA map\n");
goto err;
}
@@ -134,163 +194,299 @@ int hv_synic_alloc(void)
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
tasklet_init(&hv_cpu->msg_dpc,
- vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ vmbus_on_msg_dpc, (unsigned long)hv_cpu);
- hv_cpu->synic_message_page =
- (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_message_page == NULL) {
- pr_err("Unable to allocate SYNIC message page\n");
- goto err;
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ ret = hv_alloc_page(&hv_cpu->post_msg_page,
+ decrypt, "post msg");
+ if (ret)
+ goto err;
}
- hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_event_page == NULL) {
- pr_err("Unable to allocate SYNIC event page\n");
- goto err;
+ /*
+ * If these SynIC pages are not allocated, SIEF and SIM pages
+ * are configured using what the root partition or the paravisor
+ * provides upon reading the SIEFP and SIMP registers.
+ */
+ if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
+ ret = hv_alloc_page(&hv_cpu->hyp_synic_message_page,
+ decrypt, "hypervisor SynIC msg");
+ if (ret)
+ goto err;
+ ret = hv_alloc_page(&hv_cpu->hyp_synic_event_page,
+ decrypt, "hypervisor SynIC event");
+ if (ret)
+ goto err;
}
- hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->post_msg_page == NULL) {
- pr_err("Unable to allocate post msg page\n");
- goto err;
+ if (vmbus_is_confidential()) {
+ ret = hv_alloc_page(&hv_cpu->para_synic_message_page,
+ false, "paravisor SynIC msg");
+ if (ret)
+ goto err;
+ ret = hv_alloc_page(&hv_cpu->para_synic_event_page,
+ false, "paravisor SynIC event");
+ if (ret)
+ goto err;
}
}
return 0;
+
err:
/*
* Any memory allocations that succeeded will be freed when
* the caller cleans up by calling hv_synic_free()
*/
- return -ENOMEM;
+ return ret;
}
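Editor's note: the per-CPU allocation policy implemented above, restated as predicates (a readability sketch, not part of the patch):

	static inline bool needs_post_msg_page(void)
	{
		/* Only TDX with a paravisor posts messages via a dedicated page. */
		return ms_hyperv.paravisor_present && hv_isolation_type_tdx();
	}

	static inline bool needs_hyp_synic_pages(void)
	{
		/* Otherwise SIMP/SIEFP come from the paravisor or root partition. */
		return !ms_hyperv.paravisor_present && !hv_root_partition();
	}

	static inline bool needs_para_synic_pages(void)
	{
		/* Confidential VMBus adds a private set of SynIC pages. */
		return vmbus_is_confidential();
	}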
-
void hv_synic_free(void)
{
int cpu;
+ const bool encrypt = !vmbus_is_confidential();
for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
-
- free_page((unsigned long)hv_cpu->synic_event_page);
- free_page((unsigned long)hv_cpu->synic_message_page);
- free_page((unsigned long)hv_cpu->post_msg_page);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx())
+ hv_free_page(&hv_cpu->post_msg_page,
+ encrypt, "post msg");
+ if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
+ hv_free_page(&hv_cpu->hyp_synic_event_page,
+ encrypt, "hypervisor SynIC event");
+ hv_free_page(&hv_cpu->hyp_synic_message_page,
+ encrypt, "hypervisor SynIC msg");
+ }
+ if (vmbus_is_confidential()) {
+ hv_free_page(&hv_cpu->para_synic_event_page,
+ false, "paravisor SynIC event");
+ hv_free_page(&hv_cpu->para_synic_message_page,
+ false, "paravisor SynIC msg");
+ }
}
kfree(hv_context.hv_numa_map);
}
/*
- * hv_synic_init - Initialize the Synthetic Interrupt Controller.
- *
- * If it is already initialized by another entity (ie x2v shim), we need to
- * retrieve the initialized message and event pages. Otherwise, we create and
- * initialize the message and event pages.
+ * hv_hyp_synic_enable_regs - Initialize the Synthetic Interrupt Controller
+ * with the hypervisor.
*/
-void hv_synic_enable_regs(unsigned int cpu)
+void hv_hyp_synic_enable_regs(unsigned int cpu)
{
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
- union hv_synic_scontrol sctrl;
- /* Setup the Synic's message page */
- simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ /* Setup the Synic's message page with the hypervisor. */
+ simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
- simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
- >> HV_HYP_PAGE_SHIFT;
- hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
+ /* Mask out vTOM bit. ioremap_cache() maps decrypted */
+ u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
+ ~ms_hyperv.shared_gpa_boundary;
+ hv_cpu->hyp_synic_message_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ if (!hv_cpu->hyp_synic_message_page)
+			pr_err("Failed to map SynIC message page.\n");
+ } else {
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
+
+ hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
- /* Setup the Synic's event page */
- siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ /* Setup the Synic's event page with the hypervisor. */
+ siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
- siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
- >> HV_HYP_PAGE_SHIFT;
- hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
+ /* Mask out vTOM bit. ioremap_cache() maps decrypted */
+ u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
+ ~ms_hyperv.shared_gpa_boundary;
+ hv_cpu->hyp_synic_event_page =
+ (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
+ if (!hv_cpu->hyp_synic_event_page)
+			pr_err("Failed to map SynIC event page.\n");
+ } else {
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
+
+ hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ hv_enable_coco_interrupt(cpu, vmbus_interrupt, true);
/* Setup the shared SINT. */
if (vmbus_irq != -1)
enable_percpu_irq(vmbus_irq, 0);
- shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
- VMBUS_MESSAGE_SINT);
+ shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
+ shared_sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+}
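For reference, the register layouts programmed above, as defined in the kernel's Hyper-V TLFS headers (reproduced here as an editorial aid): base_simp_gpa holds a page frame number, which is why addresses are shifted by HV_HYP_PAGE_SHIFT, and the SINT's proxy bit is the one mentioned in the hv_synic_init() comment below.

	union hv_synic_simp {
		u64 as_uint64;
		struct {
			u64 simp_enabled:1;
			u64 preserved:11;
			u64 base_simp_gpa:52;
		};
	};

	union hv_synic_sint {
		u64 as_uint64;
		struct {
			u64 vector:8;
			u64 reserved1:8;
			u64 masked:1;
			u64 auto_eoi:1;
			u64 polling:1;
			u64 as_intercept:1;
			u64 proxy:1;
			u64 reserved2:43;
		};
	};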
- /*
- * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
- * it doesn't provide a recommendation flag and AEOI must be disabled.
- */
-#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
- shared_sint.auto_eoi =
- !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
-#else
- shared_sint.auto_eoi = 0;
-#endif
- hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+static void hv_hyp_synic_enable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
/* Enable the global synic bit */
- sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 1;
- hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+ hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+}
+
+static void hv_para_synic_enable_regs(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ /* Setup the Synic's message page with the paravisor. */
+ simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
+ simp.simp_enabled = 1;
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->para_synic_message_page)
+ >> HV_HYP_PAGE_SHIFT;
+ hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Setup the Synic's event page with the paravisor. */
+ siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
+ siefp.siefp_enabled = 1;
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->para_synic_event_page)
+ >> HV_HYP_PAGE_SHIFT;
+ hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
+}
+
+static void hv_para_synic_enable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
+
+ /* Enable the global synic bit */
+ sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
+ sctrl.enable = 1;
+ hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
{
- hv_synic_enable_regs(cpu);
+ if (vmbus_is_confidential())
+ hv_para_synic_enable_regs(cpu);
+
+ /*
+	 * The SINT is set in hv_hyp_synic_enable_regs() by calling
+	 * hv_set_msr(). hv_set_msr() in turn has special-case code for the
+	 * SINT MSRs that writes to the hypervisor's version of the MSR *and*
+	 * to the paravisor's version (but *without* the proxy bit when
+	 * VMBus is confidential).
+ *
+ * Then enable interrupts via the paravisor if VMBus is confidential,
+ * and otherwise via the hypervisor.
+ */
+
+ hv_hyp_synic_enable_regs(cpu);
+ if (vmbus_is_confidential())
+ hv_para_synic_enable_interrupts();
+ else
+ hv_hyp_synic_enable_interrupts();
hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
return 0;
}
-/*
- * hv_synic_cleanup - Cleanup routine for hv_synic_init().
- */
-void hv_synic_disable_regs(unsigned int cpu)
+void hv_hyp_synic_disable_regs(unsigned int cpu)
{
+ struct hv_per_cpu_context *hv_cpu =
+ per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
- union hv_synic_scontrol sctrl;
- shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
- VMBUS_MESSAGE_SINT);
+ shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_enable_coco_interrupt(cpu, vmbus_interrupt, false);
- simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
+	/*
+	 * In an Isolation VM, the SIMP and SIEFP pages are allocated by the
+	 * paravisor. These pages will also be used by the kdump kernel, so
+	 * just reset the enable bit here and keep the page addresses.
+	 */
simp.simp_enabled = 0;
- simp.base_simp_gpa = 0;
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
+ if (hv_cpu->hyp_synic_message_page) {
+ iounmap(hv_cpu->hyp_synic_message_page);
+ hv_cpu->hyp_synic_message_page = NULL;
+ }
+ } else {
+ simp.base_simp_gpa = 0;
+ }
- hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+ hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
- siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
- siefp.base_siefp_gpa = 0;
- hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
+ if (hv_cpu->hyp_synic_event_page) {
+ iounmap(hv_cpu->hyp_synic_event_page);
+ hv_cpu->hyp_synic_event_page = NULL;
+ }
+ } else {
+ siefp.base_siefp_gpa = 0;
+ }
+
+ hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
+}
+
+static void hv_hyp_synic_disable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
/* Disable the global synic bit */
- sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 0;
- hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+ hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+}
- if (vmbus_irq != -1)
- disable_percpu_irq(vmbus_irq);
+static void hv_para_synic_disable_regs(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+
+ /* Disable SynIC's message page in the paravisor. */
+ simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
+ simp.simp_enabled = 0;
+ hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Disable SynIC's event page in the paravisor. */
+ siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
+ siefp.siefp_enabled = 0;
+ hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
+}
+
+static void hv_para_synic_disable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
+
+ /* Disable the global synic bit */
+ sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
+ sctrl.enable = 0;
+ hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}
#define HV_MAX_TRIES 3
@@ -303,16 +499,18 @@ void hv_synic_disable_regs(unsigned int cpu)
* that the normal interrupt handling mechanism will find and process the channel interrupt
* "very soon", and in the process clear the bit.
*/
-static bool hv_synic_event_pending(void)
+static bool __hv_synic_event_pending(union hv_synic_event_flags *event, int sint)
{
- struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
- union hv_synic_event_flags *event =
- (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
- unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
+ unsigned long *recv_int_page;
bool pending;
u32 relid;
int tries = 0;
+ if (!event)
+ return false;
+
+ event += sint;
+ recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
retry:
pending = false;
for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
@@ -329,10 +527,58 @@ retry:
return pending;
}
+static bool hv_synic_event_pending(void)
+{
+ struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ union hv_synic_event_flags *hyp_synic_event_page = hv_cpu->hyp_synic_event_page;
+ union hv_synic_event_flags *para_synic_event_page = hv_cpu->para_synic_event_page;
+
+ return
+ __hv_synic_event_pending(hyp_synic_event_page, VMBUS_MESSAGE_SINT) ||
+ __hv_synic_event_pending(para_synic_event_page, VMBUS_MESSAGE_SINT);
+}
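Editor's note: the SynIC event page is laid out as one hv_synic_event_flags entry per SINT, which is what the event += sint indexing above relies on; each set bit is the relid of a channel with a pending interrupt. A minimal check, without the retry/timeout handling (illustrative only):

	static bool example_any_event_pending(union hv_synic_event_flags *page,
					      int sint)
	{
		if (!page)
			return false;

		return !bitmap_empty((page + sint)->flags, HV_EVENT_FLAGS_COUNT);
	}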
+
+static int hv_pick_new_cpu(struct vmbus_channel *channel)
+{
+ int ret = -EBUSY;
+ int start;
+ int cpu;
+
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+	/*
+	 * On older versions of Hyper-V, we can't assume that the relevant
+	 * interrupts will be sent before the CPU is offlined.
+	 */
+ if (vmbus_proto_version < VERSION_WIN10_V5_3)
+ return -EBUSY;
+
+ start = get_random_u32_below(nr_cpu_ids);
+
+ for_each_cpu_wrap(cpu, cpu_online_mask, start) {
+ if (channel->target_cpu == cpu ||
+ channel->target_cpu == VMBUS_CONNECT_CPU)
+ continue;
+
+ ret = vmbus_channel_set_cpu(channel, cpu);
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ ret = vmbus_channel_set_cpu(channel, VMBUS_CONNECT_CPU);
+
+ return ret;
+}
+
+/*
+ * hv_synic_cleanup - Cleanup routine for hv_synic_init().
+ */
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
- bool channel_found = false;
+ int ret = 0;
if (vmbus_connection.conn_state != CONNECTED)
goto always_cleanup;
@@ -349,38 +595,34 @@ int hv_synic_cleanup(unsigned int cpu)
/*
* Search for channels which are bound to the CPU we're about to
- * cleanup. In case we find one and vmbus is still connected, we
- * fail; this will effectively prevent CPU offlining.
- *
- * TODO: Re-bind the channels to different CPUs.
+	 * clean up.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(channel);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
list_for_each_entry(sc, &channel->sc_list, sc_list) {
if (sc->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(sc);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
}
- if (channel_found)
- break;
}
mutex_unlock(&vmbus_connection.channel_mutex);
- if (channel_found)
- return -EBUSY;
-
/*
- * channel_found == false means that any channels that were previously
- * assigned to the CPU have been reassigned elsewhere with a call of
- * vmbus_send_modifychannel(). Scan the event flags page looking for
- * bits that are set and waiting with a timeout for vmbus_chan_sched()
- * to process such bits. If bits are still set after this operation
- * and VMBus is connected, fail the CPU offlining operation.
+ * Scan the event flags page looking for bits that are set and waiting
+ * with a timeout for vmbus_chan_sched() to process such bits. If bits
+ * are still set after this operation and VMBus is connected, fail the
+ * CPU offlining operation.
*/
if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
@@ -388,7 +630,27 @@ int hv_synic_cleanup(unsigned int cpu)
always_cleanup:
hv_stimer_legacy_cleanup(cpu);
- hv_synic_disable_regs(cpu);
+	/*
+	 * First, disable the event and message pages used for communicating
+	 * with the host, and then disable the host interrupts if VMBus is
+	 * not confidential.
+	 */
+ hv_hyp_synic_disable_regs(cpu);
+ if (!vmbus_is_confidential())
+ hv_hyp_synic_disable_interrupts();
- return 0;
+	/*
+	 * Perform the same steps for Confidential VMBus. The ordering
+	 * (pages first, then interrupts) guarantees that no data can be
+	 * posted for processing after interrupts have been disabled.
+	 */
+ if (vmbus_is_confidential()) {
+ hv_para_synic_disable_regs(cpu);
+ hv_para_synic_disable_interrupts();
+ }
+ if (vmbus_irq != -1)
+ disable_percpu_irq(vmbus_irq);
+
+ return ret;
}
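For context (from vmbus_drv.c in the same tree, reproduced as an editorial aid): hv_synic_init() and hv_synic_cleanup() are registered as CPU hotplug callbacks, so a non-zero return from hv_synic_cleanup() vetoes offlining the CPU.

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"hyperv/vmbus:online",
					hv_synic_init, hv_synic_cleanup);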