Diffstat (limited to 'drivers/hv/channel_mgmt.c')
-rw-r--r--	drivers/hv/channel_mgmt.c	508
1 file changed, 351 insertions, 157 deletions
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 417a95e5094d..74fed2c073d4 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -20,7 +20,9 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/hyperv.h>
+#include <linux/export.h>
 #include <asm/mshyperv.h>
+#include <linux/sched/isolation.h>
 
 #include "hyperv_vmbus.h"
 
@@ -31,103 +33,129 @@
 const struct vmbus_device vmbus_devs[] = {
 	{ .dev_type = HV_IDE,
 	  HV_IDE_GUID,
 	  .perf_device = true,
+	  .allowed_in_isolated = false,
 	},
 
 	/* SCSI */
 	{ .dev_type = HV_SCSI,
 	  HV_SCSI_GUID,
 	  .perf_device = true,
+	  .allowed_in_isolated = true,
 	},
 
 	/* Fibre Channel */
 	{ .dev_type = HV_FC,
 	  HV_SYNTHFC_GUID,
 	  .perf_device = true,
+	  .allowed_in_isolated = false,
 	},
 
 	/* Synthetic NIC */
 	{ .dev_type = HV_NIC,
 	  HV_NIC_GUID,
 	  .perf_device = true,
+	  .allowed_in_isolated = true,
 	},
 
 	/* Network Direct */
 	{ .dev_type = HV_ND,
 	  HV_ND_GUID,
 	  .perf_device = true,
+	  .allowed_in_isolated = false,
 	},
 
 	/* PCIE */
 	{ .dev_type = HV_PCIE,
 	  HV_PCIE_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = true,
 	},
 
 	/* Synthetic Frame Buffer */
 	{ .dev_type = HV_FB,
 	  HV_SYNTHVID_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
 	/* Synthetic Keyboard */
 	{ .dev_type = HV_KBD,
 	  HV_KBD_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
 	/* Synthetic MOUSE */
 	{ .dev_type = HV_MOUSE,
 	  HV_MOUSE_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
 	/* KVP */
 	{ .dev_type = HV_KVP,
 	  HV_KVP_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
 	/* Time Synch */
 	{ .dev_type = HV_TS,
 	  HV_TS_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = true,
 	},
 
 	/* Heartbeat */
 	{ .dev_type = HV_HB,
 	  HV_HEART_BEAT_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = true,
 	},
 
 	/* Shutdown */
 	{ .dev_type = HV_SHUTDOWN,
 	  HV_SHUTDOWN_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = true,
 	},
 
 	/* File copy */
-	{ .dev_type = HV_FCOPY,
+	/* fcopy has always used a 16 KB ring buffer, which has worked well for many years */
+	{ .pref_ring_size = 0x4000,
+	  .dev_type = HV_FCOPY,
 	  HV_FCOPY_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
 	/* Backup */
 	{ .dev_type = HV_BACKUP,
 	  HV_VSS_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
 	/* Dynamic Memory */
 	{ .dev_type = HV_DM,
 	  HV_DM_GUID,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 
-	/* Unknown GUID */
-	{ .dev_type = HV_UNKNOWN,
+	/*
+	 * Unknown GUID
	 * 64 KB ring buffer + 4 KB header should be sufficient size for any Hyper-V device apart
+	 * from HV_NIC and HV_SCSI. This avoids falling back to a much bigger (2 MB) ring size
+	 * for unknown devices.
+	 */
+	{ .pref_ring_size = 0x11000,
+	  .dev_type = HV_UNKNOWN,
 	  .perf_device = false,
+	  .allowed_in_isolated = false,
 	},
 };
+EXPORT_SYMBOL_GPL(vmbus_devs);
 
 static const struct {
 	guid_t guid;
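With vmbus_devs[] now exported, other VMBus code can key off these fields. As a rough sketch (not part of the patch; the helper name and miss-handling policy are illustrative, and only vmbus_devs[], .pref_ring_size and .allowed_in_isolated come from the change), a preferred-ring-size lookup might look like:

    /* Hypothetical consumer of the exported table; 0 means "no preference" */
    static u32 example_preferred_ring_size(const guid_t *if_type)
    {
    	int i;

    	for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
    		if (guid_equal(if_type, &vmbus_devs[i].guid))
    			return vmbus_devs[i].pref_ring_size;
    	}
    	/* The HV_UNKNOWN entry carries the fallback: 0x11000 (64 KB + 4 KB) */
    	return vmbus_devs[HV_UNKNOWN].pref_ring_size;
    }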
@@ -135,6 +163,7 @@ static const struct {
 	{ HV_AVMA1_GUID },
 	{ HV_AVMA2_GUID },
 	{ HV_RDV_GUID	},
+	{ HV_IMC_GUID	},
 };
 
 /*
@@ -190,6 +219,7 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
  * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
  * @icmsghdrp: Pointer to msg header structure
  * @buf: Raw buffer channel data
+ * @buflen: Length of the raw buffer channel data.
  * @fw_version: The framework versions we can support.
  * @fw_vercnt: The size of @fw_version.
  * @srv_version: The service versions we can support.
@@ -202,8 +232,8 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
  * Set up and fill in default negotiate response message.
  * Mainly used by Hyper-V drivers.
  */
-bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
-			u8 *buf, const int *fw_version, int fw_vercnt,
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+			u32 buflen, const int *fw_version, int fw_vercnt,
 			const int *srv_version, int srv_vercnt,
 			int *nego_fw_version, int *nego_srv_version)
 {
@@ -215,10 +245,14 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
 	bool found_match = false;
 	struct icmsg_negotiate *negop;
 
+	/* Check that there's enough space for icframe_vercnt, icmsg_vercnt */
+	if (buflen < ICMSG_HDR + offsetof(struct icmsg_negotiate, reserved)) {
+		pr_err_ratelimited("Invalid icmsg negotiate\n");
+		return false;
+	}
+
 	icmsghdrp->icmsgsize = 0x10;
-	negop = (struct icmsg_negotiate *)&buf[
-		sizeof(struct vmbuspipe_hdr) +
-		sizeof(struct icmsg_hdr)];
+	negop = (struct icmsg_negotiate *)&buf[ICMSG_HDR];
 
 	icframe_major = negop->icframe_vercnt;
 	icframe_minor = 0;
@@ -226,6 +260,15 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
 	icmsg_major = negop->icmsg_vercnt;
 	icmsg_minor = 0;
 
+	/* Validate negop packet */
+	if (icframe_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+	    icmsg_major > IC_VERSION_NEGOTIATION_MAX_VER_COUNT ||
+	    ICMSG_NEGOTIATE_PKT_SIZE(icframe_major, icmsg_major) > buflen) {
+		pr_err_ratelimited("Invalid icmsg negotiate - icframe_major: %u, icmsg_major: %u\n",
+				   icframe_major, icmsg_major);
+		goto fw_error;
+	}
+
 	/*
 	 * Select the framework version number we will
 	 * support.
@@ -302,7 +345,6 @@ fw_error:
 	negop->icversion_data[1].minor = icmsg_minor;
 	return found_match;
 }
-
 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
 
 /*
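For reference, ICMSG_HDR is sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr), so the first check guarantees that negop->icframe_vercnt and negop->icmsg_vercnt are in-bounds before ICMSG_NEGOTIATE_PKT_SIZE() is evaluated with them. A caller now passes the length actually received from the ring rather than the buffer's capacity; sketched loosely on the hv_utils drivers (the buffer and version-array names here are illustrative):

    /* Illustrative onchannelcallback fragment, not literal kernel code */
    if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
    	if (vmbus_prep_negotiate_resp(icmsghdrp, recv_buf, recvlen,
    				      fw_versions, FW_VER_COUNT,
    				      srv_versions, SRV_VER_COUNT,
    				      NULL, &srv_version)) {
    		/* found a match: srv_version holds the negotiated version */
    	}
    }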
@@ -317,7 +359,6 @@ static struct vmbus_channel *alloc_channel(void)
 		return NULL;
 
 	spin_lock_init(&channel->sched_lock);
-	spin_lock_init(&channel->lock);
 	init_completion(&channel->rescind_event);
 
 	INIT_LIST_HEAD(&channel->sc_list);
@@ -351,7 +392,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
 	 * execute:
 	 *
 	 * (a) In the "normal (i.e., not resuming from hibernation)" path,
-	 *     the full barrier in smp_store_mb() guarantees that the store
+	 *     the full barrier in virt_store_mb() guarantees that the store
 	 *     is propagated to all CPUs before the add_channel_work work
 	 *     is queued.  In turn, add_channel_work is queued before the
 	 *     channel's ring buffer is allocated/initialized and the
@@ -363,14 +404,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
 	 *     recv_int_page before retrieving the channel pointer from the
 	 *     array of channels.
 	 *
-	 * (b) In the "resuming from hibernation" path, the smp_store_mb()
+	 * (b) In the "resuming from hibernation" path, the virt_store_mb()
 	 *     guarantees that the store is propagated to all CPUs before
 	 *     the VMBus connection is marked as ready for the resume event
 	 *     (cf. check_ready_for_resume_event()).  The interrupt handler
 	 *     of the VMBus driver and vmbus_chan_sched() can not run before
 	 *     vmbus_bus_resume() has completed execution (cf. resume_noirq).
 	 */
-	smp_store_mb(
+	virt_store_mb(
 		vmbus_connection.channels[channel->offermsg.child_relid],
 		channel);
 }
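The smp_store_mb() to virt_store_mb() switch matters because the guest is synchronizing with the hypervisor, not merely with other guest CPUs, and the virt_* barriers are not weakened on !CONFIG_SMP builds. As a reminder of the semantics, the generic definition amounts to a plain store followed by a full virtual-machine barrier (this mirrors include/asm-generic/barrier.h; shown for context only):

    #define virt_store_mb(var, value) do {	\
    	WRITE_ONCE(var, value);		\
    	virt_mb();			\
    } while (0)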
@@ -400,8 +441,6 @@ static void vmbus_release_relid(u32 relid)
 
 void hv_process_channel_removal(struct vmbus_channel *channel)
 {
-	unsigned long flags;
-
 	lockdep_assert_held(&vmbus_connection.channel_mutex);
 	BUG_ON(!channel->rescind);
 
@@ -415,28 +454,24 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
 	/*
 	 * Upon suspend, an in-use hv_sock channel is removed from the array of
 	 * channels and the relid is invalidated.  After hibernation, when the
-	 * user-space appplication destroys the channel, it's unnecessary and
+	 * user-space application destroys the channel, it's unnecessary and
 	 * unsafe to remove the channel from the array of channels.  See also
 	 * the inline comments before the call of vmbus_release_relid() below.
 	 */
 	if (channel->offermsg.child_relid != INVALID_RELID)
 		vmbus_channel_unmap_relid(channel);
 
-	if (channel->primary_channel == NULL) {
+	if (channel->primary_channel == NULL)
 		list_del(&channel->listentry);
-	} else {
-		struct vmbus_channel *primary_channel = channel->primary_channel;
-		spin_lock_irqsave(&primary_channel->lock, flags);
+	else
 		list_del(&channel->sc_list);
-		spin_unlock_irqrestore(&primary_channel->lock, flags);
-	}
 
 	/*
 	 * If this is a "perf" channel, updates the hv_numa_map[] masks so that
 	 * init_vp_index() can (re-)use the CPU.
 	 */
 	if (hv_is_perf_channel(channel))
-		hv_clear_alloced_cpu(channel->target_cpu);
+		hv_clear_allocated_cpu(channel->target_cpu);
 
 	/*
 	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
@@ -470,7 +505,6 @@ static void vmbus_add_channel_work(struct work_struct *work)
 	struct vmbus_channel *newchannel =
 		container_of(work, struct vmbus_channel, add_channel_work);
 	struct vmbus_channel *primary_channel = newchannel->primary_channel;
-	unsigned long flags;
 	int ret;
 
 	/*
@@ -509,13 +543,17 @@ static void vmbus_add_channel_work(struct work_struct *work)
 	 * Add the new device to the bus. This will kick off device-driver
 	 * binding which eventually invokes the device driver's AddDevice()
 	 * method.
+	 *
+	 * If vmbus_device_register() fails, the 'device_obj' is freed in
+	 * vmbus_device_release() as called by device_unregister() in the
+	 * error path of vmbus_device_register(). In the outside error
+	 * path, there's no need to free it.
 	 */
 	ret = vmbus_device_register(newchannel->device_obj);
 
 	if (ret != 0) {
 		pr_err("unable to add child device object (relid %d)\n",
 			newchannel->offermsg.child_relid);
-		kfree(newchannel->device_obj);
 		goto err_deq_chan;
 	}
 
@@ -531,13 +569,10 @@ err_deq_chan:
 	 */
 	newchannel->probe_done = true;
 
-	if (primary_channel == NULL) {
+	if (primary_channel == NULL)
 		list_del(&newchannel->listentry);
-	} else {
-		spin_lock_irqsave(&primary_channel->lock, flags);
+	else
 		list_del(&newchannel->sc_list);
-		spin_unlock_irqrestore(&primary_channel->lock, flags);
-	}
 
 	/* vmbus_process_offer() has mapped the channel. */
 	vmbus_channel_unmap_relid(newchannel);
@@ -557,7 +592,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 {
 	struct vmbus_channel *channel;
 	struct workqueue_struct *wq;
-	unsigned long flags;
 	bool fnew = true;
 
 	/*
@@ -574,10 +608,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 *	CPUS_READ_UNLOCK		CPUS_WRITE_UNLOCK
 	 *
 	 * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
-	 *              CPU2's SEARCH from *not* seeing CPU1's INSERT
+	 *		CPU2's SEARCH from *not* seeing CPU1's INSERT
 	 *
 	 * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
-	 *              CPU2's LOAD from *not* seeing CPU1's STORE
+	 *		CPU2's LOAD from *not* seeing CPU1's STORE
 	 */
 	cpus_read_lock();
 
@@ -587,6 +621,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
 
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		if (guid_equal(&channel->offermsg.offer.if_type,
+			       &newchannel->offermsg.offer.if_type) &&
+		    guid_equal(&channel->offermsg.offer.if_instance,
+			       &newchannel->offermsg.offer.if_instance)) {
+			fnew = false;
+			newchannel->primary_channel = channel;
+			break;
+		}
+	}
+
 	init_vp_index(newchannel);
 
 	/* Remember the channels that should be cleaned up upon suspend. */
@@ -599,25 +644,16 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	atomic_dec(&vmbus_connection.offer_in_progress);
 
-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-		if (guid_equal(&channel->offermsg.offer.if_type,
-			       &newchannel->offermsg.offer.if_type) &&
-		    guid_equal(&channel->offermsg.offer.if_instance,
-			       &newchannel->offermsg.offer.if_instance)) {
-			fnew = false;
-			break;
-		}
-	}
-
-	if (fnew)
+	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
-	else {
+	} else {
 		/*
 		 * Check to see if this is a valid sub-channel.
 		 */
 		if (newchannel->offermsg.offer.sub_channel_index == 0) {
 			mutex_unlock(&vmbus_connection.channel_mutex);
+			cpus_read_unlock();
 			/*
 			 * Don't call free_channel(), because newchannel->kobj
 			 * is not initialized yet.
@@ -629,10 +665,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		/*
 		 * Process the sub-channel.
 		 */
-		newchannel->primary_channel = channel;
-		spin_lock_irqsave(&channel->lock, flags);
 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
-		spin_unlock_irqrestore(&channel->lock, flags);
 	}
 
 	vmbus_channel_map_relid(newchannel);
@@ -668,89 +701,122 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 }
 
 /*
+ * Check if the given CPU is used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+	struct vmbus_channel *primary = chn->primary_channel;
+	struct vmbus_channel *sc;
+
+	lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+	if (!primary)
+		return false;
+
+	if (primary->target_cpu == cpu)
+		return true;
+
+	list_for_each_entry(sc, &primary->sc_list, sc_list)
+		if (sc != chn && sc->target_cpu == cpu)
+			return true;
+
+	return false;
+}
+
+/*
  * We use this state to statically distribute the channel interrupt load.
  */
 static int next_numa_node_id;
 
 /*
- * Starting with Win8, we can statically distribute the incoming
- * channel interrupt load by binding a channel to VCPU.
- *
- * For pre-win8 hosts or non-performance critical channels we assign the
- * VMBUS_CONNECT_CPU.
+ * We can statically distribute the incoming channel interrupt load
+ * by binding a channel to VCPU.
  *
- * Starting with win8, performance critical channels will be distributed
- * evenly among all the available NUMA nodes.  Once the node is assigned,
- * we will assign the CPU based on a simple round robin scheme.
+ * For non-performance critical channels we assign the VMBUS_CONNECT_CPU.
+ * Performance critical channels will be distributed evenly among all
+ * the available NUMA nodes.  Once the node is assigned, we will assign
+ * the CPU based on a simple round robin scheme.
 */
 static void init_vp_index(struct vmbus_channel *channel)
 {
 	bool perf_chn = hv_is_perf_channel(channel);
+	u32 i, ncpu = num_online_cpus();
 	cpumask_var_t available_mask;
-	struct cpumask *alloced_mask;
+	struct cpumask *allocated_mask;
+	const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
 	u32 target_cpu;
 	int numa_node;
 
-	if ((vmbus_proto_version == VERSION_WS2008) ||
-	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
-	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
+	if (!perf_chn ||
+	    !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
+	    cpumask_empty(hk_mask)) {
 		/*
-		 * Prior to win8, all channel interrupts are
-		 * delivered on VMBUS_CONNECT_CPU.
-		 * Also if the channel is not a performance critical
+		 * If the channel is not a performance critical
 		 * channel, bind it to VMBUS_CONNECT_CPU.
 		 * In case alloc_cpumask_var() fails, bind it to
 		 * VMBUS_CONNECT_CPU.
+		 * If all the cpus are isolated, bind it to
+		 * VMBUS_CONNECT_CPU.
 		 */
-		channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU);
 		channel->target_cpu = VMBUS_CONNECT_CPU;
-		channel->target_vp =
-			hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
 		if (perf_chn)
-			hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
+			hv_set_allocated_cpu(VMBUS_CONNECT_CPU);
 		return;
 	}
 
-	while (true) {
-		numa_node = next_numa_node_id++;
-		if (numa_node == nr_node_ids) {
-			next_numa_node_id = 0;
-			continue;
+	for (i = 1; i <= ncpu + 1; i++) {
+		while (true) {
+			numa_node = next_numa_node_id++;
+			if (numa_node == nr_node_ids) {
+				next_numa_node_id = 0;
+				continue;
+			}
+			if (cpumask_empty(cpumask_of_node(numa_node)))
+				continue;
+			break;
 		}
-		if (cpumask_empty(cpumask_of_node(numa_node)))
-			continue;
-		break;
-	}
-	channel->numa_node = numa_node;
-	alloced_mask = &hv_context.hv_numa_map[numa_node];
+		allocated_mask = &hv_context.hv_numa_map[numa_node];
 
-	if (cpumask_weight(alloced_mask) ==
-	    cpumask_weight(cpumask_of_node(numa_node))) {
-		/*
-		 * We have cycled through all the CPUs in the node;
-		 * reset the alloced map.
-		 */
-		cpumask_clear(alloced_mask);
-	}
+retry:
+		cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
+		cpumask_and(available_mask, available_mask, hk_mask);
 
-	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+		if (cpumask_empty(available_mask)) {
+			/*
+			 * We have cycled through all the CPUs in the node;
+			 * reset the allocated map.
+			 */
+			cpumask_clear(allocated_mask);
+			goto retry;
+		}
 
-	target_cpu = cpumask_first(available_mask);
-	cpumask_set_cpu(target_cpu, alloced_mask);
+		target_cpu = cpumask_first(available_mask);
+		cpumask_set_cpu(target_cpu, allocated_mask);
+
+		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
+			break;
+	}
 
 	channel->target_cpu = target_cpu;
-	channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
 
 	free_cpumask_var(available_mask);
 }
 
+#define UNLOAD_DELAY_UNIT_MS	10		/* 10 milliseconds */
+#define UNLOAD_WAIT_MS		(100*1000)	/* 100 seconds */
+#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
+#define UNLOAD_MSG_MS		(5*1000)	/* Every 5 seconds */
+#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
+
 static void vmbus_wait_for_unload(void)
 {
 	int cpu;
 	void *page_addr;
 	struct hv_message *msg;
 	struct vmbus_channel_message_header *hdr;
-	u32 message_type;
+	u32 message_type, i;
 
 	/*
 	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
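The rewritten init_vp_index() is easier to follow as a standalone model. The sketch below (plain userspace C, hypothetical names) keeps only the round-robin core: nodes are visited in turn, a per-node "allocated" bitmap tracks CPUs already handed out, and the map resets once a node is exhausted. It deliberately omits the housekeeping-mask filtering and the hv_cpuself_used() retry loop of the real code:

    #include <stdio.h>

    #define NODES 2
    #define CPUS_PER_NODE 4

    static unsigned int next_node;
    static unsigned int allocated[NODES];	/* bitmask of used CPUs per node */

    static int pick_cpu(void)
    {
    	unsigned int node = next_node++ % NODES;
    	unsigned int avail = ~allocated[node] & ((1u << CPUS_PER_NODE) - 1);
    	int cpu;

    	if (!avail) {			/* node exhausted: reset, like the  */
    		allocated[node] = 0;	/* cpumask_clear() in the patch     */
    		avail = (1u << CPUS_PER_NODE) - 1;
    	}
    	cpu = __builtin_ctz(avail);	/* "cpumask_first(available_mask)" */
    	allocated[node] |= 1u << cpu;
    	return node * CPUS_PER_NODE + cpu;
    }

    int main(void)
    {
    	for (int i = 0; i < 10; i++)
    		printf("channel %d -> cpu %d\n", i, pick_cpu());
    	return 0;
    }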
@@ -760,16 +826,35 @@ static void vmbus_wait_for_unload(void)
 	 * functional and vmbus_unload_response() will complete
 	 * vmbus_connection.unload_event. If not, the last thing we can do is
 	 * read message pages for all CPUs directly.
+	 *
+	 * Wait up to 100 seconds since an Azure host must writeback any dirty
+	 * data in its disk cache before the VMbus UNLOAD request will
+	 * complete. This flushing has been empirically observed to take up
+	 * to 50 seconds in cases with a lot of dirty data, so allow additional
+	 * leeway and for inaccuracies in mdelay(). But eventually time out so
+	 * that the panic path can't get hung forever in case the response
+	 * message isn't seen.
 	 */
-	while (1) {
+	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
 		if (completion_done(&vmbus_connection.unload_event))
-			break;
+			goto completed;
 
-		for_each_online_cpu(cpu) {
+		for_each_present_cpu(cpu) {
 			struct hv_per_cpu_context *hv_cpu
 				= per_cpu_ptr(hv_context.cpu_context, cpu);
 
-			page_addr = hv_cpu->synic_message_page;
+			/*
+			 * In a CoCo VM the hyp_synic_message_page is not allocated
+			 * in hv_synic_alloc(). Instead it is set/cleared in
+			 * hv_hyp_synic_enable_regs() and hv_hyp_synic_disable_regs()
+			 * such that it is set only when the CPU is online. If
+			 * not all present CPUs are online, the message page
+			 * might be NULL, so skip such CPUs.
+			 */
+			page_addr = hv_cpu->hyp_synic_message_page;
+			if (!page_addr)
+				continue;
+
 			msg = (struct hv_message *)page_addr
 				+ VMBUS_MESSAGE_SINT;
 
@@ -786,19 +871,31 @@ static void vmbus_wait_for_unload(void)
 			vmbus_signal_eom(msg, message_type);
 		}
 
-		mdelay(10);
+		/*
+		 * Give a notice periodically so someone watching the
+		 * serial output won't think it is completely hung.
+		 */
+		if (!(i % UNLOAD_MSG_LOOPS))
+			pr_notice("Waiting for VMBus UNLOAD to complete\n");
+
+		mdelay(UNLOAD_DELAY_UNIT_MS);
 	}
+	pr_err("Continuing even though VMBus UNLOAD did not complete\n");
 
+completed:
 	/*
 	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
 	 * maybe-pending messages on all CPUs to be able to receive new
 	 * messages after we reconnect.
 	 */
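Working out the constants: UNLOAD_WAIT_LOOPS = 100000 / 10 = 10000 iterations of mdelay(10), i.e. roughly 100 seconds of waiting in total (plus the time spent draining the message pages), and !(i % UNLOAD_MSG_LOOPS) fires every 5000 / 10 = 500 iterations, so the "Waiting for VMBus UNLOAD to complete" notice appears about every 5 seconds.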
	for_each_present_cpu(cpu) {
-	for_each_online_cpu(cpu) {
+	for_each_present_cpu(cpu) {
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
 
-		page_addr = hv_cpu->synic_message_page;
+		page_addr = hv_cpu->hyp_synic_message_page;
+		if (!page_addr)
+			continue;
+
 		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 		msg->header.message_type = HVMSG_NONE;
 	}
@@ -812,6 +909,11 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
 	/*
 	 * This is a global event; just wakeup the waiting thread.
 	 * Once we successfully unload, we can cleanup the monitor state.
+	 *
+	 * NB.  A malicious or compromised Hyper-V could send a spurious
+	 * message of type CHANNELMSG_UNLOAD_RESPONSE, and trigger a call
+	 * of the complete() below.  Make sure that unload_event has been
+	 * initialized by the time this complete() is executed.
 	 */
 	complete(&vmbus_connection.unload_event);
 }
@@ -827,7 +929,7 @@ void vmbus_initiate_unload(bool crash)
 	if (vmbus_proto_version < VERSION_WIN8_1)
 		return;
 
-	init_completion(&vmbus_connection.unload_event);
+	reinit_completion(&vmbus_connection.unload_event);
 	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
 	hdr.msgtype = CHANNELMSG_UNLOAD;
 	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
@@ -843,16 +945,6 @@ void vmbus_initiate_unload(bool crash)
 		vmbus_wait_for_unload();
 }
 
-static void check_ready_for_resume_event(void)
-{
-	/*
-	 * If all the old primary channels have been fixed up, then it's safe
-	 * to resume.
-	 */
-	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
-		complete(&vmbus_connection.ready_for_resume_event);
-}
-
 static void vmbus_setup_channel_state(struct vmbus_channel *channel,
 				      struct vmbus_channel_offer_channel *offer)
 {
@@ -861,11 +953,9 @@ static void vmbus_setup_channel_state(struct vmbus_channel *channel,
 	 */
 	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
 
-	if (vmbus_proto_version != VERSION_WS2008) {
-		channel->is_dedicated_interrupt =
-			(offer->is_dedicated_interrupt != 0);
-		channel->sig_event = offer->connection_id;
-	}
+	channel->is_dedicated_interrupt =
+		(offer->is_dedicated_interrupt != 0);
+	channel->sig_event = offer->connection_id;
 
 	memcpy(&channel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
@@ -905,6 +995,24 @@ find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
 	return channel;
 }
 
+static bool vmbus_is_valid_offer(const struct vmbus_channel_offer_channel *offer)
+{
+	const guid_t *guid = &offer->offer.if_type;
+	u16 i;
+
+	if (!hv_is_isolation_supported())
+		return true;
+
+	if (is_hvsock_offer(offer))
+		return true;
+
+	for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) {
+		if (guid_equal(guid, &vmbus_devs[i].guid))
+			return vmbus_devs[i].allowed_in_isolated;
+	}
+	return false;
+}
+
 /*
  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
  *
@@ -914,11 +1022,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 	struct vmbus_channel_offer_channel *offer;
 	struct vmbus_channel *oldchannel, *newchannel;
 	size_t offer_sz;
+	bool co_ring_buffer, co_external_memory;
 
 	offer = (struct vmbus_channel_offer_channel *)hdr;
 
 	trace_vmbus_onoffer(offer);
 
+	if (!vmbus_is_valid_offer(offer)) {
+		pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n",
+				   offer->child_relid);
+		atomic_dec(&vmbus_connection.offer_in_progress);
+		return;
+	}
+
+	co_ring_buffer = is_co_ring_buffer(offer);
+	co_external_memory = is_co_external_memory(offer);
+	if (!co_ring_buffer && co_external_memory) {
+		pr_err("Invalid offer relid=%d: the ring buffer isn't encrypted\n",
+		       offer->child_relid);
+		return;
+	}
+	if (co_ring_buffer || co_external_memory) {
+		if (vmbus_proto_version < VERSION_WIN10_V6_0 || !vmbus_is_confidential()) {
+			pr_err("Invalid offer relid=%d: no support for confidential VMBus\n",
+			       offer->child_relid);
+			atomic_dec(&vmbus_connection.offer_in_progress);
+			return;
+		}
+	}
+
 	oldchannel = find_primary_channel_by_offer(offer);
 
 	if (oldchannel != NULL) {
@@ -944,7 +1076,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 		 *	UNLOCK channel_mutex
 		 *
 		 * Forbids: r1 == valid_relid &&
-		 *              channels[valid_relid] == channel
+		 *		channels[valid_relid] == channel
 		 *
 		 * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
 		 * None of the hv_sock channels which were present before the
@@ -985,8 +1117,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 		/* Add the channel back to the array of channels. */
 		vmbus_channel_map_relid(oldchannel);
 
-		check_ready_for_resume_event();
-
 		mutex_unlock(&vmbus_connection.channel_mutex);
 		return;
 	}
@@ -999,6 +1129,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 		pr_err("Unable to allocate channel object\n");
 		return;
 	}
+	newchannel->co_ring_buffer = co_ring_buffer;
+	newchannel->co_external_memory = co_external_memory;
 
 	vmbus_setup_channel_state(newchannel, offer);
 
@@ -1065,6 +1197,18 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 
 	mutex_lock(&vmbus_connection.channel_mutex);
 	channel = relid2channel(rescind->child_relid);
+	if (channel != NULL) {
+		/*
+		 * Guarantee that no other instance of vmbus_onoffer_rescind()
+		 * has got a reference to the channel object.  Synchronize on
+		 * &vmbus_connection.channel_mutex.
+		 */
+		if (channel->rescind_ref) {
+			mutex_unlock(&vmbus_connection.channel_mutex);
+			return;
+		}
+		channel->rescind_ref = true;
+	}
 	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	if (channel == NULL) {
@@ -1118,8 +1262,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 			vmbus_device_unregister(channel->device_obj);
 			put_device(dev);
 		}
-	}
-	if (channel->primary_channel != NULL) {
+	} else if (channel->primary_channel != NULL) {
 		/*
 		 * Sub-channel is being rescinded. Following is the channel
 		 * close sequence when initiated from the driver (refer to
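Taken together, the two flag checks in vmbus_onoffer() above amount to this acceptance matrix, where "confidential VMBus" means vmbus_proto_version >= VERSION_WIN10_V6_0 and vmbus_is_confidential() is true:

    co_ring_buffer	co_external_memory	offer is accepted...
    false		false			always
    false		true			never (ring buffer isn't encrypted)
    true		false			only on a confidential VMBus
    true		true			only on a confidential VMBus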
@@ -1161,13 +1304,28 @@
 EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
 
 /*
  * vmbus_onoffers_delivered -
- * This is invoked when all offers have been delivered.
+ * The CHANNELMSG_ALLOFFERS_DELIVERED message arrives after all
+ * boot-time offers are delivered. A boot-time offer is for the primary
+ * channel for any virtual hardware configured in the VM at the time it boots.
+ * Boot-time offers include offers for physical devices assigned to the VM
+ * via Hyper-V's Discrete Device Assignment (DDA) functionality that are
+ * handled as virtual PCI devices in Linux (e.g., NVMe devices and GPUs).
+ * Boot-time offers do not include offers for VMBus sub-channels. Because
+ * devices can be hot-added to the VM after it is booted, additional channel
+ * offers that aren't boot-time offers can be received at any time after the
+ * all-offers-delivered message.
  *
- * Nothing to do here.
+ * SR-IOV NIC Virtual Functions (VFs) assigned to a VM are not considered
+ * to be assigned to the VM at boot-time, and offers for VFs may occur after
+ * the all-offers-delivered message. VFs are optional accelerators to the
+ * synthetic VMBus NIC and are effectively hot-added only after the VMBus
+ * NIC channel is opened (once it knows the guest can support it, via the
+ * sriov bit in the netvsc protocol).
 */
 static void vmbus_onoffers_delivered(
 			struct vmbus_channel_message_header *hdr)
 {
+	complete(&vmbus_connection.all_offers_delivered_event);
 }
 
 /*
@@ -1266,6 +1424,46 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
 }
 
 /*
+ * vmbus_onmodifychannel_response - Modify Channel response handler.
+ *
+ * This is invoked when we receive a response to our channel modify request.
+ * Find the matching request, copy the response and signal the requesting thread.
+ */
+static void vmbus_onmodifychannel_response(struct vmbus_channel_message_header *hdr)
+{
+	struct vmbus_channel_modifychannel_response *response;
+	struct vmbus_channel_msginfo *msginfo;
+	unsigned long flags;
+
+	response = (struct vmbus_channel_modifychannel_response *)hdr;
+
+	trace_vmbus_onmodifychannel_response(response);
+
+	/*
+	 * Find the modify msg, copy the response and signal/unblock the wait event.
+	 */
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, msglistentry) {
+		struct vmbus_channel_message_header *responseheader =
+				(struct vmbus_channel_message_header *)msginfo->msg;
+
+		if (responseheader->msgtype == CHANNELMSG_MODIFYCHANNEL) {
+			struct vmbus_channel_modifychannel *modifymsg;
+
+			modifymsg = (struct vmbus_channel_modifychannel *)msginfo->msg;
+			if (modifymsg->child_relid == response->child_relid) {
+				memcpy(&msginfo->response.modify_response, response,
+				       sizeof(*response));
+				complete(&msginfo->waitevent);
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
+/*
  * vmbus_ongpadl_torndown - GPADL torndown handler.
  *
  * This is invoked when we received a response to our gpadl teardown request.
@@ -1382,6 +1580,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
 	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL, 0},
 	{ CHANNELMSG_MODIFYCHANNEL,		0, NULL, 0},
 	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL, 0},
+	{ CHANNELMSG_MODIFYCHANNEL_RESPONSE,	1, vmbus_onmodifychannel_response,
+		sizeof(struct vmbus_channel_modifychannel_response)},
 };
 
 /*
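The new handler pairs with a requester that queues its message on vmbus_connection.chn_msg_list and sleeps on msginfo->waitevent. The real requester lives in drivers/hv/channel.c and differs in detail (error handling, message sizes, VP translation); the outline below shows only the shape of that rendezvous:

    /* Illustrative only -- not the actual channel.c implementation */
    static int example_send_modifychannel(struct vmbus_channel *channel, u32 target_vp)
    {
    	struct vmbus_channel_msginfo *info;
    	struct vmbus_channel_modifychannel *msg;
    	unsigned long flags;
    	int ret;

    	info = kzalloc(sizeof(*info) + sizeof(*msg), GFP_KERNEL);
    	if (!info)
    		return -ENOMEM;

    	init_completion(&info->waitevent);

    	msg = (struct vmbus_channel_modifychannel *)info->msg;
    	msg->header.msgtype = CHANNELMSG_MODIFYCHANNEL;
    	msg->child_relid = channel->offermsg.child_relid;
    	msg->target_vp = target_vp;

    	/* Publish on chn_msg_list so the response handler can find it. */
    	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
    	list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
    	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

    	ret = vmbus_post_msg(msg, sizeof(*msg), true);
    	if (ret == 0)
    		wait_for_completion(&info->waitevent);

    	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
    	list_del(&info->msglistentry);
    	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

    	kfree(info);
    	return ret;
    }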
@@ -1401,7 +1601,8 @@ void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
 }
 
 /*
- * vmbus_request_offers - Send a request to get all our pending offers.
+ * vmbus_request_offers - Send a request to get all our pending offers
+ * and wait for all boot-time offers to arrive.
 */
 int vmbus_request_offers(void)
 {
@@ -1409,7 +1610,7 @@ int vmbus_request_offers(void)
 	struct vmbus_channel_msginfo *msginfo;
 	int ret;
 
-	msginfo = kmalloc(sizeof(*msginfo) +
+	msginfo = kzalloc(sizeof(*msginfo) +
 			  sizeof(struct vmbus_channel_message_header),
 			  GFP_KERNEL);
 	if (!msginfo)
@@ -1419,6 +1620,10 @@ int vmbus_request_offers(void)
 
 	msg->msgtype = CHANNELMSG_REQUESTOFFERS;
 
+	/*
+	 * This REQUESTOFFERS message will result in the host sending an all
+	 * offers delivered message after all the boot-time offers are sent.
+	 */
 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
 			     true);
 
@@ -1430,25 +1635,33 @@ int vmbus_request_offers(void)
 		goto cleanup;
 	}
 
-cleanup:
-	kfree(msginfo);
-
-	return ret;
-}
+	/*
+	 * Wait for the host to send all boot-time offers.
+	 * Keeping it as a best-effort mechanism, where a warning is
+	 * printed if a timeout occurs, and execution is resumed.
+	 */
+	if (!wait_for_completion_timeout(&vmbus_connection.all_offers_delivered_event,
+					 secs_to_jiffies(60))) {
+		pr_warn("timed out waiting for all boot-time offers to be delivered.\n");
+	}
 
-static void invoke_sc_cb(struct vmbus_channel *primary_channel)
-{
-	struct list_head *cur, *tmp;
-	struct vmbus_channel *cur_channel;
+	/*
+	 * Flush handling of offer messages (which may initiate work on
+	 * other work queues).
+	 */
+	flush_workqueue(vmbus_connection.work_queue);
 
-	if (primary_channel->sc_creation_callback == NULL)
-		return;
+	/*
	 * Flush workqueue for processing the incoming offers. Subchannel
+	 * offers and their processing can happen later, so there is no need to
+	 * flush that workqueue here.
+	 */
+	flush_workqueue(vmbus_connection.handle_primary_chan_wq);
 
-	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
-		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+cleanup:
+	kfree(msginfo);
 
-		primary_channel->sc_creation_callback(cur_channel);
-	}
+	return ret;
 }
 
 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
@@ -1458,25 +1671,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
 }
 EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
 
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
-{
-	bool ret;
-
-	ret = !list_empty(&primary->sc_list);
-
-	if (ret) {
-		/*
-		 * Invoke the callback on sub-channel creation.
-		 * This will present a uniform interface to the
-		 * clients.
-		 */
-		invoke_sc_cb(primary);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
-
 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
 		void (*chn_rescind_cb)(struct vmbus_channel *))
 {
