Diffstat (limited to 'drivers/hv/hyperv_vmbus.h')
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 122
1 file changed, 96 insertions(+), 26 deletions(-)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index dc673edf053c..b2862e0a317a 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -15,10 +15,11 @@
#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
-#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include <hyperv/hvhdk.h>
#include "hv_trace.h"
@@ -32,6 +33,7 @@
*/
#define HV_UTIL_NEGO_TIMEOUT 55
+void vmbus_isr(void);
/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
@@ -120,10 +122,35 @@ enum {
* Per cpu state for channel handling
*/
struct hv_per_cpu_context {
- void *synic_message_page;
- void *synic_event_page;
/*
- * buffer to post messages to the host.
+ * SynIC pages for communicating with the host.
+ *
+ * These pages are accessible to the host partition and the hypervisor.
+ * They may be used for exchanging data with the host partition and
+ * the hypervisor even when those aren't trusted, so the guest
+ * partition must be prepared to handle malicious behavior.
+ */
+ void *hyp_synic_message_page;
+ void *hyp_synic_event_page;
+ /*
+ * SynIC pages for communicating with the paravisor.
+ *
+ * These pages are used only in CoCo VMs and may be accessed only from
+ * within the guest partition. Neither the host partition nor the
+ * hypervisor can access them in that case; they are used for
+ * exchanging data with the paravisor.
+ */
+ void *para_synic_message_page;
+ void *para_synic_event_page;
+
+ /*
+ * This page is only used in hv_post_message() for a TDX VM (with the
+ * paravisor) to post messages to Hyper-V: when such a VM calls
+ * HVCALL_POST_MESSAGE, it can't use hyperv_pcpu_input_arg (which is
+ * encrypted in such a VM) as the hypercall input page, because the
+ * input page for HVCALL_POST_MESSAGE must be decrypted. post_msg_page
+ * (which is decrypted in hv_synic_alloc()) is introduced for this
+ * purpose. See hyperv_init() for more comments.
*/
void *post_msg_page;
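
For context, here is a sketch (not part of this header) of how hv_post_message() might select the hypercall input page per the comment above. The helpers used (hv_isolation_type_tdx(), ms_hyperv.paravisor_present, hyperv_pcpu_input_arg, hv_do_hypercall()) exist in the x86 Hyper-V code, but the wiring shown is illustrative, not the literal implementation:

  struct hv_input_post_message *aligned_msg;
  u64 status;

  if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
          /* TDX with a paravisor: use the decrypted per-CPU post_msg_page. */
          aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
  else
          /* Otherwise the regular per-CPU hypercall input page is usable. */
          aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);

  /* ... fill in connectionid, message_type and the payload, then ... */
  status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
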
@@ -164,10 +191,10 @@ extern int hv_synic_alloc(void);
extern void hv_synic_free(void);
-extern void hv_synic_enable_regs(unsigned int cpu);
+extern void hv_hyp_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);
-extern void hv_synic_disable_regs(unsigned int cpu);
+extern void hv_hyp_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);
/* Interface */
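
For context, the per-CPU init/cleanup pair declared here is driven from the CPU hotplug machinery; a sketch of the registration as done in vmbus_drv.c today (exact details may differ across kernel versions):

  ret = cpuhp_setup_state(CPUHP_AP_VMBUS_ONLINE, "hyperv/vmbus:online",
                          hv_synic_init, hv_synic_cleanup);
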
@@ -175,7 +202,8 @@ extern int hv_synic_cleanup(unsigned int cpu);
void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
- struct page *pages, u32 pagecnt, u32 max_pkt_size);
+ struct page *pages, u32 pagecnt, u32 max_pkt_size,
+ bool confidential);
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
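
A hedged sketch of how a caller could pass the new confidential flag when setting up both rings of a channel; channel, page, send_pages and recv_pages are placeholder variables, and deriving the flag directly from vmbus_is_confidential() is an assumption:

  bool confidential = vmbus_is_confidential();
  int err;

  err = hv_ringbuffer_init(&channel->outbound, page, send_pages,
                           0, confidential);
  if (!err)
          err = hv_ringbuffer_init(&channel->inbound, &page[send_pages],
                                   recv_pages, channel->max_pkt_size,
                                   confidential);
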
@@ -241,8 +269,6 @@ struct vmbus_connection {
* is child->parent notification
*/
struct hv_monitor_page *monitor_pages[2];
- void *monitor_pages_original[2];
- phys_addr_t monitor_pages_pa[2];
struct list_head chn_msg_list;
spinlock_t channelmsg_lock;
@@ -282,18 +308,10 @@ struct vmbus_connection {
struct completion ready_for_suspend_event;
/*
- * The number of primary channels that should be "fixed up"
- * upon resume: these channels are re-offered upon resume, and some
- * fields of the channel offers (i.e. child_relid and connection_id)
- * can change, so the old offermsg must be fixed up, before the resume
- * callbacks of the VSC drivers start to further touch the channels.
+ * Completed once the host has offered all boot-time channels.
+ * Note that some channels may still be awaiting processing on a workqueue.
*/
- atomic_t nr_chan_fixup_on_resume;
- /*
- * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
- * drop to zero.
- */
- struct completion ready_for_resume_event;
+ struct completion all_offers_delivered_event;
};
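
A hedged sketch of a waiter on the new completion; the calling site and the timeout value are assumptions:

  /* Wait until the host has delivered all boot-time channel offers. */
  if (!wait_for_completion_timeout(
                  &vmbus_connection.all_offers_delivered_event, 10 * HZ))
          pr_warn("vmbus: timed out waiting for boot-time offers\n");
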
@@ -336,6 +354,51 @@ extern const struct vmbus_channel_message_table_entry
/* General vmbus interface */
+bool vmbus_is_confidential(void);
+
+#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
+{
+ /*
+ * On crash we're reading some other CPU's message page and we need
+ * to be careful: this other CPU may have already cleared the header
+ * and the host may have already delivered some other message there.
+ * If we blindly write msg->header.message_type, we're going
+ * to lose it. We can still lose a message of the same type but
+ * we count on the fact that there can only be one
+ * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+ * on crash.
+ */
+ if (cmpxchg(&msg->header.message_type, old_msg_type,
+ HVMSG_NONE) != old_msg_type)
+ return;
+
+ /*
+ * The cmpxchg() above does an implicit memory barrier to
+ * ensure the write to MessageType (i.e. set to
+ * HVMSG_NONE) happens before we read MessagePending and
+ * write the EOM register. Otherwise, the EOM write will not
+ * deliver any more messages since there is no empty slot.
+ */
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ if (vmbus_is_confidential())
+ hv_para_set_synic_register(HV_MSR_EOM, 0);
+ else
+ hv_set_msr(HV_MSR_EOM, 0);
+ }
+}
+
+extern int vmbus_interrupt;
+extern int vmbus_irq;
+#endif /* CONFIG_HYPERV_VMBUS */
+
struct hv_device *vmbus_device_create(const guid_t *type,
const guid_t *instance,
struct vmbus_channel *channel);
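
A hedged sketch of the typical consumer of vmbus_signal_eom(): a per-CPU message handler that frees the slot after reading a message. Whether the confidential path reads the paravisor message page here is an assumption; the lookup shown is illustrative:

  struct hv_per_cpu_context *hv_cpu =
          this_cpu_ptr(hv_context.cpu_context);
  void *page_addr = vmbus_is_confidential() ?
          hv_cpu->para_synic_message_page :
          hv_cpu->hyp_synic_message_page;
  struct hv_message *msg =
          (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
  u32 message_type = READ_ONCE(msg->header.message_type);

  if (message_type != HVMSG_NONE) {
          /* ... handle or queue the message ... */
          vmbus_signal_eom(msg, message_type);
  }
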
@@ -365,22 +428,18 @@ void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *srv);
+int hv_kvp_init_transport(void);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);
int hv_vss_init(struct hv_util_service *srv);
+int hv_vss_init_transport(void);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);
-
-int hv_fcopy_init(struct hv_util_service *srv);
-void hv_fcopy_deinit(void);
-int hv_fcopy_pre_suspend(void);
-int hv_fcopy_pre_resume(void);
-void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);
static inline void hv_poll_channel(struct vmbus_channel *channel,
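
A hedged guess at how the new *_init_transport() hooks might be sequenced by the util driver's probe path: the split presumably lets the user-space transport be registered only once the channel is usable. The ordering and the surrounding calls below are assumptions, not the actual hv_utils code; srv, channel and ring_size are placeholders:

  ret = hv_kvp_init(srv);                 /* internal state only */
  if (ret)
          return ret;

  ret = vmbus_open(channel, ring_size, ring_size, NULL, 0,
                   hv_kvp_onchannelcallback, channel);
  if (ret)
          return ret;

  ret = hv_kvp_init_transport();          /* expose the char-device transport */
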
@@ -412,6 +471,11 @@ static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
return vmbus_devs[channel->device_id].perf_device;
}
+static inline size_t hv_dev_ring_size(struct vmbus_channel *channel)
+{
+ return vmbus_devs[channel->device_id].pref_ring_size;
+}
+
static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
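
A hedged sketch of using the new hv_dev_ring_size() helper when opening a channel; the fallback size, ret/channel variables and driver_onchannelcallback are placeholder assumptions:

  size_t ringsize = hv_dev_ring_size(channel);

  if (!ringsize)
          ringsize = VMBUS_RING_SIZE(SZ_16K);

  ret = vmbus_open(channel, ringsize, ringsize, NULL, 0,
                   driver_onchannelcallback, channel);
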
@@ -479,4 +543,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev)
#endif /* CONFIG_HYPERV_TESTING */
+/* Create and remove the sysfs entry for memory-mapped ring buffers of a channel */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma));
+int hv_remove_ring_sysfs(struct vmbus_channel *channel);
+
#endif /* _HYPERV_VMBUS_H */
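
Finally, a hedged sketch of a driver using the new ring-buffer sysfs helpers; my_mmap_ring_buffer() is a hypothetical callback and its body is illustrative only:

  static int my_mmap_ring_buffer(struct vmbus_channel *channel,
                                 struct vm_area_struct *vma)
  {
          /* Illustrative: map the channel's ring-buffer pages into user space. */
          return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
                                 channel->ringbuffer_pagecount << PAGE_SHIFT);
  }

  /* After the channel is opened: */
  ret = hv_create_ring_sysfs(channel, my_mmap_ring_buffer);

  /* ... and before closing the channel: */
  hv_remove_ring_sysfs(channel);
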