path: root/drivers/hv/ring_buffer.c
author     Andres Beltran <lkmlabelt@gmail.com>    2020-11-09 11:04:00 +0100
committer  Wei Liu <wei.liu@kernel.org>            2020-11-17 10:51:06 +0000
commit     e8b7db38449ac5b950a3f00519171c4be3e226ff
tree       257903993b84723c09238c6963d022a9d857520f    /drivers/hv/ring_buffer.c
parent     09162bc32c880a791c6c0668ce0745cf7958f576
Drivers: hv: vmbus: Add vmbus_requestor data structure for VMBus hardening
Currently, VMbus drivers use pointers into guest memory as request IDs for interactions with Hyper-V. To be more robust in the face of errors or malicious behavior from a compromised Hyper-V, avoid exposing guest memory addresses to Hyper-V. Also avoid Hyper-V giving back a bad request ID that is then treated as the address of a guest data structure with no validation. Instead, encapsulate these memory addresses and provide small integers as request IDs.

Signed-off-by: Andres Beltran <lkmlabelt@gmail.com>
Co-developed-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
Signed-off-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Wei Liu <wei.liu@kernel.org>
Link: https://lore.kernel.org/r/20201109100402.8946-2-parri.andrea@gmail.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
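The core idea can be modelled outside the kernel as a small table that hands out array indices in place of raw pointers, and refuses to turn an unknown index back into an address. The user-space C sketch below only illustrates that concept; request_table, request_id_alloc, request_id_claim and TABLE_SIZE are made-up names, and the kernel's vmbus_requestor additionally handles locking, dynamic sizing and the reserved ID values.

/*
 * Simplified, lock-free model of the request-ID idea: guest addresses are
 * stored in a table and only the table index (a small integer) is exposed.
 * Illustrative only; this is not the vmbus_requestor implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64
#define NO_SLOT    ((uint64_t)-1)

struct request_table {
	void *slot[TABLE_SIZE];          /* slot[i] holds the address for ID i */
	uint64_t used[TABLE_SIZE / 64];  /* bitmap of allocated IDs */
};

/* Allocate a small integer ID for @addr; NO_SLOT if the table is full. */
static uint64_t request_id_alloc(struct request_table *t, void *addr)
{
	for (uint64_t i = 0; i < TABLE_SIZE; i++) {
		if (!(t->used[i / 64] & (1ULL << (i % 64)))) {
			t->used[i / 64] |= 1ULL << (i % 64);
			t->slot[i] = addr;
			return i;
		}
	}
	return NO_SLOT;
}

/*
 * Translate an ID back to the stored address and free the slot.  An unknown
 * or never-issued ID yields NULL instead of being treated as a pointer.
 */
static void *request_id_claim(struct request_table *t, uint64_t id)
{
	if (id >= TABLE_SIZE || !(t->used[id / 64] & (1ULL << (id % 64))))
		return NULL;
	t->used[id / 64] &= ~(1ULL << (id % 64));
	return t->slot[id];
}

int main(void)
{
	struct request_table table = { 0 };
	int payload = 42;

	uint64_t id = request_id_alloc(&table, &payload);
	printf("request id: %llu\n", (unsigned long long)id);
	printf("address round-trips: %d\n", request_id_claim(&table, id) == &payload);
	printf("bad id rejected: %d\n", request_id_claim(&table, 999) == NULL);
	return 0;
}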
Diffstat (limited to 'drivers/hv/ring_buffer.c')
-rw-r--r--    drivers/hv/ring_buffer.c    29
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 356e22159e83..35833d4d1a1d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -248,7 +248,8 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
- const struct kvec *kv_list, u32 kv_count)
+ const struct kvec *kv_list, u32 kv_count,
+ u64 requestid)
{
int i;
u32 bytes_avail_towrite;
@@ -258,6 +259,8 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
u64 prev_indices;
unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
+ struct vmpacket_descriptor *desc = kv_list[0].iov_base;
+ u64 rqst_id = VMBUS_NO_RQSTOR;
if (channel->rescind)
return -ENODEV;
@@ -300,6 +303,23 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
kv_list[i].iov_len);
}
+ /*
+ * Allocate the request ID after the data has been copied into the
+ * ring buffer. Once this request ID is allocated, the completion
+ * path could find the data and free it.
+ */
+
+ if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
+ rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
+ if (rqst_id == VMBUS_RQST_ERROR) {
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ pr_err("No request id available\n");
+ return -EAGAIN;
+ }
+ }
+ desc = hv_get_ring_buffer(outring_info) + old_write;
+ desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
+
/* Set previous packet start */
prev_indices = hv_get_ring_bufferindices(outring_info);
@@ -319,8 +339,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
hv_signal_on_write(old_write, channel);
- if (channel->rescind)
+ if (channel->rescind) {
+ if (rqst_id != VMBUS_NO_RQSTOR) {
+ /* Reclaim request ID to avoid leak of IDs */
+ vmbus_request_addr(&channel->requestor, rqst_id);
+ }
return -ENODEV;
+ }
return 0;
}
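
For context on how this write path pairs with the read side: when Hyper-V echoes trans_id back in a completion packet, the driver translates it to the original guest address instead of trusting the value as a pointer. The following is only a hedged sketch; example_on_completion and complete_my_request are hypothetical driver helpers, while vmbus_request_addr() and VMBUS_RQST_ERROR are the interfaces added by this patch series.

/* Hypothetical completion handler under the new request-ID scheme. */
static void example_on_completion(struct vmbus_channel *channel,
				  const struct vmpacket_descriptor *desc)
{
	u64 addr = vmbus_request_addr(&channel->requestor, desc->trans_id);

	/*
	 * A trans_id the guest never issued maps to VMBUS_RQST_ERROR, so a
	 * bad or malicious ID from the host is rejected rather than being
	 * dereferenced as a guest data structure.
	 */
	if (addr == VMBUS_RQST_ERROR)
		return;

	/* Placeholder for the driver-specific completion of the request. */
	complete_my_request(channel, (void *)(unsigned long)addr);
}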