Diffstat (limited to 'drivers/misc/vmw_vmci/vmci_queue_pair.c')
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c  169
1 file changed, 26 insertions, 143 deletions
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 880c33ab9f47..b88ac144ad32 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -324,7 +324,7 @@ static void *qp_alloc_queue(u64 size, u32 flags)
 
 /*
  * Copies from a given buffer or iovector to a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * kmap_local_page() to dynamically map required portions of the queue
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
@@ -345,7 +345,7 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
                 size_t to_copy;
 
                 if (kernel_if->host)
-                        va = kmap(kernel_if->u.h.page[page_index]);
+                        va = kmap_local_page(kernel_if->u.h.page[page_index]);
                 else
                         va = kernel_if->u.g.vas[page_index + 1];
                         /* Skip header. */
@@ -359,12 +359,12 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
                 if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
                                          from)) {
                         if (kernel_if->host)
-                                kunmap(kernel_if->u.h.page[page_index]);
+                                kunmap_local(va);
                         return VMCI_ERROR_INVALID_ARGS;
                 }
                 bytes_copied += to_copy;
                 if (kernel_if->host)
-                        kunmap(kernel_if->u.h.page[page_index]);
+                        kunmap_local(va);
         }
 
         return VMCI_SUCCESS;
@@ -372,7 +372,7 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
 
 /*
  * Copies to a given buffer or iovector from a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * kmap_local_page() to dynamically map required portions of the queue
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
@@ -393,7 +393,7 @@ static int qp_memcpy_from_queue_iter(struct iov_iter *to,
                 int err;
 
                 if (kernel_if->host)
-                        va = kmap(kernel_if->u.h.page[page_index]);
+                        va = kmap_local_page(kernel_if->u.h.page[page_index]);
                 else
                         va = kernel_if->u.g.vas[page_index + 1];
                         /* Skip header. */
@@ -407,12 +407,12 @@ static int qp_memcpy_from_queue_iter(struct iov_iter *to,
                 err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
                 if (err != to_copy) {
                         if (kernel_if->host)
-                                kunmap(kernel_if->u.h.page[page_index]);
+                                kunmap_local(va);
                         return VMCI_ERROR_INVALID_ARGS;
                 }
                 bytes_copied += to_copy;
                 if (kernel_if->host)
-                        kunmap(kernel_if->u.h.page[page_index]);
         }
 
         return VMCI_SUCCESS;
@@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
         u32 context_id = vmci_get_context_id();
         struct vmci_event_qp ev;
 
+        memset(&ev, 0, sizeof(ev));
         ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
         ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                           VMCI_CONTEXT_RESOURCE_ID);
@@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach,
          * kernel.
          */
 
+        memset(&ev, 0, sizeof(ev));
         ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
         ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                           VMCI_CONTEXT_RESOURCE_ID);
@@ -2243,7 +2245,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
 
         result = VMCI_SUCCESS;
 
-        if (context_id != VMCI_HOST_CONTEXT_ID) {
+        if (context_id != VMCI_HOST_CONTEXT_ID &&
+            !QPBROKERSTATE_HAS_MEM(entry)) {
                 struct vmci_qp_page_store page_store;
 
                 page_store.pages = guest_mem;
@@ -2350,7 +2353,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
                 goto out;
         }
 
-        if (context_id != VMCI_HOST_CONTEXT_ID) {
+        if (context_id != VMCI_HOST_CONTEXT_ID &&
+            QPBROKERSTATE_HAS_MEM(entry)) {
                 qp_acquire_queue_mutex(entry->produce_q);
                 result = qp_save_headers(entry);
                 if (result < VMCI_SUCCESS)
@@ -2575,6 +2579,12 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
         if (result < VMCI_SUCCESS)
                 return result;
 
+        /*
+         * This virt_wmb() ensures that data written to the queue
+         * is observable before the new producer_tail is.
+         */
+        virt_wmb();
+
         vmci_q_header_add_producer_tail(produce_q->q_header, written,
                                         produce_q_size);
         return written;
@@ -2618,6 +2628,12 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
         if (buf_ready < VMCI_SUCCESS)
                 return (ssize_t) buf_ready;
 
+        /*
+         * This virt_rmb() ensures that data from the queue will be read
+         * after we have determined how much is ready to be consumed.
+         */
+        virt_rmb();
+
         read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
         head = vmci_q_header_consumer_head(produce_q->q_header);
         if (likely(head + read < consume_q_size)) {
@@ -3007,139 +3023,6 @@ s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
 
 /*
- * vmci_qpair_enqueue() - Throw data on the queue.
- * @qpair:      Pointer to the queue pair struct.
- * @buf:        Pointer to buffer containing data
- * @buf_size:   Length of buffer.
- * @buf_type:   Buffer type (Unused).
- *
- * This is the client interface for enqueueing data into the queue.
- * Returns number of bytes enqueued or < 0 on error.
- */
-ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
-                           const void *buf,
-                           size_t buf_size,
-                           int buf_type)
-{
-        ssize_t result;
-        struct iov_iter from;
-        struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
-
-        if (!qpair || !buf)
-                return VMCI_ERROR_INVALID_ARGS;
-
-        iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
-
-        qp_lock(qpair);
-
-        do {
-                result = qp_enqueue_locked(qpair->produce_q,
-                                           qpair->consume_q,
-                                           qpair->produce_q_size,
-                                           &from);
-
-                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
-                    !qp_wait_for_ready_queue(qpair))
-                        result = VMCI_ERROR_WOULD_BLOCK;
-
-        } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
-
-        qp_unlock(qpair);
-
-        return result;
-}
-EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
-
-/*
- * vmci_qpair_dequeue() - Get data from the queue.
- * @qpair:      Pointer to the queue pair struct.
- * @buf:        Pointer to buffer for the data
- * @buf_size:   Length of buffer.
- * @buf_type:   Buffer type (Unused).
- *
- * This is the client interface for dequeueing data from the queue.
- * Returns number of bytes dequeued or < 0 on error.
- */
-ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
-                           void *buf,
-                           size_t buf_size,
-                           int buf_type)
-{
-        ssize_t result;
-        struct iov_iter to;
-        struct kvec v = {.iov_base = buf, .iov_len = buf_size};
-
-        if (!qpair || !buf)
-                return VMCI_ERROR_INVALID_ARGS;
-
-        iov_iter_kvec(&to, READ, &v, 1, buf_size);
-
-        qp_lock(qpair);
-
-        do {
-                result = qp_dequeue_locked(qpair->produce_q,
-                                           qpair->consume_q,
-                                           qpair->consume_q_size,
-                                           &to, true);
-
-                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
-                    !qp_wait_for_ready_queue(qpair))
-                        result = VMCI_ERROR_WOULD_BLOCK;
-
-        } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
-
-        qp_unlock(qpair);
-
-        return result;
-}
-EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
-
-/*
- * vmci_qpair_peek() - Peek at the data in the queue.
- * @qpair:      Pointer to the queue pair struct.
- * @buf:        Pointer to buffer for the data
- * @buf_size:   Length of buffer.
- * @buf_type:   Buffer type (Unused on Linux).
- *
- * This is the client interface for peeking into a queue.  (I.e.,
- * copy data from the queue without updating the head pointer.)
- * Returns number of bytes dequeued or < 0 on error.
- */
-ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
-                        void *buf,
-                        size_t buf_size,
-                        int buf_type)
-{
-        struct iov_iter to;
-        struct kvec v = {.iov_base = buf, .iov_len = buf_size};
-        ssize_t result;
-
-        if (!qpair || !buf)
-                return VMCI_ERROR_INVALID_ARGS;
-
-        iov_iter_kvec(&to, READ, &v, 1, buf_size);
-
-        qp_lock(qpair);
-
-        do {
-                result = qp_dequeue_locked(qpair->produce_q,
-                                           qpair->consume_q,
-                                           qpair->consume_q_size,
-                                           &to, false);
-
-                if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
-                    !qp_wait_for_ready_queue(qpair))
-                        result = VMCI_ERROR_WOULD_BLOCK;
-
-        } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
-
-        qp_unlock(qpair);
-
-        return result;
-}
-EXPORT_SYMBOL_GPL(vmci_qpair_peek);
-
-/*
  * vmci_qpair_enquev() - Throw data on the queue using iov.
  * @qpair:      Pointer to the queue pair struct.
  * @iov:        Pointer to buffer containing data
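Note on the mapping change: the kmap()/kunmap() pairs in qp_memcpy_to_queue_iter() and qp_memcpy_from_queue_iter() become kmap_local_page()/kunmap_local(). Unlike kmap(), kmap_local_page() creates a short-lived mapping that is valid only in the acquiring context and is released by the mapped address rather than by the page. A minimal sketch of that pattern; copy_into_page() is a hypothetical helper, not part of the driver:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy 'len' bytes into one (possibly highmem) page. */
static void copy_into_page(struct page *page, size_t offset,
                           const void *src, size_t len)
{
        void *va = kmap_local_page(page);   /* short-lived, context-local mapping */

        memcpy((char *)va + offset, src, len);
        kunmap_local(va);                   /* unmap by address, not by page */
}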

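Note on the barriers: the virt_wmb()/virt_rmb() pair added to qp_enqueue_locked() and qp_dequeue_locked() orders the payload copies against the producer tail that publishes them across the guest/host boundary. A minimal single-producer/single-consumer sketch of that pairing; struct toy_ring and its fields are made up for illustration, the driver itself works on the shared VMCI queue headers:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical ring living in memory shared with the peer. */
struct toy_ring {
        u64 producer_tail;
        u64 consumer_head;
        u8  data[256];
};

/* Producer side, cf. the virt_wmb() added to qp_enqueue_locked(). */
static int toy_ring_put(struct toy_ring *r, u8 byte)
{
        u64 head = READ_ONCE(r->consumer_head);
        u64 tail = READ_ONCE(r->producer_tail);

        if (tail - head == sizeof(r->data))
                return -1;              /* ring is full */
        r->data[tail % sizeof(r->data)] = byte;
        virt_wmb();     /* payload must be visible before the tail that announces it */
        WRITE_ONCE(r->producer_tail, tail + 1);
        return 0;
}

/* Consumer side, cf. the virt_rmb() added to qp_dequeue_locked(). */
static int toy_ring_get(struct toy_ring *r, u8 *byte)
{
        u64 head = READ_ONCE(r->consumer_head);
        u64 tail = READ_ONCE(r->producer_tail);

        if (head == tail)
                return -1;              /* nothing ready to consume */
        virt_rmb();     /* read the payload only after the tail that claims it */
        *byte = r->data[head % sizeof(r->data)];
        WRITE_ONCE(r->consumer_head, head + 1);
        return 0;
}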