From 579db9d45cb4e8e7cedff9e6079331a1e2ea9f5d Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Thu, 30 Nov 2017 17:01:26 +0100
Subject: virt: Add vboxguest VMMDEV communication code

This commit adds a header describing the hardware interface for the
Virtual Box Guest PCI device used in Virtual Box virtual machines, and
utility functions for talking to the Virtual Box hypervisor over this
interface.

These utility functions will be used both by the vboxguest driver for the
PCI device, which offers the /dev/vboxguest ioctl API, and by the vboxfs
driver, which offers folder sharing support.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/virt/vboxguest/vboxguest_utils.c | 801 +++++++++++++++++++++++++++++++
 drivers/virt/vboxguest/vmmdev.h          | 449 +++++++++++++++++
 2 files changed, 1250 insertions(+)
 create mode 100644 drivers/virt/vboxguest/vboxguest_utils.c
 create mode 100644 drivers/virt/vboxguest/vmmdev.h

diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
new file mode 100644
index 000000000000..8daea691fbb5
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
+ * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include <linux/vmalloc.h>
+#include "vboxguest_core.h"
+
+/* Get the pointer to the first parameter of a HGCM call request. */
+#define VMMDEV_HGCM_CALL_PARMS(a) \
+	((struct vmmdev_hgcm_function_parameter *)( \
+		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
+
+/* The max parameter buffer size for a user request. */
+#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
+/* The max parameter buffer size for a kernel request. */
+#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)
+
+#define VBG_DEBUG_PORT			0x504
+
+/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
+static DEFINE_SPINLOCK(vbg_log_lock);
+static char vbg_log_buf[128];
+
+#define VBG_LOG(name, pr_func) \
+void name(const char *fmt, ...)						\
+{									\
+	unsigned long flags;						\
+	va_list args;							\
+	int i, count;							\
+									\
+	va_start(args, fmt);						\
+	spin_lock_irqsave(&vbg_log_lock, flags);			\
+									\
+	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
+	for (i = 0; i < count; i++)					\
+		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
+									\
+	pr_func("%s", vbg_log_buf);					\
+									\
+	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
+	va_end(args);							\
+}									\
+EXPORT_SYMBOL(name)
+
+VBG_LOG(vbg_info, pr_info);
+VBG_LOG(vbg_warn, pr_warn);
+VBG_LOG(vbg_err, pr_err);
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+VBG_LOG(vbg_debug, pr_debug);
+#endif
+
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+{
+	struct vmmdev_request_header *req;
+
+	req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+	if (!req)
+		return NULL;
+
+	memset(req, 0xaa, len);
+
+	req->size = len;
+	req->version = VMMDEV_REQUEST_HEADER_VERSION;
+	req->request_type = req_type;
+	req->rc = VERR_GENERAL_FAILURE;
+	req->reserved1 = 0;
+	req->reserved2 = 0;
+
+	return req;
+}
+
+/* Note this function returns a VBox status code, not a negative errno!!
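+ * VINF_* status codes are zero or positive, VERR_* codes are negative; use
+ * vbg_status_code_to_errno() at the bottom of this file when a kernel errno
+ * is needed.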
*/ +int vbg_req_perform(struct vbg_dev *gdev, void *req) +{ + unsigned long phys_req = virt_to_phys(req); + + outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST); + /* + * The host changes the request as a result of the outl, make sure + * the outl and any reads of the req happen in the correct order. + */ + mb(); + + return ((struct vmmdev_request_header *)req)->rc; +} + +static bool hgcm_req_done(struct vbg_dev *gdev, + struct vmmdev_hgcmreq_header *header) +{ + unsigned long flags; + bool done; + + spin_lock_irqsave(&gdev->event_spinlock, flags); + done = header->flags & VMMDEV_HGCM_REQ_DONE; + spin_unlock_irqrestore(&gdev->event_spinlock, flags); + + return done; +} + +int vbg_hgcm_connect(struct vbg_dev *gdev, + struct vmmdev_hgcm_service_location *loc, + u32 *client_id, int *vbox_status) +{ + struct vmmdev_hgcm_connect *hgcm_connect = NULL; + int rc; + + hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), + VMMDEVREQ_HGCM_CONNECT); + if (!hgcm_connect) + return -ENOMEM; + + hgcm_connect->header.flags = 0; + memcpy(&hgcm_connect->loc, loc, sizeof(*loc)); + hgcm_connect->client_id = 0; + + rc = vbg_req_perform(gdev, hgcm_connect); + + if (rc == VINF_HGCM_ASYNC_EXECUTE) + wait_event(gdev->hgcm_wq, + hgcm_req_done(gdev, &hgcm_connect->header)); + + if (rc >= 0) { + *client_id = hgcm_connect->client_id; + rc = hgcm_connect->header.result; + } + + kfree(hgcm_connect); + + *vbox_status = rc; + return 0; +} +EXPORT_SYMBOL(vbg_hgcm_connect); + +int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) +{ + struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; + int rc; + + hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), + VMMDEVREQ_HGCM_DISCONNECT); + if (!hgcm_disconnect) + return -ENOMEM; + + hgcm_disconnect->header.flags = 0; + hgcm_disconnect->client_id = client_id; + + rc = vbg_req_perform(gdev, hgcm_disconnect); + + if (rc == VINF_HGCM_ASYNC_EXECUTE) + wait_event(gdev->hgcm_wq, + hgcm_req_done(gdev, &hgcm_disconnect->header)); + + if (rc >= 0) + rc = hgcm_disconnect->header.result; + + kfree(hgcm_disconnect); + + *vbox_status = rc; + return 0; +} +EXPORT_SYMBOL(vbg_hgcm_disconnect); + +static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len) +{ + u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK)); + + return size >> PAGE_SHIFT; +} + +static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra) +{ + u32 page_count; + + page_count = hgcm_call_buf_size_in_pages(buf, len); + *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); +} + +static int hgcm_call_preprocess_linaddr( + const struct vmmdev_hgcm_function_parameter *src_parm, + void **bounce_buf_ret, size_t *extra) +{ + void *buf, *bounce_buf; + bool copy_in; + u32 len; + int ret; + + buf = (void *)src_parm->u.pointer.u.linear_addr; + len = src_parm->u.pointer.size; + copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT; + + if (len > VBG_MAX_HGCM_USER_PARM) + return -E2BIG; + + bounce_buf = kvmalloc(len, GFP_KERNEL); + if (!bounce_buf) + return -ENOMEM; + + if (copy_in) { + ret = copy_from_user(bounce_buf, (void __user *)buf, len); + if (ret) + return -EFAULT; + } else { + memset(bounce_buf, 0, len); + } + + *bounce_buf_ret = bounce_buf; + hgcm_call_add_pagelist_size(bounce_buf, len, extra); + return 0; +} + +/** + * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and + * figure out how much extra storage we need for page lists. + * Return: 0 or negative errno value. 
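+ * Note: bounce buffers may have been allocated even when this function
+ * fails, so the caller must always free the returned bounce-buffer array;
+ * vbg_hgcm_call() does this on its error path.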
+ * @src_parm: Pointer to source function call parameters + * @parm_count: Number of function call parameters. + * @bounce_bufs_ret: Where to return the allocated bouncebuffer array + * @extra: Where to return the extra request space needed for + * physical page lists. + */ +static int hgcm_call_preprocess( + const struct vmmdev_hgcm_function_parameter *src_parm, + u32 parm_count, void ***bounce_bufs_ret, size_t *extra) +{ + void *buf, **bounce_bufs = NULL; + u32 i, len; + int ret; + + for (i = 0; i < parm_count; i++, src_parm++) { + switch (src_parm->type) { + case VMMDEV_HGCM_PARM_TYPE_32BIT: + case VMMDEV_HGCM_PARM_TYPE_64BIT: + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: + if (!bounce_bufs) { + bounce_bufs = kcalloc(parm_count, + sizeof(void *), + GFP_KERNEL); + if (!bounce_bufs) + return -ENOMEM; + + *bounce_bufs_ret = bounce_bufs; + } + + ret = hgcm_call_preprocess_linaddr(src_parm, + &bounce_bufs[i], + extra); + if (ret) + return ret; + + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: + buf = (void *)src_parm->u.pointer.u.linear_addr; + len = src_parm->u.pointer.size; + if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM)) + return -E2BIG; + + hgcm_call_add_pagelist_size(buf, len, extra); + break; + + default: + return -EINVAL; + } + } + + return 0; +} + +/** + * Translates linear address types to page list direction flags. + * + * Return: page list flags. + * @type: The type. + */ +static u32 hgcm_call_linear_addr_type_to_pagelist_flags( + enum vmmdev_hgcm_function_parameter_type type) +{ + switch (type) { + default: + WARN_ON(1); + /* Fall through */ + case VMMDEV_HGCM_PARM_TYPE_LINADDR: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: + return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: + return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: + return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST; + } +} + +static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call, + struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len, + enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra) +{ + struct vmmdev_hgcm_pagelist *dst_pg_lst; + struct page *page; + bool is_vmalloc; + u32 i, page_count; + + dst_parm->type = type; + + if (len == 0) { + dst_parm->u.pointer.size = 0; + dst_parm->u.pointer.u.linear_addr = 0; + return; + } + + dst_pg_lst = (void *)call + *off_extra; + page_count = hgcm_call_buf_size_in_pages(buf, len); + is_vmalloc = is_vmalloc_addr(buf); + + dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST; + dst_parm->u.page_list.size = len; + dst_parm->u.page_list.offset = *off_extra; + dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type); + dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK; + dst_pg_lst->page_count = page_count; + + for (i = 0; i < page_count; i++) { + if (is_vmalloc) + page = vmalloc_to_page(buf); + else + page = virt_to_page(buf); + + dst_pg_lst->pages[i] = page_to_phys(page); + buf += PAGE_SIZE; + } + + *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); +} + +/** + * Initializes the call request that we're sending to the host. + * @call: The call to initialize. + * @client_id: The client ID of the caller. 
+ * @function: The function number of the function to call. + * @src_parm: Pointer to source function call parameters. + * @parm_count: Number of function call parameters. + * @bounce_bufs: The bouncebuffer array. + */ +static void hgcm_call_init_call( + struct vmmdev_hgcm_call *call, u32 client_id, u32 function, + const struct vmmdev_hgcm_function_parameter *src_parm, + u32 parm_count, void **bounce_bufs) +{ + struct vmmdev_hgcm_function_parameter *dst_parm = + VMMDEV_HGCM_CALL_PARMS(call); + u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call; + void *buf; + + call->header.flags = 0; + call->header.result = VINF_SUCCESS; + call->client_id = client_id; + call->function = function; + call->parm_count = parm_count; + + for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) { + switch (src_parm->type) { + case VMMDEV_HGCM_PARM_TYPE_32BIT: + case VMMDEV_HGCM_PARM_TYPE_64BIT: + *dst_parm = *src_parm; + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: + hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i], + src_parm->u.pointer.size, + src_parm->type, &off_extra); + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: + buf = (void *)src_parm->u.pointer.u.linear_addr; + hgcm_call_init_linaddr(call, dst_parm, buf, + src_parm->u.pointer.size, + src_parm->type, &off_extra); + break; + + default: + WARN_ON(1); + dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID; + } + } +} + +/** + * Tries to cancel a pending HGCM call. + * + * Return: VBox status code + */ +static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call) +{ + int rc; + + /* + * We use a pre-allocated request for cancellations, which is + * protected by cancel_req_mutex. This means that all cancellations + * get serialized, this should be fine since they should be rare. + */ + mutex_lock(&gdev->cancel_req_mutex); + gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call); + rc = vbg_req_perform(gdev, gdev->cancel_req); + mutex_unlock(&gdev->cancel_req_mutex); + + if (rc == VERR_NOT_IMPLEMENTED) { + call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED; + call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL; + + rc = vbg_req_perform(gdev, call); + if (rc == VERR_INVALID_PARAMETER) + rc = VERR_NOT_FOUND; + } + + if (rc >= 0) + call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED; + + return rc; +} + +/** + * Performs the call and completion wait. + * Return: 0 or negative errno value. + * @gdev: The VBoxGuest device extension. + * @call: The call to execute. + * @timeout_ms: Timeout in ms. + * @leak_it: Where to return the leak it / free it, indicator. + * Cancellation fun. + */ +static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, + u32 timeout_ms, bool *leak_it) +{ + int rc, cancel_rc, ret; + long timeout; + + *leak_it = false; + + rc = vbg_req_perform(gdev, call); + + /* + * If the call failed, then pretend success. Upper layers will + * interpret the result code in the packet. 
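+	 * (The VBox status code lands in call->header.result, which
+	 * vbg_hgcm_call copies to *vbox_status for the caller.)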
+ */ + if (rc < 0) { + call->header.result = rc; + return 0; + } + + if (rc != VINF_HGCM_ASYNC_EXECUTE) + return 0; + + /* Host decided to process the request asynchronously, wait for it */ + if (timeout_ms == U32_MAX) + timeout = MAX_SCHEDULE_TIMEOUT; + else + timeout = msecs_to_jiffies(timeout_ms); + + timeout = wait_event_interruptible_timeout( + gdev->hgcm_wq, + hgcm_req_done(gdev, &call->header), + timeout); + + /* timeout > 0 means hgcm_req_done has returned true, so success */ + if (timeout > 0) + return 0; + + if (timeout == 0) + ret = -ETIMEDOUT; + else + ret = -EINTR; + + /* Cancel the request */ + cancel_rc = hgcm_cancel_call(gdev, call); + if (cancel_rc >= 0) + return ret; + + /* + * Failed to cancel, this should mean that the cancel has lost the + * race with normal completion, wait while the host completes it. + */ + if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED) + timeout = msecs_to_jiffies(500); + else + timeout = msecs_to_jiffies(2000); + + timeout = wait_event_timeout(gdev->hgcm_wq, + hgcm_req_done(gdev, &call->header), + timeout); + + if (WARN_ON(timeout == 0)) { + /* We really should never get here */ + vbg_err("%s: Call timedout and cancellation failed, leaking the request\n", + __func__); + *leak_it = true; + return ret; + } + + /* The call has completed normally after all */ + return 0; +} + +/** + * Copies the result of the call back to the caller info structure and user + * buffers. + * Return: 0 or negative errno value. + * @call: HGCM call request. + * @dst_parm: Pointer to function call parameters destination. + * @parm_count: Number of function call parameters. + * @bounce_bufs: The bouncebuffer array. + */ +static int hgcm_call_copy_back_result( + const struct vmmdev_hgcm_call *call, + struct vmmdev_hgcm_function_parameter *dst_parm, + u32 parm_count, void **bounce_bufs) +{ + const struct vmmdev_hgcm_function_parameter *src_parm = + VMMDEV_HGCM_CALL_PARMS(call); + void __user *p; + int ret; + u32 i; + + /* Copy back parameters. */ + for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) { + switch (dst_parm->type) { + case VMMDEV_HGCM_PARM_TYPE_32BIT: + case VMMDEV_HGCM_PARM_TYPE_64BIT: + *dst_parm = *src_parm; + break; + + case VMMDEV_HGCM_PARM_TYPE_PAGELIST: + dst_parm->u.page_list.size = src_parm->u.page_list.size; + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT: + dst_parm->u.pointer.size = src_parm->u.pointer.size; + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: + dst_parm->u.pointer.size = src_parm->u.pointer.size; + + p = (void __user *)dst_parm->u.pointer.u.linear_addr; + ret = copy_to_user(p, bounce_bufs[i], + min(src_parm->u.pointer.size, + dst_parm->u.pointer.size)); + if (ret) + return -EFAULT; + break; + + default: + WARN_ON(1); + return -EINVAL; + } + } + + return 0; +} + +int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, + u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, + u32 parm_count, int *vbox_status) +{ + struct vmmdev_hgcm_call *call; + void **bounce_bufs = NULL; + bool leak_it; + size_t size; + int i, ret; + + size = sizeof(struct vmmdev_hgcm_call) + + parm_count * sizeof(struct vmmdev_hgcm_function_parameter); + /* + * Validate and buffer the parameters for the call. This also increases + * call_size with the amount of extra space needed for page lists. 
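+	 * ("call_size" refers to the local 'size' variable, which becomes
+	 * the total size of the request allocated by vbg_req_alloc() below.)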
+ */ + ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size); + if (ret) { + /* Even on error bounce bufs may still have been allocated */ + goto free_bounce_bufs; + } + + call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL); + if (!call) { + ret = -ENOMEM; + goto free_bounce_bufs; + } + + hgcm_call_init_call(call, client_id, function, parms, parm_count, + bounce_bufs); + + ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it); + if (ret == 0) { + *vbox_status = call->header.result; + ret = hgcm_call_copy_back_result(call, parms, parm_count, + bounce_bufs); + } + + if (!leak_it) + kfree(call); + +free_bounce_bufs: + if (bounce_bufs) { + for (i = 0; i < parm_count; i++) + kvfree(bounce_bufs[i]); + kfree(bounce_bufs); + } + + return ret; +} +EXPORT_SYMBOL(vbg_hgcm_call); + +#ifdef CONFIG_COMPAT +int vbg_hgcm_call32( + struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, + struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, + int *vbox_status) +{ + struct vmmdev_hgcm_function_parameter *parm64 = NULL; + u32 i, size; + int ret = 0; + + /* KISS allocate a temporary request and convert the parameters. */ + size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter); + parm64 = kzalloc(size, GFP_KERNEL); + if (!parm64) + return -ENOMEM; + + for (i = 0; i < parm_count; i++) { + switch (parm32[i].type) { + case VMMDEV_HGCM_PARM_TYPE_32BIT: + parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parm64[i].u.value32 = parm32[i].u.value32; + break; + + case VMMDEV_HGCM_PARM_TYPE_64BIT: + parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parm64[i].u.value64 = parm32[i].u.value64; + break; + + case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT: + case VMMDEV_HGCM_PARM_TYPE_LINADDR: + case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN: + parm64[i].type = parm32[i].type; + parm64[i].u.pointer.size = parm32[i].u.pointer.size; + parm64[i].u.pointer.u.linear_addr = + parm32[i].u.pointer.u.linear_addr; + break; + + default: + ret = -EINVAL; + } + if (ret < 0) + goto out_free; + } + + ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms, + parm64, parm_count, vbox_status); + if (ret < 0) + goto out_free; + + /* Copy back. 
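+	 * Only values and buffer sizes are copied back into the 32-bit
+	 * parameter structs; the linear addresses themselves are unchanged.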
+	 */
+	for (i = 0; i < parm_count; i++) {
+		switch (parm64[i].type) {
+		case VMMDEV_HGCM_PARM_TYPE_32BIT:
+			parm32[i].u.value32 = parm64[i].u.value32;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_64BIT:
+			parm32[i].u.value64 = parm64[i].u.value64;
+			break;
+
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
+			break;
+
+		default:
+			WARN_ON(1);
+			ret = -EINVAL;
+		}
+	}
+
+out_free:
+	kfree(parm64);
+	return ret;
+}
+#endif
+
+static const int vbg_status_code_to_errno_table[] = {
+	[-VERR_ACCESS_DENIED] = -EPERM,
+	[-VERR_FILE_NOT_FOUND] = -ENOENT,
+	[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
+	[-VERR_INTERRUPTED] = -EINTR,
+	[-VERR_DEV_IO_ERROR] = -EIO,
+	[-VERR_TOO_MUCH_DATA] = -E2BIG,
+	[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
+	[-VERR_INVALID_HANDLE] = -EBADF,
+	[-VERR_TRY_AGAIN] = -EAGAIN,
+	[-VERR_NO_MEMORY] = -ENOMEM,
+	[-VERR_INVALID_POINTER] = -EFAULT,
+	[-VERR_RESOURCE_BUSY] = -EBUSY,
+	[-VERR_ALREADY_EXISTS] = -EEXIST,
+	[-VERR_NOT_SAME_DEVICE] = -EXDEV,
+	[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
+	[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
+	[-VERR_IS_A_DIRECTORY] = -EISDIR,
+	[-VERR_INVALID_PARAMETER] = -EINVAL,
+	[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
+	[-VERR_INVALID_FUNCTION] = -ENOTTY,
+	[-VERR_SHARING_VIOLATION] = -ETXTBSY,
+	[-VERR_FILE_TOO_BIG] = -EFBIG,
+	[-VERR_DISK_FULL] = -ENOSPC,
+	[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
+	[-VERR_WRITE_PROTECT] = -EROFS,
+	[-VERR_BROKEN_PIPE] = -EPIPE,
+	[-VERR_DEADLOCK] = -EDEADLK,
+	[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
+	[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
+	[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
+	[-VERR_NOT_SUPPORTED] = -ENOSYS,
+	[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
+	[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
+	[-VERR_NO_DATA] = -ENODATA,
+	[-VERR_NET_NO_NETWORK] = -ENONET,
+	[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
+	[-VERR_NO_TRANSLATION] = -EILSEQ,
+	[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
+	[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
+	[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
+	[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
+	[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
+	[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
+	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
+	[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
+	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
+	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
+	[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
+	[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
+	[-VERR_NET_DOWN] = -ENETDOWN,
+	[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
+	[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
+	[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
+	[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
+	[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
+	[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
+	[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
+	[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
+	[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
+	[-VERR_TIMEOUT] = -ETIMEDOUT,
+	[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
+	[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
+	[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
+	[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
+	[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
+	[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
+	[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
+};
+
+int vbg_status_code_to_errno(int rc)
+{
+	if (rc >= 0)
+		return 0;
+
+	rc = -rc;
+	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
+	    vbg_status_code_to_errno_table[rc] == 0) {
+		vbg_warn("%s:
Unhandled err %d\n", __func__, -rc); + return -EPROTO; + } + + return vbg_status_code_to_errno_table[rc]; +} +EXPORT_SYMBOL(vbg_status_code_to_errno); diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h new file mode 100644 index 000000000000..5e2ae978935d --- /dev/null +++ b/drivers/virt/vboxguest/vmmdev.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* + * Virtual Device for Guest <-> VMM/Host communication interface + * + * Copyright (C) 2006-2016 Oracle Corporation + */ + +#ifndef __VBOX_VMMDEV_H__ +#define __VBOX_VMMDEV_H__ + +#include +#include +#include +#include + +/* Port for generic request interface (relative offset). */ +#define VMMDEV_PORT_OFF_REQUEST 0 + +/** Layout of VMMDEV RAM region that contains information for guest. */ +struct vmmdev_memory { + /** The size of this structure. */ + u32 size; + /** The structure version. (VMMDEV_MEMORY_VERSION) */ + u32 version; + + union { + struct { + /** Flag telling that VMMDev has events pending. */ + u8 have_events; + /** Explicit padding, MBZ. */ + u8 padding[3]; + } V1_04; + + struct { + /** Pending events flags, set by host. */ + u32 host_events; + /** Mask of events the guest wants, set by guest. */ + u32 guest_event_mask; + } V1_03; + } V; + + /* struct vbva_memory, not used */ +}; +VMMDEV_ASSERT_SIZE(vmmdev_memory, 8 + 8); + +/** Version of vmmdev_memory structure (vmmdev_memory::version). */ +#define VMMDEV_MEMORY_VERSION (1) + +/* Host mouse capabilities has been changed. */ +#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED BIT(0) +/* HGCM event. */ +#define VMMDEV_EVENT_HGCM BIT(1) +/* A display change request has been issued. */ +#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST BIT(2) +/* Credentials are available for judgement. */ +#define VMMDEV_EVENT_JUDGE_CREDENTIALS BIT(3) +/* The guest has been restored. */ +#define VMMDEV_EVENT_RESTORED BIT(4) +/* Seamless mode state changed. */ +#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST BIT(5) +/* Memory balloon size changed. */ +#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST BIT(6) +/* Statistics interval changed. */ +#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST BIT(7) +/* VRDP status changed. */ +#define VMMDEV_EVENT_VRDP BIT(8) +/* New mouse position data available. */ +#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED BIT(9) +/* CPU hotplug event occurred. */ +#define VMMDEV_EVENT_CPU_HOTPLUG BIT(10) +/* The mask of valid events, for sanity checking. */ +#define VMMDEV_EVENT_VALID_EVENT_MASK 0x000007ffU + +/* + * Additions are allowed to work only if additions_major == vmmdev_current && + * additions_minor <= vmmdev_current. Additions version is reported to host + * (VMMDev) by VMMDEVREQ_REPORT_GUEST_INFO. + */ +#define VMMDEV_VERSION 0x00010004 +#define VMMDEV_VERSION_MAJOR (VMMDEV_VERSION >> 16) +#define VMMDEV_VERSION_MINOR (VMMDEV_VERSION & 0xffff) + +/* Maximum request packet size. */ +#define VMMDEV_MAX_VMMDEVREQ_SIZE 1048576 + +/* Version of vmmdev_request_header structure. */ +#define VMMDEV_REQUEST_HEADER_VERSION 0x10001 + +/** struct vmmdev_request_header - Generic VMMDev request header. */ +struct vmmdev_request_header { + /** IN: Size of the structure in bytes (including body). */ + u32 size; + /** IN: Version of the structure. */ + u32 version; + /** IN: Type of the request. */ + enum vmmdev_request_type request_type; + /** OUT: Return code. */ + s32 rc; + /** Reserved field no.1. MBZ. */ + u32 reserved1; + /** Reserved field no.2. MBZ. 
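+	 * (MBZ = must be zero. vbg_req_alloc() in vboxguest_utils.c fills in
+	 * all of these header fields when allocating a request.)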
*/ + u32 reserved2; +}; +VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24); + +/** + * struct vmmdev_mouse_status - Mouse status request structure. + * + * Used by VMMDEVREQ_GET_MOUSE_STATUS and VMMDEVREQ_SET_MOUSE_STATUS. + */ +struct vmmdev_mouse_status { + /** header */ + struct vmmdev_request_header header; + /** Mouse feature mask. See VMMDEV_MOUSE_*. */ + u32 mouse_features; + /** Mouse x position. */ + s32 pointer_pos_x; + /** Mouse y position. */ + s32 pointer_pos_y; +}; +VMMDEV_ASSERT_SIZE(vmmdev_mouse_status, 24 + 12); + +/* The guest can (== wants to) handle absolute coordinates. */ +#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE BIT(0) +/* + * The host can (== wants to) send absolute coordinates. + * (Input not captured.) + */ +#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE BIT(1) +/* + * The guest can *NOT* switch to software cursor and therefore depends on the + * host cursor. + * + * When guest additions are installed and the host has promised to display the + * cursor itself, the guest installs a hardware mouse driver. Don't ask the + * guest to switch to a software cursor then. + */ +#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR BIT(2) +/* The host does NOT provide support for drawing the cursor itself. */ +#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER BIT(3) +/* The guest can read VMMDev events to find out about pointer movement */ +#define VMMDEV_MOUSE_NEW_PROTOCOL BIT(4) +/* + * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR + * bit, the host will honour this. + */ +#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR BIT(5) +/* + * The host supplies an absolute pointing device. The Guest Additions may + * wish to use this to decide whether to install their own driver. + */ +#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV BIT(6) + +/* The minimum value our pointing device can return. */ +#define VMMDEV_MOUSE_RANGE_MIN 0 +/* The maximum value our pointing device can return. */ +#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF + +/** + * struct vmmdev_host_version - VirtualBox host version request structure. + * + * VBG uses this to detect the precense of new features in the interface. + */ +struct vmmdev_host_version { + /** Header. */ + struct vmmdev_request_header header; + /** Major version. */ + u16 major; + /** Minor version. */ + u16 minor; + /** Build number. */ + u32 build; + /** SVN revision. */ + u32 revision; + /** Feature mask. */ + u32 features; +}; +VMMDEV_ASSERT_SIZE(vmmdev_host_version, 24 + 16); + +/* Physical page lists are supported by HGCM. */ +#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST BIT(0) + +/** + * struct vmmdev_mask - Structure to set / clear bits in a mask used for + * VMMDEVREQ_SET_GUEST_CAPABILITIES and VMMDEVREQ_CTL_GUEST_FILTER_MASK. + */ +struct vmmdev_mask { + /** Header. */ + struct vmmdev_request_header header; + /** Mask of bits to be set. */ + u32 or_mask; + /** Mask of bits to be cleared. */ + u32 not_mask; +}; +VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8); + +/* The guest supports seamless display rendering. */ +#define VMMDEV_GUEST_SUPPORTS_SEAMLESS BIT(0) +/* The guest supports mapping guest to host windows. */ +#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING BIT(1) +/* + * The guest graphical additions are active. + * Used for fast activation and deactivation of certain graphical operations + * (e.g. resizing & seamless). The legacy VMMDEVREQ_REPORT_GUEST_CAPABILITIES + * request sets this automatically, but VMMDEVREQ_SET_GUEST_CAPABILITIES does + * not. 
+ */ +#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) + +/** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ +struct vmmdev_hypervisorinfo { + /** Header. */ + struct vmmdev_request_header header; + /** + * Guest virtual address of proposed hypervisor start. + * Not used by VMMDEVREQ_GET_HYPERVISOR_INFO. + */ + u32 hypervisor_start; + /** Hypervisor size in bytes. */ + u32 hypervisor_size; +}; +VMMDEV_ASSERT_SIZE(vmmdev_hypervisorinfo, 24 + 8); + +/** struct vmmdev_events - Pending events structure. */ +struct vmmdev_events { + /** Header. */ + struct vmmdev_request_header header; + /** OUT: Pending event mask. */ + u32 events; +}; +VMMDEV_ASSERT_SIZE(vmmdev_events, 24 + 4); + +#define VMMDEV_OSTYPE_LINUX26 0x53000 +#define VMMDEV_OSTYPE_X64 BIT(8) + +/** struct vmmdev_guestinfo - Guest information report. */ +struct vmmdev_guest_info { + /** Header. */ + struct vmmdev_request_header header; + /** + * The VMMDev interface version expected by additions. + * *Deprecated*, do not use anymore! Will be removed. + */ + u32 interface_version; + /** Guest OS type. */ + u32 os_type; +}; +VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8); + +/** struct vmmdev_guestinfo2 - Guest information report, version 2. */ +struct vmmdev_guest_info2 { + /** Header. */ + struct vmmdev_request_header header; + /** Major version. */ + u16 additions_major; + /** Minor version. */ + u16 additions_minor; + /** Build number. */ + u32 additions_build; + /** SVN revision. */ + u32 additions_revision; + /** Feature mask, currently unused. */ + u32 additions_features; + /** + * The intentional meaning of this field was: + * Some additional information, for example 'Beta 1' or something like + * that. + * + * The way it was implemented was implemented: VBG_VERSION_STRING. + * + * This means the first three members are duplicated in this field (if + * the guest build config is sane). So, the user must check this and + * chop it off before usage. There is, because of the Main code's blind + * trust in the field's content, no way back. + */ + char name[128]; +}; +VMMDEV_ASSERT_SIZE(vmmdev_guest_info2, 24 + 144); + +enum vmmdev_guest_facility_type { + VBOXGUEST_FACILITY_TYPE_UNKNOWN = 0, + VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER = 20, + /* VBoxGINA / VBoxCredProv / pam_vbox. */ + VBOXGUEST_FACILITY_TYPE_AUTO_LOGON = 90, + VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE = 100, + /* VBoxTray (Windows), VBoxClient (Linux, Unix). */ + VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT = 101, + VBOXGUEST_FACILITY_TYPE_SEAMLESS = 1000, + VBOXGUEST_FACILITY_TYPE_GRAPHICS = 1100, + VBOXGUEST_FACILITY_TYPE_ALL = 0x7ffffffe, + /* Ensure the enum is a 32 bit data-type */ + VBOXGUEST_FACILITY_TYPE_SIZEHACK = 0x7fffffff +}; + +enum vmmdev_guest_facility_status { + VBOXGUEST_FACILITY_STATUS_INACTIVE = 0, + VBOXGUEST_FACILITY_STATUS_PAUSED = 1, + VBOXGUEST_FACILITY_STATUS_PRE_INIT = 20, + VBOXGUEST_FACILITY_STATUS_INIT = 30, + VBOXGUEST_FACILITY_STATUS_ACTIVE = 50, + VBOXGUEST_FACILITY_STATUS_TERMINATING = 100, + VBOXGUEST_FACILITY_STATUS_TERMINATED = 101, + VBOXGUEST_FACILITY_STATUS_FAILED = 800, + VBOXGUEST_FACILITY_STATUS_UNKNOWN = 999, + /* Ensure the enum is a 32 bit data-type */ + VBOXGUEST_FACILITY_STATUS_SIZEHACK = 0x7fffffff +}; + +/** struct vmmdev_guest_status - Guest Additions status structure. */ +struct vmmdev_guest_status { + /** Header. */ + struct vmmdev_request_header header; + /** Facility the status is indicated for. */ + enum vmmdev_guest_facility_type facility; + /** Current guest status. 
*/ + enum vmmdev_guest_facility_status status; + /** Flags, not used at the moment. */ + u32 flags; +}; +VMMDEV_ASSERT_SIZE(vmmdev_guest_status, 24 + 12); + +#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE (1048576) +#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES (1048576 / 4096) + +/** struct vmmdev_memballoon_info - Memory-balloon info structure. */ +struct vmmdev_memballoon_info { + /** Header. */ + struct vmmdev_request_header header; + /** Balloon size in megabytes. */ + u32 balloon_chunks; + /** Guest ram size in megabytes. */ + u32 phys_mem_chunks; + /** + * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that + * the request is a response to that event. + * (Don't confuse this with VMMDEVREQ_ACKNOWLEDGE_EVENTS.) + */ + u32 event_ack; +}; +VMMDEV_ASSERT_SIZE(vmmdev_memballoon_info, 24 + 12); + +/** struct vmmdev_memballoon_change - Change the size of the balloon. */ +struct vmmdev_memballoon_change { + /** Header. */ + struct vmmdev_request_header header; + /** The number of pages in the array. */ + u32 pages; + /** true = inflate, false = deflate. */ + u32 inflate; + /** Physical address (u64) of each page. */ + u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]; +}; + +/** struct vmmdev_write_core_dump - Write Core Dump request data. */ +struct vmmdev_write_core_dump { + /** Header. */ + struct vmmdev_request_header header; + /** Flags (reserved, MBZ). */ + u32 flags; +}; +VMMDEV_ASSERT_SIZE(vmmdev_write_core_dump, 24 + 4); + +/** struct vmmdev_heartbeat - Heart beat check state structure. */ +struct vmmdev_heartbeat { + /** Header. */ + struct vmmdev_request_header header; + /** OUT: Guest heartbeat interval in nanosec. */ + u64 interval_ns; + /** Heartbeat check flag. */ + u8 enabled; + /** Explicit padding, MBZ. */ + u8 padding[3]; +} __packed; +VMMDEV_ASSERT_SIZE(vmmdev_heartbeat, 24 + 12); + +#define VMMDEV_HGCM_REQ_DONE BIT(0) +#define VMMDEV_HGCM_REQ_CANCELLED BIT(1) + +/** struct vmmdev_hgcmreq_header - vmmdev HGCM requests header. */ +struct vmmdev_hgcmreq_header { + /** Request header. */ + struct vmmdev_request_header header; + + /** HGCM flags. */ + u32 flags; + + /** Result code. */ + s32 result; +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcmreq_header, 24 + 8); + +/** struct vmmdev_hgcm_connect - HGCM connect request structure. */ +struct vmmdev_hgcm_connect { + /** HGCM request header. */ + struct vmmdev_hgcmreq_header header; + + /** IN: Description of service to connect to. */ + struct vmmdev_hgcm_service_location loc; + + /** OUT: Client identifier assigned by local instance of HGCM. */ + u32 client_id; +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_connect, 32 + 132 + 4); + +/** struct vmmdev_hgcm_disconnect - HGCM disconnect request structure. */ +struct vmmdev_hgcm_disconnect { + /** HGCM request header. */ + struct vmmdev_hgcmreq_header header; + + /** IN: Client identifier. */ + u32 client_id; +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_disconnect, 32 + 4); + +#define VMMDEV_HGCM_MAX_PARMS 32 + +/** struct vmmdev_hgcm_call - HGCM call request structure. */ +struct vmmdev_hgcm_call { + /* request header */ + struct vmmdev_hgcmreq_header header; + + /** IN: Client identifier. */ + u32 client_id; + /** IN: Service function number. */ + u32 function; + /** IN: Number of parameters. */ + u32 parm_count; + /** Parameters follow in form: HGCMFunctionParameter32|64 parms[X]; */ +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_call, 32 + 12); + +/** + * struct vmmdev_hgcm_cancel2 - HGCM cancel request structure, version 2. + * + * After the request header.rc will be: + * + * VINF_SUCCESS when cancelled. 
+ * VERR_NOT_FOUND if the specified request cannot be found.
+ * VERR_INVALID_PARAMETER if the address is invalid.
+ */
+struct vmmdev_hgcm_cancel2 {
+	/** Header. */
+	struct vmmdev_request_header header;
+	/** The physical address of the request to cancel. */
+	u32 phys_req_to_cancel;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_cancel2, 24 + 4);
+
+#endif

From 0ba002bc4393dcfae031fc707b11c094b46a5048 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Thu, 30 Nov 2017 17:01:27 +0100
Subject: virt: Add vboxguest driver for Virtual Box Guest integration

This commit adds a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add support for
Virtual Box Guest integration features such as copy-and-paste, seamless
mode and OpenGL pass-through.

This driver also offers vboxguest IPC functionality, which is needed for
the vboxfs driver which offers folder sharing support.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/virt/Kconfig                       |    1 +
 drivers/virt/Makefile                      |    1 +
 drivers/virt/vboxguest/Kconfig             |   18 +
 drivers/virt/vboxguest/Makefile            |    3 +
 drivers/virt/vboxguest/vboxguest_core.c    | 1571 ++++++++++++++++++++++++++++
 drivers/virt/vboxguest/vboxguest_core.h    |  174 +++
 drivers/virt/vboxguest/vboxguest_linux.c   |  466 +++++++++
 drivers/virt/vboxguest/vboxguest_version.h |   19 +
 8 files changed, 2253 insertions(+)
 create mode 100644 drivers/virt/vboxguest/Kconfig
 create mode 100644 drivers/virt/vboxguest/Makefile
 create mode 100644 drivers/virt/vboxguest/vboxguest_core.c
 create mode 100644 drivers/virt/vboxguest/vboxguest_core.h
 create mode 100644 drivers/virt/vboxguest/vboxguest_linux.c
 create mode 100644 drivers/virt/vboxguest/vboxguest_version.h

diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 99ebdde590f8..8d9cdfbd6bcc 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -30,4 +30,5 @@ config FSL_HV_MANAGER
 	  4) A kernel interface for receiving callbacks when a managed
 	     partition shuts down.

+source "drivers/virt/vboxguest/Kconfig"
 endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index c47f04dd343b..d3f7b2540890 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -3,3 +3,4 @@
 #

 obj-$(CONFIG_FSL_HV_MANAGER)	+= fsl_hypervisor.o
+obj-y				+= vboxguest/
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
new file mode 100644
index 000000000000..fffd318a10fe
--- /dev/null
+++ b/drivers/virt/vboxguest/Kconfig
@@ -0,0 +1,18 @@
+config VBOXGUEST
+	tristate "Virtual Box Guest integration support"
+	depends on X86 && PCI && INPUT
+	help
+	  This is a driver for the Virtual Box Guest PCI device used in
+	  Virtual Box virtual machines. Enabling this driver will add
+	  support for Virtual Box Guest integration features such as
+	  copy-and-paste, seamless mode and OpenGL pass-through.
+
+	  This driver also offers vboxguest IPC functionality which is needed
+	  for the vboxfs driver which offers folder sharing support.
+
+	  If you enable this driver you should also enable the VBOXVIDEO
+	  option.
+
+	  Although it is possible to build this module in, it is advised
+	  to build this driver as a module, so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module.
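The exported helpers from the previous patch (vbg_hgcm_connect, vbg_hgcm_call,
vbg_hgcm_disconnect and vbg_status_code_to_errno) form the in-kernel IPC API
that this driver provides to consumers such as the planned vboxfs driver. The
sketch below shows the intended connect/call/disconnect sequence; it is
illustrative only and not part of this series. The function name
example_hgcm_roundtrip, the service name "VBoxExampleSvc" and function number
1 are made-up placeholders, and the vmmdev_hgcm_service_location field names
are assumed to match the shared VirtualBox vmmdev types header.

/* Illustrative sketch only, not part of this patch series. */
static int example_hgcm_roundtrip(struct vbg_dev *gdev)
{
	struct vmmdev_hgcm_service_location loc = {
		.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,	/* assumed enum name */
	};
	struct vmmdev_hgcm_function_parameter parm = {
		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,		/* u.value32 starts at 0 */
	};
	u32 client_id;
	int ret, conn_status, call_status;

	/* "VBoxExampleSvc" is a placeholder service name. */
	strlcpy(loc.u.localhost.service_name, "VBoxExampleSvc",
		sizeof(loc.u.localhost.service_name));

	ret = vbg_hgcm_connect(gdev, &loc, &client_id, &conn_status);
	if (ret < 0)		/* request could not be submitted at all */
		return ret;
	if (conn_status < 0)	/* host rejected the connect */
		return vbg_status_code_to_errno(conn_status);

	/* Function number 1 is a placeholder; each service defines its own.
	 * A timeout of U32_MAX means wait forever, per vbg_hgcm_do_call(). */
	ret = vbg_hgcm_call(gdev, client_id, 1, U32_MAX,
			    &parm, 1, &call_status);

	vbg_hgcm_disconnect(gdev, client_id, &conn_status);

	return ret < 0 ? ret : vbg_status_code_to_errno(call_status);
}

Note the two-level error model: the errno-style return value reports
transport-level failures, while the VBox status returned through the last
argument carries the host's verdict, so both must be checked, as above.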
diff --git a/drivers/virt/vboxguest/Makefile b/drivers/virt/vboxguest/Makefile new file mode 100644 index 000000000000..203b8f465817 --- /dev/null +++ b/drivers/virt/vboxguest/Makefile @@ -0,0 +1,3 @@ +vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o + +obj-$(CONFIG_VBOXGUEST) += vboxguest.o diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c new file mode 100644 index 000000000000..190dbf8cfcb5 --- /dev/null +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -0,0 +1,1571 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* + * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn. + * + * Copyright (C) 2007-2016 Oracle Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "vboxguest_core.h" +#include "vboxguest_version.h" + +/* Get the pointer to the first HGCM parameter. */ +#define VBG_IOCTL_HGCM_CALL_PARMS(a) \ + ((struct vmmdev_hgcm_function_parameter *)( \ + (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call))) +/* Get the pointer to the first HGCM parameter in a 32-bit request. */ +#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \ + ((struct vmmdev_hgcm_function_parameter32 *)( \ + (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call))) + +#define GUEST_MAPPINGS_TRIES 5 + +/** + * Reserves memory in which the VMM can relocate any guest mappings + * that are floating around. + * + * This operation is a little bit tricky since the VMM might not accept + * just any address because of address clashes between the three contexts + * it operates in, so we try several times. + * + * Failure to reserve the guest mappings is ignored. + * + * @gdev: The Guest extension device. + */ +static void vbg_guest_mappings_init(struct vbg_dev *gdev) +{ + struct vmmdev_hypervisorinfo *req; + void *guest_mappings[GUEST_MAPPINGS_TRIES]; + struct page **pages = NULL; + u32 size, hypervisor_size; + int i, rc; + + /* Query the required space. */ + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); + if (!req) + return; + + req->hypervisor_start = 0; + req->hypervisor_size = 0; + rc = vbg_req_perform(gdev, req); + if (rc < 0) + goto out; + + /* + * The VMM will report back if there is nothing it wants to map, like + * for instance in VT-x and AMD-V mode. + */ + if (req->hypervisor_size == 0) + goto out; + + hypervisor_size = req->hypervisor_size; + /* Add 4M so that we can align the vmap to 4MiB as the host requires. */ + size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M; + + pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL); + if (!pages) + goto out; + + gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER); + if (!gdev->guest_mappings_dummy_page) + goto out; + + for (i = 0; i < (size >> PAGE_SHIFT); i++) + pages[i] = gdev->guest_mappings_dummy_page; + + /* + * Try several times, the VMM might not accept some addresses because + * of address clashes between the three contexts. + */ + for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) { + guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT), + VM_MAP, PAGE_KERNEL_RO); + if (!guest_mappings[i]) + break; + + req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO; + req->header.rc = VERR_INTERNAL_ERROR; + req->hypervisor_size = hypervisor_size; + req->hypervisor_start = + (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M); + + rc = vbg_req_perform(gdev, req); + if (rc >= 0) { + gdev->guest_mappings = guest_mappings[i]; + break; + } + } + + /* Free vmap's from failed attempts. 
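+	 * (On success the loop above breaks with the accepted mapping still
+	 * at index i, so only earlier, rejected attempts are unmapped here;
+	 * on total failure every attempt is unmapped.)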
*/ + while (--i >= 0) + vunmap(guest_mappings[i]); + + /* On failure free the dummy-page backing the vmap */ + if (!gdev->guest_mappings) { + __free_page(gdev->guest_mappings_dummy_page); + gdev->guest_mappings_dummy_page = NULL; + } + +out: + kfree(req); + kfree(pages); +} + +/** + * Undo what vbg_guest_mappings_init did. + * + * @gdev: The Guest extension device. + */ +static void vbg_guest_mappings_exit(struct vbg_dev *gdev) +{ + struct vmmdev_hypervisorinfo *req; + int rc; + + if (!gdev->guest_mappings) + return; + + /* + * Tell the host that we're going to free the memory we reserved for + * it, the free it up. (Leak the memory if anything goes wrong here.) + */ + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); + if (!req) + return; + + req->hypervisor_start = 0; + req->hypervisor_size = 0; + + rc = vbg_req_perform(gdev, req); + + kfree(req); + + if (rc < 0) { + vbg_err("%s error: %d\n", __func__, rc); + return; + } + + vunmap(gdev->guest_mappings); + gdev->guest_mappings = NULL; + + __free_page(gdev->guest_mappings_dummy_page); + gdev->guest_mappings_dummy_page = NULL; +} + +/** + * Report the guest information to the host. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + */ +static int vbg_report_guest_info(struct vbg_dev *gdev) +{ + /* + * Allocate and fill in the two guest info reports. + */ + struct vmmdev_guest_info *req1 = NULL; + struct vmmdev_guest_info2 *req2 = NULL; + int rc, ret = -ENOMEM; + + req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); + req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); + if (!req1 || !req2) + goto out_free; + + req1->interface_version = VMMDEV_VERSION; + req1->os_type = VMMDEV_OSTYPE_LINUX26; +#if __BITS_PER_LONG == 64 + req1->os_type |= VMMDEV_OSTYPE_X64; +#endif + + req2->additions_major = VBG_VERSION_MAJOR; + req2->additions_minor = VBG_VERSION_MINOR; + req2->additions_build = VBG_VERSION_BUILD; + req2->additions_revision = VBG_SVN_REV; + /* (no features defined yet) */ + req2->additions_features = 0; + strlcpy(req2->name, VBG_VERSION_STRING, + sizeof(req2->name)); + + /* + * There are two protocols here: + * 1. INFO2 + INFO1. Supported by >=3.2.51. + * 2. INFO1 and optionally INFO2. The old protocol. + * + * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED + * if not supported by the VMMDev (message ordering requirement). + */ + rc = vbg_req_perform(gdev, req2); + if (rc >= 0) { + rc = vbg_req_perform(gdev, req1); + } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) { + rc = vbg_req_perform(gdev, req1); + if (rc >= 0) { + rc = vbg_req_perform(gdev, req2); + if (rc == VERR_NOT_IMPLEMENTED) + rc = VINF_SUCCESS; + } + } + ret = vbg_status_code_to_errno(rc); + +out_free: + kfree(req2); + kfree(req1); + return ret; +} + +/** + * Report the guest driver status to the host. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @active: Flag whether the driver is now active or not. + */ +static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) +{ + struct vmmdev_guest_status *req; + int rc; + + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); + if (!req) + return -ENOMEM; + + req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER; + if (active) + req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE; + else + req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE; + req->flags = 0; + + rc = vbg_req_perform(gdev, req); + if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. 
*/ + rc = VINF_SUCCESS; + + kfree(req); + + return vbg_status_code_to_errno(rc); +} + +/** + * Inflate the balloon by one chunk. The caller owns the balloon mutex. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @chunk_idx: Index of the chunk. + */ +static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx) +{ + struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; + struct page **pages; + int i, rc, ret; + + pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES, + GFP_KERNEL | __GFP_NOWARN); + if (!pages) + return -ENOMEM; + + req->header.size = sizeof(*req); + req->inflate = true; + req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; + + for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) { + pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN); + if (!pages[i]) { + ret = -ENOMEM; + goto out_error; + } + + req->phys_page[i] = page_to_phys(pages[i]); + } + + rc = vbg_req_perform(gdev, req); + if (rc < 0) { + vbg_err("%s error, rc: %d\n", __func__, rc); + ret = vbg_status_code_to_errno(rc); + goto out_error; + } + + gdev->mem_balloon.pages[chunk_idx] = pages; + + return 0; + +out_error: + while (--i >= 0) + __free_page(pages[i]); + kfree(pages); + + return ret; +} + +/** + * Deflate the balloon by one chunk. The caller owns the balloon mutex. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @chunk_idx: Index of the chunk. + */ +static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx) +{ + struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; + struct page **pages = gdev->mem_balloon.pages[chunk_idx]; + int i, rc; + + req->header.size = sizeof(*req); + req->inflate = false; + req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; + + for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) + req->phys_page[i] = page_to_phys(pages[i]); + + rc = vbg_req_perform(gdev, req); + if (rc < 0) { + vbg_err("%s error, rc: %d\n", __func__, rc); + return vbg_status_code_to_errno(rc); + } + + for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) + __free_page(pages[i]); + kfree(pages); + gdev->mem_balloon.pages[chunk_idx] = NULL; + + return 0; +} + +/** + * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size + * the host wants the balloon to be and adjust accordingly. + */ +static void vbg_balloon_work(struct work_struct *work) +{ + struct vbg_dev *gdev = + container_of(work, struct vbg_dev, mem_balloon.work); + struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req; + u32 i, chunks; + int rc, ret; + + /* + * Setting this bit means that we request the value from the host and + * change the guest memory balloon according to the returned value. + */ + req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST; + rc = vbg_req_perform(gdev, req); + if (rc < 0) { + vbg_err("%s error, rc: %d)\n", __func__, rc); + return; + } + + /* + * The host always returns the same maximum amount of chunks, so + * we do this once. 
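+	 * (A chunk is VMMDEV_MEMORY_BALLOON_CHUNK_SIZE bytes, i.e. 1 MiB
+	 * or 256 pages, see vmmdev.h.)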
+ */ + if (!gdev->mem_balloon.max_chunks) { + gdev->mem_balloon.pages = + devm_kcalloc(gdev->dev, req->phys_mem_chunks, + sizeof(struct page **), GFP_KERNEL); + if (!gdev->mem_balloon.pages) + return; + + gdev->mem_balloon.max_chunks = req->phys_mem_chunks; + } + + chunks = req->balloon_chunks; + if (chunks > gdev->mem_balloon.max_chunks) { + vbg_err("%s: illegal balloon size %u (max=%u)\n", + __func__, chunks, gdev->mem_balloon.max_chunks); + return; + } + + if (chunks > gdev->mem_balloon.chunks) { + /* inflate */ + for (i = gdev->mem_balloon.chunks; i < chunks; i++) { + ret = vbg_balloon_inflate(gdev, i); + if (ret < 0) + return; + + gdev->mem_balloon.chunks++; + } + } else { + /* deflate */ + for (i = gdev->mem_balloon.chunks; i-- > chunks;) { + ret = vbg_balloon_deflate(gdev, i); + if (ret < 0) + return; + + gdev->mem_balloon.chunks--; + } + } +} + +/** + * Callback for heartbeat timer. + */ +static void vbg_heartbeat_timer(struct timer_list *t) +{ + struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer); + + vbg_req_perform(gdev, gdev->guest_heartbeat_req); + mod_timer(&gdev->heartbeat_timer, + msecs_to_jiffies(gdev->heartbeat_interval_ms)); +} + +/** + * Configure the host to check guest's heartbeat + * and get heartbeat interval from the host. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @enabled: Set true to enable guest heartbeat checks on host. + */ +static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) +{ + struct vmmdev_heartbeat *req; + int rc; + + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); + if (!req) + return -ENOMEM; + + req->enabled = enabled; + req->interval_ns = 0; + rc = vbg_req_perform(gdev, req); + do_div(req->interval_ns, 1000000); /* ns -> ms */ + gdev->heartbeat_interval_ms = req->interval_ns; + kfree(req); + + return vbg_status_code_to_errno(rc); +} + +/** + * Initializes the heartbeat timer. This feature may be disabled by the host. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + */ +static int vbg_heartbeat_init(struct vbg_dev *gdev) +{ + int ret; + + /* Make sure that heartbeat checking is disabled if we fail. */ + ret = vbg_heartbeat_host_config(gdev, false); + if (ret < 0) + return ret; + + ret = vbg_heartbeat_host_config(gdev, true); + if (ret < 0) + return ret; + + /* + * Preallocate the request to use it from the timer callback because: + * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL + * and the timer callback runs at DISPATCH_LEVEL; + * 2) avoid repeated allocations. + */ + gdev->guest_heartbeat_req = vbg_req_alloc( + sizeof(*gdev->guest_heartbeat_req), + VMMDEVREQ_GUEST_HEARTBEAT); + if (!gdev->guest_heartbeat_req) + return -ENOMEM; + + vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n", + __func__, gdev->heartbeat_interval_ms); + mod_timer(&gdev->heartbeat_timer, 0); + + return 0; +} + +/** + * Cleanup hearbeat code, stop HB timer and disable host heartbeat checking. + * @gdev: The Guest extension device. + */ +static void vbg_heartbeat_exit(struct vbg_dev *gdev) +{ + del_timer_sync(&gdev->heartbeat_timer); + vbg_heartbeat_host_config(gdev, false); + kfree(gdev->guest_heartbeat_req); + +} + +/** + * Applies a change to the bit usage tracker. + * Return: true if the mask changed, false if not. + * @tracker: The bit usage tracker. + * @changed: The bits to change. + * @previous: The previous value of the bits. 
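+ *
+ * Example: if two sessions hold the same event bit and one drops it, the
+ * per-bit count goes from 2 to 1 and the global mask is unchanged, so this
+ * returns false; when the last session drops it the count reaches 0, the
+ * bit is cleared from tracker->mask and this returns true.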
+ */ +static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker, + u32 changed, u32 previous) +{ + bool global_change = false; + + while (changed) { + u32 bit = ffs(changed) - 1; + u32 bitmask = BIT(bit); + + if (bitmask & previous) { + tracker->per_bit_usage[bit] -= 1; + if (tracker->per_bit_usage[bit] == 0) { + global_change = true; + tracker->mask &= ~bitmask; + } + } else { + tracker->per_bit_usage[bit] += 1; + if (tracker->per_bit_usage[bit] == 1) { + global_change = true; + tracker->mask |= bitmask; + } + } + + changed &= ~bitmask; + } + + return global_change; +} + +/** + * Init and termination worker for resetting the (host) event filter on the host + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @fixed_events: Fixed events (init time). + */ +static int vbg_reset_host_event_filter(struct vbg_dev *gdev, + u32 fixed_events) +{ + struct vmmdev_mask *req; + int rc; + + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); + if (!req) + return -ENOMEM; + + req->not_mask = U32_MAX & ~fixed_events; + req->or_mask = fixed_events; + rc = vbg_req_perform(gdev, req); + if (rc < 0) + vbg_err("%s error, rc: %d\n", __func__, rc); + + kfree(req); + return vbg_status_code_to_errno(rc); +} + +/** + * Changes the event filter mask for the given session. + * + * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to + * do session cleanup. Takes the session spinlock. + * + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @session: The session. + * @or_mask: The events to add. + * @not_mask: The events to remove. + * @session_termination: Set if we're called by the session cleanup code. + * This tweaks the error handling so we perform + * proper session cleanup even if the host + * misbehaves. + */ +static int vbg_set_session_event_filter(struct vbg_dev *gdev, + struct vbg_session *session, + u32 or_mask, u32 not_mask, + bool session_termination) +{ + struct vmmdev_mask *req; + u32 changed, previous; + int rc, ret = 0; + + /* Allocate a request buffer before taking the spinlock */ + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); + if (!req) { + if (!session_termination) + return -ENOMEM; + /* Ignore allocation failure, we must do session cleanup. */ + } + + mutex_lock(&gdev->session_mutex); + + /* Apply the changes to the session mask. */ + previous = session->event_filter; + session->event_filter |= or_mask; + session->event_filter &= ~not_mask; + + /* If anything actually changed, update the global usage counters. */ + changed = previous ^ session->event_filter; + if (!changed) + goto out; + + vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous); + or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask; + + if (gdev->event_filter_host == or_mask || !req) + goto out; + + gdev->event_filter_host = or_mask; + req->or_mask = or_mask; + req->not_mask = ~or_mask; + rc = vbg_req_perform(gdev, req); + if (rc < 0) { + ret = vbg_status_code_to_errno(rc); + + /* Failed, roll back (unless it's session termination time). */ + gdev->event_filter_host = U32_MAX; + if (session_termination) + goto out; + + vbg_track_bit_usage(&gdev->event_filter_tracker, changed, + session->event_filter); + session->event_filter = previous; + } + +out: + mutex_unlock(&gdev->session_mutex); + kfree(req); + + return ret; +} + +/** + * Init and termination worker for set guest capabilities to zero on the host. + * Return: 0 or negative errno value. 
+static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+{
+ struct vmmdev_mask *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ if (!req)
+ return -ENOMEM;
+
+ req->not_mask = U32_MAX;
+ req->or_mask = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+ kfree(req);
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Sets the guest capabilities for a session. Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @or_mask: The capabilities to add.
+ * @not_mask: The capabilities to remove.
+ * @session_termination: Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ */
+static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 or_mask, u32 not_mask,
+ bool session_termination)
+{
+ struct vmmdev_mask *req;
+ u32 changed, previous;
+ int rc, ret = 0;
+
+ /* Allocate a request buffer before taking the mutex */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ if (!req) {
+ if (!session_termination)
+ return -ENOMEM;
+ /* Ignore allocation failure, we must do session cleanup. */
+ }
+
+ mutex_lock(&gdev->session_mutex);
+
+ /* Apply the changes to the session mask. */
+ previous = session->guest_caps;
+ session->guest_caps |= or_mask;
+ session->guest_caps &= ~not_mask;
+
+ /* If anything actually changed, update the global usage counters. */
+ changed = previous ^ session->guest_caps;
+ if (!changed)
+ goto out;
+
+ vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
+ or_mask = gdev->guest_caps_tracker.mask;
+
+ if (gdev->guest_caps_host == or_mask || !req)
+ goto out;
+
+ gdev->guest_caps_host = or_mask;
+ req->or_mask = or_mask;
+ req->not_mask = ~or_mask;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ ret = vbg_status_code_to_errno(rc);
+
+ /* Failed, roll back (unless it's session termination time). */
+ gdev->guest_caps_host = U32_MAX;
+ if (session_termination)
+ goto out;
+
+ vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
+ session->guest_caps);
+ session->guest_caps = previous;
+ }
+
+out:
+ mutex_unlock(&gdev->session_mutex);
+ kfree(req);
+
+ return ret;
+}
+
+/**
+ * Gets the host feature mask and version information.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+static int vbg_query_host_version(struct vbg_dev *gdev)
+{
+ struct vmmdev_host_version *req;
+ int rc, ret;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+ if (!req)
+ return -ENOMEM;
+
+ rc = vbg_req_perform(gdev, req);
+ ret = vbg_status_code_to_errno(rc);
+ if (ret)
+ goto out;
+
+ snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
+ req->major, req->minor, req->build, req->revision);
+ gdev->host_features = req->features;
+
+ vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
+ gdev->host_features);
+
+ if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+ vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+ ret = -ENODEV;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
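The function above follows the request pattern used throughout this file:
allocate a DMA32 request with vbg_req_alloc(), fill it in, submit it with
vbg_req_perform(), translate the returned VBox status code, and free the
request. A condensed sketch of the pattern; vmmdev_example and
VMMDEVREQ_EXAMPLE are placeholders, not real request types:

	/* Hypothetical request, shown only to illustrate the pattern. */
	static int vbg_do_example_req(struct vbg_dev *gdev)
	{
		struct vmmdev_example *req;
		int rc;

		req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_EXAMPLE);
		if (!req)
			return -ENOMEM;

		req->some_field = 0; /* fill in the request body */
		rc = vbg_req_perform(gdev, req); /* rc is a VBox status */

		kfree(req);
		return vbg_status_code_to_errno(rc);
	}

+/**
+ * Initializes the VBoxGuest device extension when the
+ * device driver is loaded.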
+ *
+ * The native code locates the VMMDev on the PCI bus and retrieves
+ * the MMIO and I/O port ranges; this function will take care of
+ * mapping the MMIO memory (if present). Upon successful return
+ * the native code should set up the interrupt handler.
+ *
+ * Return: 0 or negative errno value.
+ *
+ * @gdev: The Guest extension device.
+ * @fixed_events: Events that will be enabled upon init and no client
+ * will ever be allowed to mask.
+ */
+int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
+{
+ int ret = -ENOMEM;
+
+ gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
+ gdev->event_filter_host = U32_MAX; /* forces a report */
+ gdev->guest_caps_host = U32_MAX; /* forces a report */
+
+ init_waitqueue_head(&gdev->event_wq);
+ init_waitqueue_head(&gdev->hgcm_wq);
+ spin_lock_init(&gdev->event_spinlock);
+ mutex_init(&gdev->session_mutex);
+ mutex_init(&gdev->cancel_req_mutex);
+ timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
+ INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
+
+ gdev->mem_balloon.get_req =
+ vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
+ VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+ gdev->mem_balloon.change_req =
+ vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
+ VMMDEVREQ_CHANGE_MEMBALLOON);
+ gdev->cancel_req =
+ vbg_req_alloc(sizeof(*(gdev->cancel_req)),
+ VMMDEVREQ_HGCM_CANCEL2);
+ gdev->ack_events_req =
+ vbg_req_alloc(sizeof(*gdev->ack_events_req),
+ VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+ gdev->mouse_status_req =
+ vbg_req_alloc(sizeof(*gdev->mouse_status_req),
+ VMMDEVREQ_GET_MOUSE_STATUS);
+
+ if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
+ !gdev->cancel_req || !gdev->ack_events_req ||
+ !gdev->mouse_status_req)
+ goto err_free_reqs;
+
+ ret = vbg_query_host_version(gdev);
+ if (ret)
+ goto err_free_reqs;
+
+ ret = vbg_report_guest_info(gdev);
+ if (ret) {
+ vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
+ if (ret) {
+ vbg_err("vboxguest: Error setting fixed event filter: %d\n",
+ ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_reset_host_capabilities(gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
+ ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_core_set_mouse_status(gdev, 0);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ /* These may fail without requiring the driver init to fail. */
+ vbg_guest_mappings_init(gdev);
+ vbg_heartbeat_init(gdev);
+
+ /* All Done! */
+ ret = vbg_report_driver_status(gdev, true);
+ if (ret < 0)
+ vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
+
+ return 0;
+
+err_free_reqs:
+ kfree(gdev->mouse_status_req);
+ kfree(gdev->ack_events_req);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+ kfree(gdev->mem_balloon.get_req);
+ return ret;
+}
+
+/**
+ * Call this on exit to clean up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is unloaded,
+ * but don't call this on shutdown.
+ * @gdev: The Guest extension device.
+ */
+void vbg_core_exit(struct vbg_dev *gdev)
+{
+ vbg_heartbeat_exit(gdev);
+ vbg_guest_mappings_exit(gdev);
+
+ /* Clear the host flags (mouse status etc).
*/ + vbg_reset_host_event_filter(gdev, 0); + vbg_reset_host_capabilities(gdev); + vbg_core_set_mouse_status(gdev, 0); + + kfree(gdev->mouse_status_req); + kfree(gdev->ack_events_req); + kfree(gdev->cancel_req); + kfree(gdev->mem_balloon.change_req); + kfree(gdev->mem_balloon.get_req); +} + +/** + * Creates a VBoxGuest user session. + * + * vboxguest_linux.c calls this when userspace opens the char-device. + * Return: A pointer to the new session or an ERR_PTR on error. + * @gdev: The Guest extension device. + * @user: Set if this is a session for the vboxuser device. + */ +struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) +{ + struct vbg_session *session; + + session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return ERR_PTR(-ENOMEM); + + session->gdev = gdev; + session->user_session = user; + + return session; +} + +/** + * Closes a VBoxGuest session. + * @session: The session to close (and free). + */ +void vbg_core_close_session(struct vbg_session *session) +{ + struct vbg_dev *gdev = session->gdev; + int i, rc; + + vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); + vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); + + for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) { + if (!session->hgcm_client_ids[i]) + continue; + + vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc); + } + + kfree(session); +} + +static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size, + size_t out_size) +{ + if (hdr->size_in != (sizeof(*hdr) + in_size) || + hdr->size_out != (sizeof(*hdr) + out_size)) + return -EINVAL; + + return 0; +} + +static int vbg_ioctl_driver_version_info( + struct vbg_ioctl_driver_version_info *info) +{ + const u16 vbg_maj_version = VBG_IOC_VERSION >> 16; + u16 min_maj_version, req_maj_version; + + if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out))) + return -EINVAL; + + req_maj_version = info->u.in.req_version >> 16; + min_maj_version = info->u.in.min_version >> 16; + + if (info->u.in.min_version > info->u.in.req_version || + min_maj_version != req_maj_version) + return -EINVAL; + + if (info->u.in.min_version <= VBG_IOC_VERSION && + min_maj_version == vbg_maj_version) { + info->u.out.session_version = VBG_IOC_VERSION; + } else { + info->u.out.session_version = U32_MAX; + info->hdr.rc = VERR_VERSION_MISMATCH; + } + + info->u.out.driver_version = VBG_IOC_VERSION; + info->u.out.driver_revision = 0; + info->u.out.reserved1 = 0; + info->u.out.reserved2 = 0; + + return 0; +} + +static bool vbg_wait_event_cond(struct vbg_dev *gdev, + struct vbg_session *session, + u32 event_mask) +{ + unsigned long flags; + bool wakeup; + u32 events; + + spin_lock_irqsave(&gdev->event_spinlock, flags); + + events = gdev->pending_events & event_mask; + wakeup = events || session->cancel_waiters; + + spin_unlock_irqrestore(&gdev->event_spinlock, flags); + + return wakeup; +} + +/* Must be called with the event_lock held */ +static u32 vbg_consume_events_locked(struct vbg_dev *gdev, + struct vbg_session *session, + u32 event_mask) +{ + u32 events = gdev->pending_events & event_mask; + + gdev->pending_events &= ~events; + return events; +} + +static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_wait_for_events *wait) +{ + u32 timeout_ms = wait->u.in.timeout_ms; + u32 event_mask = wait->u.in.events; + unsigned long flags; + long timeout; + int ret = 0; + + if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out))) + return -EINVAL; + + 
if (timeout_ms == U32_MAX) + timeout = MAX_SCHEDULE_TIMEOUT; + else + timeout = msecs_to_jiffies(timeout_ms); + + wait->u.out.events = 0; + do { + timeout = wait_event_interruptible_timeout( + gdev->event_wq, + vbg_wait_event_cond(gdev, session, event_mask), + timeout); + + spin_lock_irqsave(&gdev->event_spinlock, flags); + + if (timeout < 0 || session->cancel_waiters) { + ret = -EINTR; + } else if (timeout == 0) { + ret = -ETIMEDOUT; + } else { + wait->u.out.events = + vbg_consume_events_locked(gdev, session, event_mask); + } + + spin_unlock_irqrestore(&gdev->event_spinlock, flags); + + /* + * Someone else may have consumed the event(s) first, in + * which case we go back to waiting. + */ + } while (ret == 0 && wait->u.out.events == 0); + + return ret; +} + +static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_hdr *hdr) +{ + unsigned long flags; + + if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr)) + return -EINVAL; + + spin_lock_irqsave(&gdev->event_spinlock, flags); + session->cancel_waiters = true; + spin_unlock_irqrestore(&gdev->event_spinlock, flags); + + wake_up(&gdev->event_wq); + + return 0; +} + +/** + * Checks if the VMM request is allowed in the context of the given session. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @session: The calling session. + * @req: The request. + */ +static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, + const struct vmmdev_request_header *req) +{ + const struct vmmdev_guest_status *guest_status; + bool trusted_apps_only; + + switch (req->request_type) { + /* Trusted users apps only. */ + case VMMDEVREQ_QUERY_CREDENTIALS: + case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT: + case VMMDEVREQ_REGISTER_SHARED_MODULE: + case VMMDEVREQ_UNREGISTER_SHARED_MODULE: + case VMMDEVREQ_WRITE_COREDUMP: + case VMMDEVREQ_GET_CPU_HOTPLUG_REQ: + case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS: + case VMMDEVREQ_CHECK_SHARED_MODULES: + case VMMDEVREQ_GET_PAGE_SHARING_STATUS: + case VMMDEVREQ_DEBUG_IS_PAGE_SHARED: + case VMMDEVREQ_REPORT_GUEST_STATS: + case VMMDEVREQ_REPORT_GUEST_USER_STATE: + case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ: + trusted_apps_only = true; + break; + + /* Anyone. */ + case VMMDEVREQ_GET_MOUSE_STATUS: + case VMMDEVREQ_SET_MOUSE_STATUS: + case VMMDEVREQ_SET_POINTER_SHAPE: + case VMMDEVREQ_GET_HOST_VERSION: + case VMMDEVREQ_IDLE: + case VMMDEVREQ_GET_HOST_TIME: + case VMMDEVREQ_SET_POWER_STATUS: + case VMMDEVREQ_ACKNOWLEDGE_EVENTS: + case VMMDEVREQ_CTL_GUEST_FILTER_MASK: + case VMMDEVREQ_REPORT_GUEST_STATUS: + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ: + case VMMDEVREQ_VIDEMODE_SUPPORTED: + case VMMDEVREQ_GET_HEIGHT_REDUCTION: + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2: + case VMMDEVREQ_VIDEMODE_SUPPORTED2: + case VMMDEVREQ_VIDEO_ACCEL_ENABLE: + case VMMDEVREQ_VIDEO_ACCEL_FLUSH: + case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION: + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX: + case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ: + case VMMDEVREQ_GET_VRDPCHANGE_REQ: + case VMMDEVREQ_LOG_STRING: + case VMMDEVREQ_GET_SESSION_ID: + trusted_apps_only = false; + break; + + /* Depends on the request parameters... */ + case VMMDEVREQ_REPORT_GUEST_CAPABILITIES: + guest_status = (const struct vmmdev_guest_status *)req; + switch (guest_status->facility) { + case VBOXGUEST_FACILITY_TYPE_ALL: + case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER: + vbg_err("Denying userspace vmm report guest cap. 
call facility %#08x\n", + guest_status->facility); + return -EPERM; + case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE: + trusted_apps_only = true; + break; + case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT: + case VBOXGUEST_FACILITY_TYPE_SEAMLESS: + case VBOXGUEST_FACILITY_TYPE_GRAPHICS: + default: + trusted_apps_only = false; + break; + } + break; + + /* Anything else is not allowed. */ + default: + vbg_err("Denying userspace vmm call type %#08x\n", + req->request_type); + return -EPERM; + } + + if (trusted_apps_only && session->user_session) { + vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", + req->request_type); + return -EPERM; + } + + return 0; +} + +static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev, + struct vbg_session *session, void *data) +{ + struct vbg_ioctl_hdr *hdr = data; + int ret; + + if (hdr->size_in != hdr->size_out) + return -EINVAL; + + if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE) + return -E2BIG; + + if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT) + return -EINVAL; + + ret = vbg_req_allowed(gdev, session, data); + if (ret < 0) + return ret; + + vbg_req_perform(gdev, data); + WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE); + + return 0; +} + +static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_hgcm_connect *conn) +{ + u32 client_id; + int i, ret; + + if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out))) + return -EINVAL; + + /* Find a free place in the sessions clients array and claim it */ + mutex_lock(&gdev->session_mutex); + for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) { + if (!session->hgcm_client_ids[i]) { + session->hgcm_client_ids[i] = U32_MAX; + break; + } + } + mutex_unlock(&gdev->session_mutex); + + if (i >= ARRAY_SIZE(session->hgcm_client_ids)) + return -EMFILE; + + ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id, + &conn->hdr.rc); + + mutex_lock(&gdev->session_mutex); + if (ret == 0 && conn->hdr.rc >= 0) { + conn->u.out.client_id = client_id; + session->hgcm_client_ids[i] = client_id; + } else { + conn->u.out.client_id = 0; + session->hgcm_client_ids[i] = 0; + } + mutex_unlock(&gdev->session_mutex); + + return ret; +} + +static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_hgcm_disconnect *disconn) +{ + u32 client_id; + int i, ret; + + if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0)) + return -EINVAL; + + client_id = disconn->u.in.client_id; + if (client_id == 0 || client_id == U32_MAX) + return -EINVAL; + + mutex_lock(&gdev->session_mutex); + for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) { + if (session->hgcm_client_ids[i] == client_id) { + session->hgcm_client_ids[i] = U32_MAX; + break; + } + } + mutex_unlock(&gdev->session_mutex); + + if (i >= ARRAY_SIZE(session->hgcm_client_ids)) + return -EINVAL; + + ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc); + + mutex_lock(&gdev->session_mutex); + if (ret == 0 && disconn->hdr.rc >= 0) + session->hgcm_client_ids[i] = 0; + else + session->hgcm_client_ids[i] = client_id; + mutex_unlock(&gdev->session_mutex); + + return ret; +} + +static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev, + struct vbg_session *session, bool f32bit, + struct vbg_ioctl_hgcm_call *call) +{ + size_t actual_size; + u32 client_id; + int i, ret; + + if (call->hdr.size_in < sizeof(*call)) + return -EINVAL; + + if (call->hdr.size_in != call->hdr.size_out) + return -EINVAL; + + if (call->parm_count > VMMDEV_HGCM_MAX_PARMS) + return -E2BIG; + 
+ client_id = call->client_id; + if (client_id == 0 || client_id == U32_MAX) + return -EINVAL; + + actual_size = sizeof(*call); + if (f32bit) + actual_size += call->parm_count * + sizeof(struct vmmdev_hgcm_function_parameter32); + else + actual_size += call->parm_count * + sizeof(struct vmmdev_hgcm_function_parameter); + if (call->hdr.size_in < actual_size) { + vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n", + call->hdr.size_in, actual_size); + return -EINVAL; + } + call->hdr.size_out = actual_size; + + /* + * Validate the client id. + */ + mutex_lock(&gdev->session_mutex); + for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) + if (session->hgcm_client_ids[i] == client_id) + break; + mutex_unlock(&gdev->session_mutex); + if (i >= ARRAY_SIZE(session->hgcm_client_ids)) { + vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n", + client_id); + return -EINVAL; + } + + if (f32bit) + ret = vbg_hgcm_call32(gdev, client_id, + call->function, call->timeout_ms, + VBG_IOCTL_HGCM_CALL_PARMS32(call), + call->parm_count, &call->hdr.rc); + else + ret = vbg_hgcm_call(gdev, client_id, + call->function, call->timeout_ms, + VBG_IOCTL_HGCM_CALL_PARMS(call), + call->parm_count, &call->hdr.rc); + + if (ret == -E2BIG) { + /* E2BIG needs to be reported through the hdr.rc field. */ + call->hdr.rc = VERR_OUT_OF_RANGE; + ret = 0; + } + + if (ret && ret != -EINTR && ret != -ETIMEDOUT) + vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret); + + return ret; +} + +static int vbg_ioctl_log(struct vbg_ioctl_log *log) +{ + if (log->hdr.size_out != sizeof(log->hdr)) + return -EINVAL; + + vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)), + log->u.in.msg); + + return 0; +} + +static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_change_filter *filter) +{ + u32 or_mask, not_mask; + + if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0)) + return -EINVAL; + + or_mask = filter->u.in.or_mask; + not_mask = filter->u.in.not_mask; + + if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) + return -EINVAL; + + return vbg_set_session_event_filter(gdev, session, or_mask, not_mask, + false); +} + +static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps) +{ + u32 or_mask, not_mask; + int ret; + + if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out))) + return -EINVAL; + + or_mask = caps->u.in.or_mask; + not_mask = caps->u.in.not_mask; + + if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) + return -EINVAL; + + ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, + false); + if (ret) + return ret; + + caps->u.out.session_caps = session->guest_caps; + caps->u.out.global_caps = gdev->guest_caps_host; + + return 0; +} + +static int vbg_ioctl_check_balloon(struct vbg_dev *gdev, + struct vbg_ioctl_check_balloon *balloon_info) +{ + if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out))) + return -EINVAL; + + balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; + /* + * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST + * events entirely in the kernel, see vbg_core_isr(). 
+ */ + balloon_info->u.out.handle_in_r3 = false; + + return 0; +} + +static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, + struct vbg_ioctl_write_coredump *dump) +{ + struct vmmdev_write_core_dump *req; + + if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) + return -EINVAL; + + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP); + if (!req) + return -ENOMEM; + + req->flags = dump->u.in.flags; + dump->hdr.rc = vbg_req_perform(gdev, req); + + kfree(req); + return 0; +} + +/** + * Common IOCtl for user to kernel communication. + * Return: 0 or negative errno value. + * @session: The client session. + * @req: The requested function. + * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr). + */ +int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) +{ + unsigned int req_no_size = req & ~IOCSIZE_MASK; + struct vbg_dev *gdev = session->gdev; + struct vbg_ioctl_hdr *hdr = data; + bool f32bit = false; + + hdr->rc = VINF_SUCCESS; + if (!hdr->size_out) + hdr->size_out = hdr->size_in; + + /* + * hdr->version and hdr->size_in / hdr->size_out minimum size are + * already checked by vbg_misc_device_ioctl(). + */ + + /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ + if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG) + return vbg_ioctl_vmmrequest(gdev, session, data); + + if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) + return -EINVAL; + + /* Fixed size requests. */ + switch (req) { + case VBG_IOCTL_DRIVER_VERSION_INFO: + return vbg_ioctl_driver_version_info(data); + case VBG_IOCTL_HGCM_CONNECT: + return vbg_ioctl_hgcm_connect(gdev, session, data); + case VBG_IOCTL_HGCM_DISCONNECT: + return vbg_ioctl_hgcm_disconnect(gdev, session, data); + case VBG_IOCTL_WAIT_FOR_EVENTS: + return vbg_ioctl_wait_for_events(gdev, session, data); + case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS: + return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); + case VBG_IOCTL_CHANGE_FILTER_MASK: + return vbg_ioctl_change_filter_mask(gdev, session, data); + case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: + return vbg_ioctl_change_guest_capabilities(gdev, session, data); + case VBG_IOCTL_CHECK_BALLOON: + return vbg_ioctl_check_balloon(gdev, data); + case VBG_IOCTL_WRITE_CORE_DUMP: + return vbg_ioctl_write_core_dump(gdev, data); + } + + /* Variable sized requests. */ + switch (req_no_size) { +#ifdef CONFIG_COMPAT + case VBG_IOCTL_HGCM_CALL_32(0): + f32bit = true; + /* Fall through */ +#endif + case VBG_IOCTL_HGCM_CALL(0): + return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); + case VBG_IOCTL_LOG(0): + return vbg_ioctl_log(data); + } + + vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req); + return -ENOTTY; +} + +/** + * Report guest supported mouse-features to the host. + * + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @features: The set of features to report to the host. + */ +int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) +{ + struct vmmdev_mouse_status *req; + int rc; + + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS); + if (!req) + return -ENOMEM; + + req->mouse_features = features; + req->pointer_pos_x = 0; + req->pointer_pos_y = 0; + + rc = vbg_req_perform(gdev, req); + if (rc < 0) + vbg_err("%s error, rc: %d\n", __func__, rc); + + kfree(req); + return vbg_status_code_to_errno(rc); +} + +/** Core interrupt service routine. 
*/
+irqreturn_t vbg_core_isr(int irq, void *dev_id)
+{
+ struct vbg_dev *gdev = dev_id;
+ struct vmmdev_events *req = gdev->ack_events_req;
+ bool mouse_position_changed = false;
+ unsigned long flags;
+ u32 events = 0;
+ int rc;
+
+ if (!gdev->mmio->V.V1_04.have_events)
+ return IRQ_NONE;
+
+ /* Get and acknowledge events. */
+ req->header.rc = VERR_INTERNAL_ERROR;
+ req->events = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("Error performing events req, rc: %d\n", rc);
+ return IRQ_NONE;
+ }
+
+ events = req->events;
+
+ if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
+ mouse_position_changed = true;
+ events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
+ }
+
+ if (events & VMMDEV_EVENT_HGCM) {
+ wake_up(&gdev->hgcm_wq);
+ events &= ~VMMDEV_EVENT_HGCM;
+ }
+
+ if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
+ schedule_work(&gdev->mem_balloon.work);
+ events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+ }
+
+ if (events) {
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ gdev->pending_events |= events;
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ wake_up(&gdev->event_wq);
+ }
+
+ if (mouse_position_changed)
+ vbg_linux_mouse_event(gdev);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
new file mode 100644
index 000000000000..6c784bf4fa6d
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* Copyright (C) 2010-2016 Oracle Corporation */
+
+#ifndef __VBOXGUEST_CORE_H__
+#define __VBOXGUEST_CORE_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "vmmdev.h"
+
+struct vbg_session;
+
+/** VBox guest memory balloon. */
+struct vbg_mem_balloon {
+ /** Work handling VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events */
+ struct work_struct work;
+ /** Pre-allocated vmmdev_memballoon_info req for query */
+ struct vmmdev_memballoon_info *get_req;
+ /** Pre-allocated vmmdev_memballoon_change req for inflate / deflate */
+ struct vmmdev_memballoon_change *change_req;
+ /** The current number of chunks in the balloon. */
+ u32 chunks;
+ /** The maximum number of chunks in the balloon. */
+ u32 max_chunks;
+ /**
+ * Array of pointers to page arrays. A page * array is allocated for
+ * each chunk when inflating, and freed when deflating.
+ */
+ struct page ***pages;
+};
+
+/**
+ * Per bit usage tracker for a u32 mask.
+ *
+ * Used for optimal handling of guest properties and event filter.
+ */
+struct vbg_bit_usage_tracker {
+ /** Per bit usage counters. */
+ u32 per_bit_usage[32];
+ /** The current mask according to per_bit_usage. */
+ u32 mask;
+};
+
+/** VBox guest device (data) extension. */
+struct vbg_dev {
+ struct device *dev;
+ /** The base of the adapter I/O ports. */
+ u16 io_port;
+ /** Pointer to the mapping of the VMMDev adapter memory. */
+ struct vmmdev_memory *mmio;
+ /** Host version */
+ char host_version[64];
+ /** Host features */
+ unsigned int host_features;
+ /**
+ * Dummy page and vmap address for reserved kernel virtual-address
+ * space for the guest mappings, only used on hosts lacking vtx.
+ */
+ struct page *guest_mappings_dummy_page;
+ void *guest_mappings;
+ /** Spinlock protecting pending_events. */
+ spinlock_t event_spinlock;
+ /** Preallocated struct vmmdev_events for the IRQ handler. */
+ struct vmmdev_events *ack_events_req;
+ /** Wait-for-event list for threads waiting for multiple events. */
+ wait_queue_head_t event_wq;
+ /** Mask of pending events. */
+ u32 pending_events;
+ /** Wait-for-event list for threads waiting on HGCM async completion. */
+ wait_queue_head_t hgcm_wq;
+ /** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
+ struct vmmdev_hgcm_cancel2 *cancel_req;
+ /** Mutex protecting cancel_req accesses */
+ struct mutex cancel_req_mutex;
+ /** Pre-allocated mouse-status request for the input-device handling. */
+ struct vmmdev_mouse_status *mouse_status_req;
+ /** Input device for reporting abs mouse coordinates to the guest. */
+ struct input_dev *input;
+
+ /** Memory balloon information. */
+ struct vbg_mem_balloon mem_balloon;
+
+ /** Lock for session related items in vbg_dev and vbg_session */
+ struct mutex session_mutex;
+ /** Events we won't permit anyone to filter out. */
+ u32 fixed_events;
+ /**
+ * Usage counters for the host events (excludes fixed events).
+ * Protected by session_mutex.
+ */
+ struct vbg_bit_usage_tracker event_filter_tracker;
+ /**
+ * The event filter last reported to the host (or UINT32_MAX).
+ * Protected by session_mutex.
+ */
+ u32 event_filter_host;
+
+ /**
+ * Usage counters for guest capabilities. Indexed by capability bit
+ * number, one count per session using a capability.
+ * Protected by session_mutex.
+ */
+ struct vbg_bit_usage_tracker guest_caps_tracker;
+ /**
+ * The guest capabilities last reported to the host (or UINT32_MAX).
+ * Protected by session_mutex.
+ */
+ u32 guest_caps_host;
+
+ /**
+ * Heartbeat timer which fires at the host-configured
+ * heartbeat_interval_ms; its handler sends
+ * VMMDEVREQ_GUEST_HEARTBEAT to the VMMDev.
+ */
+ struct timer_list heartbeat_timer;
+ /** Heartbeat timer interval in ms. */
+ int heartbeat_interval_ms;
+ /** Preallocated VMMDEVREQ_GUEST_HEARTBEAT request. */
+ struct vmmdev_request_header *guest_heartbeat_req;
+
+ /** "vboxguest" char-device */
+ struct miscdevice misc_device;
+ /** "vboxuser" char-device */
+ struct miscdevice misc_device_user;
+};
+
+/** The VBoxGuest per session data. */
+struct vbg_session {
+ /** Pointer to the device extension. */
+ struct vbg_dev *gdev;
+
+ /**
+ * Array containing HGCM client IDs associated with this session.
+ * These will be automatically disconnected when the session is closed.
+ * Protected by vbg_gdev.session_mutex.
+ */
+ u32 hgcm_client_ids[64];
+ /**
+ * Host events requested by the session.
+ * An event type requested in any guest session will be added to the
+ * host filter. Protected by vbg_gdev.session_mutex.
+ */
+ u32 event_filter;
+ /**
+ * Guest capabilities for this session.
+ * A capability claimed by any guest session will be reported to the
+ * host. Protected by vbg_gdev.session_mutex.
+ */
+ u32 guest_caps;
+ /** Does this session belong to a root process or a user one? */
+ bool user_session;
+ /** Set on CANCEL_ALL_WAITEVENTS, protected by the event_spinlock. */
+ bool cancel_waiters;
+};
+
+int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
+void vbg_core_exit(struct vbg_dev *gdev);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+void vbg_core_close_session(struct vbg_session *session);
+int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
+int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
+
+irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+void vbg_linux_mouse_event(struct vbg_dev *gdev);
+
+#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
new file mode 100644
index 000000000000..d045aa51ce03
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * vboxguest linux pci driver, char-dev and input-device code,
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "vboxguest_core.h"
+
+/** The device name. */
+#define DEVICE_NAME "vboxguest"
+/** The device name for the device node open to everyone. */
+#define DEVICE_NAME_USER "vboxuser"
+/** VirtualBox PCI vendor ID. */
+#define VBOX_VENDORID 0x80ee
+/** VMMDev PCI card product ID. */
+#define VMMDEV_DEVICEID 0xcafe
+
+/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
+static DEFINE_MUTEX(vbg_gdev_mutex);
+/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
+static struct vbg_dev *vbg_gdev;
+
+static int vbg_misc_device_open(struct inode *inode, struct file *filp)
+{
+ struct vbg_session *session;
+ struct vbg_dev *gdev;
+
+ /* misc_open sets filp->private_data to our misc device */
+ gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
+
+ session = vbg_core_open_session(gdev, false);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+
+ filp->private_data = session;
+ return 0;
+}
+
+static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
+{
+ struct vbg_session *session;
+ struct vbg_dev *gdev;
+
+ /* misc_open sets filp->private_data to our misc device */
+ gdev = container_of(filp->private_data, struct vbg_dev,
+ misc_device_user);
+
+ session = vbg_core_open_session(gdev, true);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+
+ filp->private_data = session;
+ return 0;
+}
+
+/**
+ * Close device.
+ * Return: 0 on success, negated errno on failure.
+ * @inode: Pointer to inode info structure.
+ * @filp: Associated file pointer.
+ */
+static int vbg_misc_device_close(struct inode *inode, struct file *filp)
+{
+ vbg_core_close_session(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
+}
+
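For orientation before the ioctl entry point below: every vboxguest ioctl
starts with a struct vbg_ioctl_hdr giving the in/out sizes and the header
version. The following userspace sketch issues VBG_IOCTL_DRIVER_VERSION_INFO
against /dev/vboxguest; it is illustrative only and assumes the UAPI
definitions it uses are available via <linux/vboxguest.h>:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/vboxguest.h>

	int main(void)
	{
		struct vbg_ioctl_driver_version_info info;
		int fd = open("/dev/vboxguest", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info)); /* hdr.type left at default */
		info.hdr.size_in = sizeof(info.hdr) + sizeof(info.u.in);
		info.hdr.size_out = sizeof(info.hdr) + sizeof(info.u.out);
		info.hdr.version = VBG_IOCTL_HDR_VERSION;
		info.u.in.req_version = VBG_IOC_VERSION;
		info.u.in.min_version = VBG_IOC_VERSION & 0xffff0000;

		if (ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info) == 0)
			printf("session version: %#x\n",
			       info.u.out.session_version);

		close(fd);
		return 0;
	}

+/**
+ * Device I/O Control entry point.
+ * Return: 0 on success, negated errno on failure.
+ * @filp: Associated file pointer.
+ * @req: The request specified to ioctl().
+ * @arg: The argument specified to ioctl().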
+ */ +static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, + unsigned long arg) +{ + struct vbg_session *session = filp->private_data; + size_t returned_size, size; + struct vbg_ioctl_hdr hdr; + int ret = 0; + void *buf; + + if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) + return -EFAULT; + + if (hdr.version != VBG_IOCTL_HDR_VERSION) + return -EINVAL; + + if (hdr.size_in < sizeof(hdr) || + (hdr.size_out && hdr.size_out < sizeof(hdr))) + return -EINVAL; + + size = max(hdr.size_in, hdr.size_out); + if (_IOC_SIZE(req) && _IOC_SIZE(req) != size) + return -EINVAL; + if (size > SZ_16M) + return -E2BIG; + + /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ + buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, (void *)arg, hdr.size_in)) { + ret = -EFAULT; + goto out; + } + if (hdr.size_in < size) + memset(buf + hdr.size_in, 0, size - hdr.size_in); + + ret = vbg_core_ioctl(session, req, buf); + if (ret) + goto out; + + returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out; + if (returned_size > size) { + vbg_debug("%s: too much output data %zu > %zu\n", + __func__, returned_size, size); + returned_size = size; + } + if (copy_to_user((void *)arg, buf, returned_size) != 0) + ret = -EFAULT; + +out: + kfree(buf); + + return ret; +} + +/** The file_operations structures. */ +static const struct file_operations vbg_misc_device_fops = { + .owner = THIS_MODULE, + .open = vbg_misc_device_open, + .release = vbg_misc_device_close, + .unlocked_ioctl = vbg_misc_device_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vbg_misc_device_ioctl, +#endif +}; +static const struct file_operations vbg_misc_device_user_fops = { + .owner = THIS_MODULE, + .open = vbg_misc_device_user_open, + .release = vbg_misc_device_close, + .unlocked_ioctl = vbg_misc_device_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vbg_misc_device_ioctl, +#endif +}; + +/** + * Called when the input device is first opened. + * + * Sets up absolute mouse reporting. + */ +static int vbg_input_open(struct input_dev *input) +{ + struct vbg_dev *gdev = input_get_drvdata(input); + u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL; + int ret; + + ret = vbg_core_set_mouse_status(gdev, feat); + if (ret) + return ret; + + return 0; +} + +/** + * Called if all open handles to the input device are closed. + * + * Disables absolute reporting. + */ +static void vbg_input_close(struct input_dev *input) +{ + struct vbg_dev *gdev = input_get_drvdata(input); + + vbg_core_set_mouse_status(gdev, 0); +} + +/** + * Creates the kernel input device. + * + * Return: 0 on success, negated errno on failure. 
+ */ +static int vbg_create_input_device(struct vbg_dev *gdev) +{ + struct input_dev *input; + + input = devm_input_allocate_device(gdev->dev); + if (!input) + return -ENOMEM; + + input->id.bustype = BUS_PCI; + input->id.vendor = VBOX_VENDORID; + input->id.product = VMMDEV_DEVICEID; + input->open = vbg_input_open; + input->close = vbg_input_close; + input->dev.parent = gdev->dev; + input->name = "VirtualBox mouse integration"; + + input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN, + VMMDEV_MOUSE_RANGE_MAX, 0, 0); + input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN, + VMMDEV_MOUSE_RANGE_MAX, 0, 0); + input_set_capability(input, EV_KEY, BTN_MOUSE); + input_set_drvdata(input, gdev); + + gdev->input = input; + + return input_register_device(gdev->input); +} + +static ssize_t host_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vbg_dev *gdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", gdev->host_version); +} + +static ssize_t host_features_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct vbg_dev *gdev = dev_get_drvdata(dev); + + return sprintf(buf, "%#x\n", gdev->host_features); +} + +static DEVICE_ATTR_RO(host_version); +static DEVICE_ATTR_RO(host_features); + +/** + * Does the PCI detection and init of the device. + * + * Return: 0 on success, negated errno on failure. + */ +static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct device *dev = &pci->dev; + resource_size_t io, io_len, mmio, mmio_len; + struct vmmdev_memory *vmmdev; + struct vbg_dev *gdev; + int ret; + + gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL); + if (!gdev) + return -ENOMEM; + + ret = pci_enable_device(pci); + if (ret != 0) { + vbg_err("vboxguest: Error enabling device: %d\n", ret); + return ret; + } + + ret = -ENODEV; + + io = pci_resource_start(pci, 0); + io_len = pci_resource_len(pci, 0); + if (!io || !io_len) { + vbg_err("vboxguest: Error IO-port resource (0) is missing\n"); + goto err_disable_pcidev; + } + if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) { + vbg_err("vboxguest: Error could not claim IO resource\n"); + ret = -EBUSY; + goto err_disable_pcidev; + } + + mmio = pci_resource_start(pci, 1); + mmio_len = pci_resource_len(pci, 1); + if (!mmio || !mmio_len) { + vbg_err("vboxguest: Error MMIO resource (1) is missing\n"); + goto err_disable_pcidev; + } + + if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) { + vbg_err("vboxguest: Error could not claim MMIO resource\n"); + ret = -EBUSY; + goto err_disable_pcidev; + } + + vmmdev = devm_ioremap(dev, mmio, mmio_len); + if (!vmmdev) { + vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n", + (void *)mmio, (int)mmio_len); + goto err_disable_pcidev; + } + + /* Validate MMIO region version and size. 
*/
+ if (vmmdev->version != VMMDEV_MEMORY_VERSION ||
+ vmmdev->size < 32 || vmmdev->size > mmio_len) {
+ vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n",
+ vmmdev->version, VMMDEV_MEMORY_VERSION,
+ vmmdev->size, (int)mmio_len);
+ goto err_disable_pcidev;
+ }
+
+ gdev->io_port = io;
+ gdev->mmio = vmmdev;
+ gdev->dev = dev;
+ gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
+ gdev->misc_device.name = DEVICE_NAME;
+ gdev->misc_device.fops = &vbg_misc_device_fops;
+ gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
+ gdev->misc_device_user.name = DEVICE_NAME_USER;
+ gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
+
+ ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+ if (ret)
+ goto err_disable_pcidev;
+
+ ret = vbg_create_input_device(gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error creating input device: %d\n", ret);
+ goto err_vbg_core_exit;
+ }
+
+ ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
+ DEVICE_NAME, gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error requesting irq: %d\n", ret);
+ goto err_vbg_core_exit;
+ }
+
+ ret = misc_register(&gdev->misc_device);
+ if (ret) {
+ vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+ DEVICE_NAME, ret);
+ goto err_vbg_core_exit;
+ }
+
+ ret = misc_register(&gdev->misc_device_user);
+ if (ret) {
+ vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+ DEVICE_NAME_USER, ret);
+ goto err_unregister_misc_device;
+ }
+
+ mutex_lock(&vbg_gdev_mutex);
+ if (!vbg_gdev)
+ vbg_gdev = gdev;
+ else
+ ret = -EBUSY;
+ mutex_unlock(&vbg_gdev_mutex);
+
+ if (ret) {
+ vbg_err("vboxguest: Error more than 1 vbox guest pci device\n");
+ goto err_unregister_misc_device_user;
+ }
+
+ pci_set_drvdata(pci, gdev);
+ device_create_file(dev, &dev_attr_host_version);
+ device_create_file(dev, &dev_attr_host_features);
+
+ vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size %d)\n",
+ gdev->misc_device.minor, pci->irq, gdev->io_port,
+ (void *)mmio, (int)mmio_len);
+
+ return 0;
+
+err_unregister_misc_device_user:
+ misc_deregister(&gdev->misc_device_user);
+err_unregister_misc_device:
+ misc_deregister(&gdev->misc_device);
+err_vbg_core_exit:
+ vbg_core_exit(gdev);
+err_disable_pcidev:
+ pci_disable_device(pci);
+
+ return ret;
+}
+
+static void vbg_pci_remove(struct pci_dev *pci)
+{
+ struct vbg_dev *gdev = pci_get_drvdata(pci);
+
+ mutex_lock(&vbg_gdev_mutex);
+ vbg_gdev = NULL;
+ mutex_unlock(&vbg_gdev_mutex);
+
+ device_remove_file(gdev->dev, &dev_attr_host_features);
+ device_remove_file(gdev->dev, &dev_attr_host_version);
+ misc_deregister(&gdev->misc_device_user);
+ misc_deregister(&gdev->misc_device);
+ vbg_core_exit(gdev);
+ pci_disable_device(pci);
+}
+
+struct vbg_dev *vbg_get_gdev(void)
+{
+ mutex_lock(&vbg_gdev_mutex);
+
+ /*
+ * Note on success we keep the mutex locked until vbg_put_gdev(),
+ * this stops vbg_pci_remove from removing the device from underneath
+ * vboxsf. vboxsf will only hold a reference for a short while.
+ */
+ if (vbg_gdev)
+ return vbg_gdev;
+
+ mutex_unlock(&vbg_gdev_mutex);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(vbg_get_gdev);
+
+void vbg_put_gdev(struct vbg_dev *gdev)
+{
+ WARN_ON(gdev != vbg_gdev);
+ mutex_unlock(&vbg_gdev_mutex);
+}
+EXPORT_SYMBOL(vbg_put_gdev);
+
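A sketch of how a kernel consumer such as the planned vboxsf driver might use
this get/put pair; vboxsf_connect is a made-up name, while vbg_hgcm_connect()
and vbg_status_code_to_errno() are the helpers provided by this series:

	/* Illustrative only: keep the critical section short, since the
	 * mutex stays held between vbg_get_gdev() and vbg_put_gdev(). */
	static int vboxsf_connect(struct vmmdev_hgcm_service_location *loc,
				  u32 *client_id)
	{
		struct vbg_dev *gdev;
		int vbox_status, ret;

		gdev = vbg_get_gdev();
		if (IS_ERR(gdev))
			return PTR_ERR(gdev); /* no vboxguest device bound */

		ret = vbg_hgcm_connect(gdev, loc, client_id, &vbox_status);
		if (ret == 0)
			ret = vbg_status_code_to_errno(vbox_status);

		vbg_put_gdev(gdev); /* releases vbg_gdev_mutex */
		return ret;
	}

+/**
+ * Callback for mouse events.
+ *
+ * This is called at the end of the ISR, after leaving the event spinlock, if
+ * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
+ *
+ * @gdev: The device extension.
+ */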
+void vbg_linux_mouse_event(struct vbg_dev *gdev)
+{
+ int rc;
+
+ /* Report events to the kernel input device */
+ gdev->mouse_status_req->mouse_features = 0;
+ gdev->mouse_status_req->pointer_pos_x = 0;
+ gdev->mouse_status_req->pointer_pos_y = 0;
+ rc = vbg_req_perform(gdev, gdev->mouse_status_req);
+ if (rc >= 0) {
+ input_report_abs(gdev->input, ABS_X,
+ gdev->mouse_status_req->pointer_pos_x);
+ input_report_abs(gdev->input, ABS_Y,
+ gdev->mouse_status_req->pointer_pos_y);
+ input_sync(gdev->input);
+ }
+}
+
+static const struct pci_device_id vbg_pci_ids[] = {
+ { .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
+
+static struct pci_driver vbg_pci_driver = {
+ .name = DEVICE_NAME,
+ .id_table = vbg_pci_ids,
+ .probe = vbg_pci_probe,
+ .remove = vbg_pci_remove,
+};
+
+module_pci_driver(vbg_pci_driver);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
new file mode 100644
index 000000000000..77f0c8f8a231
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * VBox Guest additions version info; this is used by the host to determine
+ * supported guest-addition features in some cases. So this will need to be
+ * synced with vbox upstream's versioning scheme when we implement / port
+ * new features from the upstream out-of-tree vboxguest driver.
+ */
+
+#ifndef __VBOX_VERSION_H__
+#define __VBOX_VERSION_H__
+
+/* Last synced October 4th 2017 */
+#define VBG_VERSION_MAJOR 5
+#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_BUILD 0
+#define VBG_SVN_REV 68940
+#define VBG_VERSION_STRING "5.2.0"
+
+#endif
--
cgit

From 0b598e4f877ba28603985031b98d4d592464ade7 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 21 Dec 2017 17:15:18 +0100
Subject: virt: vbox: use %pap format for printing resource_size_t

resource_size_t may be larger than pointers depending on configuration,
so we can run into this build warning:

drivers/virt/vboxguest/vboxguest_linux.c: In function 'vbg_pci_probe':
drivers/virt/vboxguest/vboxguest_linux.c:295:4: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
drivers/virt/vboxguest/vboxguest_linux.c:367:4: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]

This uses the special %pap to print the address by reference.
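For context, the %pa/%pap printk formats print a phys_addr_t or
resource_size_t through a pointer to it, so the value is always passed by
reference and the format works whether the type is 32 or 64 bits wide. A
minimal sketch of the convention (illustrative, not part of the patch):

	static void vbg_print_bar0(struct pci_dev *pci)
	{
		resource_size_t start = pci_resource_start(pci, 0);
		resource_size_t len = pci_resource_len(pci, 0);

		/* Note: the arguments are addresses, not values. */
		pr_info("BAR0 at %pap, size %pap\n", &start, &len);
	}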
Fixes: 0ba002bc4393 ("virt: Add vboxguest driver for Virtual Box Guest integration") Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- drivers/virt/vboxguest/vboxguest_linux.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/virt') diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index d045aa51ce03..82e280d38cc2 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -291,8 +291,8 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) vmmdev = devm_ioremap(dev, mmio, mmio_len); if (!vmmdev) { - vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n", - (void *)mmio, (int)mmio_len); + vbg_err("vboxguest: Error ioremap failed; MMIO addr=%pap size=%pap\n", + &mmio, &mmio_len); goto err_disable_pcidev; } @@ -362,9 +362,9 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) device_create_file(dev, &dev_attr_host_version); device_create_file(dev, &dev_attr_host_features); - vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size %d)\n", + vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %pap (size %pap)\n", gdev->misc_device.minor, pci->irq, gdev->io_port, - (void *)mmio, (int)mmio_len); + &mmio, &mmio_len); return 0; -- cgit From f72c3500911f6a14876407c859e511cdb48bf432 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 15 Jan 2018 18:46:25 +0100 Subject: virt: vbox: Add error mapping for VERR_INVALID_NAME and VERR_NO_MORE_FILES Add error mapping for VERR_INVALID_NAME and VERR_NO_MORE_FILES vbox status codes, these are both used by the vboxsf (shared folder) code. Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/virt/vboxguest/vboxguest_utils.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/virt') diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 8daea691fbb5..0f0dab8023cf 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -731,6 +731,7 @@ static const int vbg_status_code_to_errno_table[] = { [-VERR_NOT_SAME_DEVICE] = -EXDEV, [-VERR_NOT_A_DIRECTORY] = -ENOTDIR, [-VERR_PATH_NOT_FOUND] = -ENOTDIR, + [-VERR_INVALID_NAME] = -ENOENT, [-VERR_IS_A_DIRECTORY] = -EISDIR, [-VERR_INVALID_PARAMETER] = -EINVAL, [-VERR_TOO_MANY_OPEN_FILES] = -ENFILE, @@ -748,6 +749,7 @@ static const int vbg_status_code_to_errno_table[] = { [-VERR_NOT_SUPPORTED] = -ENOSYS, [-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY, [-VERR_TOO_MANY_SYMLINKS] = -ELOOP, + [-VERR_NO_MORE_FILES] = -ENODATA, [-VERR_NO_DATA] = -ENODATA, [-VERR_NET_NO_NETWORK] = -ENONET, [-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ, -- cgit
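For reference, the table these two entries extend is consumed by
vbg_status_code_to_errno(). A condensed sketch of the lookup logic; details
such as error logging are elided, so treat this as illustrative rather than
the exact implementation:

	int vbg_status_code_to_errno(int rc)
	{
		if (rc >= 0)
			return 0;

		rc = -rc;
		if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
		    vbg_status_code_to_errno_table[rc] == 0)
			return -EPROTO; /* unmapped VBox status code */

		return vbg_status_code_to_errno_table[rc];
	}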