Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_vq.c   1064
1 file changed, 834 insertions(+), 230 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 9eb96fb2c147..0c194b4e9488 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -26,42 +26,39 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <drm/drmP.h> -#include "virtgpu_drv.h" +#include <linux/dma-mapping.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <linux/virtio_ring.h> +#include <drm/drm_edid.h> +#include <drm/drm_print.h> + +#include "virtgpu_drv.h" +#include "virtgpu_trace.h" + #define MAX_INLINE_CMD_SIZE 96 #define MAX_INLINE_RESP_SIZE 24 #define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \ + MAX_INLINE_CMD_SIZE \ + MAX_INLINE_RESP_SIZE) -void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, - uint32_t *resid) +static void convert_to_hw_box(struct virtio_gpu_box *dst, + const struct drm_virtgpu_3d_box *src) { - int handle; - - idr_preload(GFP_KERNEL); - spin_lock(&vgdev->resource_idr_lock); - handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT); - spin_unlock(&vgdev->resource_idr_lock); - idr_preload_end(); - *resid = handle; -} - -void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) -{ - spin_lock(&vgdev->resource_idr_lock); - idr_remove(&vgdev->resource_idr, id); - spin_unlock(&vgdev->resource_idr_lock); + dst->x = cpu_to_le32(src->x); + dst->y = cpu_to_le32(src->y); + dst->z = cpu_to_le32(src->z); + dst->w = cpu_to_le32(src->w); + dst->h = cpu_to_le32(src->h); + dst->d = cpu_to_le32(src->d); } void virtio_gpu_ctrl_ack(struct virtqueue *vq) { struct drm_device *dev = vq->vdev->priv; struct virtio_gpu_device *vgdev = dev->dev_private; + schedule_work(&vgdev->ctrlq.dequeue_work); } @@ -69,6 +66,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq) { struct drm_device *dev = vq->vdev->priv; struct virtio_gpu_device *vgdev = dev->dev_private; + schedule_work(&vgdev->cursorq.dequeue_work); } @@ -89,6 +87,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev) vgdev->vbufs = NULL; } +/* For drm_panic */ +static struct virtio_gpu_vbuffer* +virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size) +{ + struct virtio_gpu_vbuffer *vbuf; + + vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC); + + vbuf->buf = (void *)vbuf + sizeof(*vbuf); + vbuf->size = size; + vbuf->resp_cb = NULL; + vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr); + vbuf->resp_buf = (void *)vbuf->buf + size; + return vbuf; +} + static struct virtio_gpu_vbuffer* virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev, int size, int resp_size, void *resp_buf, @@ -96,12 +110,10 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev, { struct virtio_gpu_vbuffer *vbuf; - vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL); - if (!vbuf) - return ERR_PTR(-ENOMEM); - memset(vbuf, 0, VBUFFER_SIZE); + vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL); - BUG_ON(size > MAX_INLINE_CMD_SIZE); + BUG_ON(size > MAX_INLINE_CMD_SIZE || + size < sizeof(struct virtio_gpu_ctrl_hdr)); vbuf->buf = (void *)vbuf + sizeof(*vbuf); vbuf->size = size; @@ -115,21 +127,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev, return vbuf; } -static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer **vbuffer_p, - int size) +static struct virtio_gpu_ctrl_hdr * +virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf) { - struct virtio_gpu_vbuffer *vbuf; - - vbuf = virtio_gpu_get_vbuf(vgdev, size, - sizeof(struct virtio_gpu_ctrl_hdr), - NULL, NULL); - if (IS_ERR(vbuf)) 
{ - *vbuffer_p = NULL; - return ERR_CAST(vbuf); - } - *vbuffer_p = vbuf; - return vbuf->buf; + /* this assumes a vbuf contains a command that starts with a + * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor + * virtqueues. + */ + return (struct virtio_gpu_ctrl_hdr *)vbuf->buf; } static struct virtio_gpu_update_cursor* @@ -149,6 +154,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev, return (struct virtio_gpu_update_cursor *)vbuf->buf; } +/* For drm_panic */ +static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer **vbuffer_p, + int cmd_size) +{ + struct virtio_gpu_vbuffer *vbuf; + + vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size); + *vbuffer_p = vbuf; + return (struct virtio_gpu_command *)vbuf->buf; +} + static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev, virtio_gpu_resp_cb cb, struct virtio_gpu_vbuffer **vbuffer_p, @@ -159,20 +176,35 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev, vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size, resp_size, resp_buf, cb); - if (IS_ERR(vbuf)) { - *vbuffer_p = NULL; - return ERR_CAST(vbuf); - } *vbuffer_p = vbuf; return (struct virtio_gpu_command *)vbuf->buf; } +static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer **vbuffer_p, + int size) +{ + return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size, + sizeof(struct virtio_gpu_ctrl_hdr), + NULL); +} + +static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer **vbuffer_p, + int size, + virtio_gpu_resp_cb cb) +{ + return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size, + sizeof(struct virtio_gpu_ctrl_hdr), + NULL); +} + static void free_vbuf(struct virtio_gpu_device *vgdev, struct virtio_gpu_vbuffer *vbuf) { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -198,7 +230,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) struct list_head reclaim_list; struct virtio_gpu_vbuffer *entry, *tmp; struct virtio_gpu_ctrl_hdr *resp; - u64 fence_id = 0; + u64 fence_id; INIT_LIST_HEAD(&reclaim_list); spin_lock(&vgdev->ctrlq.qlock); @@ -209,30 +241,37 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } while (!virtqueue_enable_cb(vgdev->ctrlq.vq)); spin_unlock(&vgdev->ctrlq.qlock); - list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + list_for_each_entry(entry, &reclaim_list, list) { resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; - if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) - DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type)); - if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) { - u64 f = le64_to_cpu(resp->fence_id); - if (fence_id > f) { - DRM_ERROR("%s: Oops: fence %llx -> %llx\n", - __func__, fence_id, f); - } else { - fence_id = f; - } + trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno); + + if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) { + if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) { + struct virtio_gpu_ctrl_hdr *cmd; + + cmd = virtio_gpu_vbuf_ctrl_hdr(entry); + DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n", + le32_to_cpu(resp->type), + le32_to_cpu(cmd->type)); + } else + DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type)); + } + if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) { + fence_id = le64_to_cpu(resp->fence_id); + virtio_gpu_fence_event_process(vgdev, fence_id); } if 
(entry->resp_cb) entry->resp_cb(vgdev, entry); + } + wake_up(&vgdev->ctrlq.ack_queue); + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + if (entry->objs) + virtio_gpu_array_put_free_delayed(vgdev, entry->objs); list_del(&entry->list); free_vbuf(vgdev, entry); } - wake_up(&vgdev->ctrlq.ack_queue); - - if (fence_id) - virtio_gpu_fence_event_process(vgdev, fence_id); } void virtio_gpu_dequeue_cursor_func(struct work_struct *work) @@ -252,110 +291,271 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) spin_unlock(&vgdev->cursorq.qlock); list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + struct virtio_gpu_ctrl_hdr *resp = + (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; + + trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno); list_del(&entry->list); free_vbuf(vgdev, entry); } wake_up(&vgdev->cursorq.ack_queue); } -static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) - __releases(&vgdev->ctrlq.qlock) - __acquires(&vgdev->ctrlq.qlock) +/* Create sg_table from a vmalloc'd buffer. */ +static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) +{ + int ret, s, i; + struct sg_table *sgt; + struct scatterlist *sg; + struct page *pg; + + if (WARN_ON(!PAGE_ALIGNED(data))) + return NULL; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL); + if (ret) { + kfree(sgt); + return NULL; + } + + for_each_sgtable_sg(sgt, sg, i) { + pg = vmalloc_to_page(data); + if (!pg) { + sg_free_table(sgt); + kfree(sgt); + return NULL; + } + + s = min_t(int, PAGE_SIZE, size); + sg_set_page(sg, pg, s, 0); + + size -= s; + data += s; + } + + return sgt; +} + +/* For drm_panic */ +static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + int elemcnt, + struct scatterlist **sgs, + int outcnt, + int incnt) { struct virtqueue *vq = vgdev->ctrlq.vq; - struct scatterlist *sgs[3], vcmd, vout, vresp; - int outcnt = 0, incnt = 0; int ret; - if (!vgdev->vqs_ready) + if (vgdev->has_indirect) + elemcnt = 1; + + if (vq->num_free < elemcnt) + return -ENOMEM; + + ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); + WARN_ON(ret); + + vbuf->seqno = ++vgdev->ctrlq.seqno; + trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno); + + atomic_inc(&vgdev->pending_commands); + + return 0; +} + +static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence, + int elemcnt, + struct scatterlist **sgs, + int outcnt, + int incnt) +{ + struct virtqueue *vq = vgdev->ctrlq.vq; + int ret, idx; + + if (!drm_dev_enter(vgdev->ddev, &idx)) { + if (fence && vbuf->objs) + virtio_gpu_array_unlock_resv(vbuf->objs); + free_vbuf(vgdev, vbuf); return -ENODEV; + } + + if (vgdev->has_indirect) + elemcnt = 1; + +again: + spin_lock(&vgdev->ctrlq.qlock); + if (vq->num_free < elemcnt) { + spin_unlock(&vgdev->ctrlq.qlock); + virtio_gpu_notify(vgdev); + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt); + goto again; + } + + /* now that the position of the vbuf in the virtqueue is known, we can + * finally set the fence id + */ + if (fence) { + virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf), + fence); + if (vbuf->objs) { + virtio_gpu_array_add_fence(vbuf->objs, &fence->f); + virtio_gpu_array_unlock_resv(vbuf->objs); + } + } + + ret = 
virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); + WARN_ON(ret); + + vbuf->seqno = ++vgdev->ctrlq.seqno; + trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno); + + atomic_inc(&vgdev->pending_commands); + + spin_unlock(&vgdev->ctrlq.qlock); + + drm_dev_exit(idx); + return 0; +} + +/* For drm_panic */ +static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct scatterlist *sgs[3], vcmd, vresp; + int elemcnt = 0, outcnt = 0, incnt = 0; + + /* set up vcmd */ sg_init_one(&vcmd, vbuf->buf, vbuf->size); - sgs[outcnt+incnt] = &vcmd; + elemcnt++; + sgs[outcnt] = &vcmd; outcnt++; + /* set up vresp */ + if (vbuf->resp_size) { + sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size); + elemcnt++; + sgs[outcnt + incnt] = &vresp; + incnt++; + } + + return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf, + elemcnt, sgs, + outcnt, incnt); +} + +static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence) +{ + struct scatterlist *sgs[3], vcmd, vout, vresp; + struct sg_table *sgt = NULL; + int elemcnt = 0, outcnt = 0, incnt = 0, ret; + + /* set up vcmd */ + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + elemcnt++; + sgs[outcnt] = &vcmd; + outcnt++; + + /* set up vout */ if (vbuf->data_size) { - sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); - sgs[outcnt + incnt] = &vout; + if (is_vmalloc_addr(vbuf->data_buf)) { + int sg_ents; + + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, + &sg_ents); + if (!sgt) { + if (fence && vbuf->objs) + virtio_gpu_array_unlock_resv(vbuf->objs); + return -ENOMEM; + } + + elemcnt += sg_ents; + sgs[outcnt] = sgt->sgl; + } else { + sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); + elemcnt++; + sgs[outcnt] = &vout; + } outcnt++; } + /* set up vresp */ if (vbuf->resp_size) { sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size); + elemcnt++; sgs[outcnt + incnt] = &vresp; incnt++; } -retry: - ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); - if (ret == -ENOSPC) { - spin_unlock(&vgdev->ctrlq.qlock); - wait_event(vgdev->ctrlq.ack_queue, vq->num_free); - spin_lock(&vgdev->ctrlq.qlock); - goto retry; - } else { - virtqueue_kick(vq); - } + ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt, + incnt); - if (!ret) - ret = vq->num_free; + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } return ret; } -static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +/* For drm_panic */ +void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev) { - int rc; + bool notify; - spin_lock(&vgdev->ctrlq.qlock); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); - spin_unlock(&vgdev->ctrlq.qlock); - return rc; + if (!atomic_read(&vgdev->pending_commands)) + return; + + atomic_set(&vgdev->pending_commands, 0); + notify = virtqueue_kick_prepare(vgdev->ctrlq.vq); + + if (notify) + virtqueue_notify(vgdev->ctrlq.vq); } -static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_ctrl_hdr *hdr, - struct virtio_gpu_fence **fence) +void virtio_gpu_notify(struct virtio_gpu_device *vgdev) { - struct virtqueue *vq = vgdev->ctrlq.vq; - int rc; + bool notify; + + if (!atomic_read(&vgdev->pending_commands)) + return; -again: spin_lock(&vgdev->ctrlq.qlock); + atomic_set(&vgdev->pending_commands, 0); + notify = virtqueue_kick_prepare(vgdev->ctrlq.vq); + 
spin_unlock(&vgdev->ctrlq.qlock); - /* - * Make sure we have enouth space in the virtqueue. If not - * wait here until we have. - * - * Without that virtio_gpu_queue_ctrl_buffer_nolock might have - * to wait for free space, which can result in fence ids being - * submitted out-of-order. - */ - if (vq->num_free < 3) { - spin_unlock(&vgdev->ctrlq.qlock); - wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3); - goto again; - } + if (notify) + virtqueue_notify(vgdev->ctrlq.vq); +} - if (fence) - virtio_gpu_fence_emit(vgdev, hdr, fence); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); - spin_unlock(&vgdev->ctrlq.qlock); - return rc; +static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL); } -static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { struct virtqueue *vq = vgdev->cursorq.vq; struct scatterlist *sgs[1], ccmd; - int ret; - int outcnt; + int idx, ret, outcnt; + bool notify; - if (!vgdev->vqs_ready) - return -ENODEV; + if (!drm_dev_enter(vgdev->ddev, &idx)) { + free_vbuf(vgdev, vbuf); + return; + } sg_init_one(&ccmd, vbuf->buf, vbuf->size); sgs[0] = &ccmd; @@ -366,73 +566,83 @@ retry: ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->cursorq.qlock); - wait_event(vgdev->cursorq.ack_queue, vq->num_free); + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt); spin_lock(&vgdev->cursorq.qlock); goto retry; } else { - virtqueue_kick(vq); + vbuf->seqno = ++vgdev->cursorq.seqno; + trace_virtio_gpu_cmd_queue(vq, + virtio_gpu_vbuf_ctrl_hdr(vbuf), + vbuf->seqno); + + notify = virtqueue_kick_prepare(vq); } spin_unlock(&vgdev->cursorq.qlock); - if (!ret) - ret = vq->num_free; - return ret; + if (notify) + virtqueue_notify(vq); + + drm_dev_exit(idx); } /* just create gem objects for userspace and long lived objects, - just use dma_alloced pages for the queue objects? */ + * just use dma_alloced pages for the queue objects? 
+ */ /* create a basic resource */ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, - uint32_t resource_id, - uint32_t format, - uint32_t width, - uint32_t height) + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_create_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); - cmd_p->resource_id = cpu_to_le32(resource_id); - cmd_p->format = cpu_to_le32(format); - cmd_p->width = cpu_to_le32(width); - cmd_p->height = cpu_to_le32(height); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->format = cpu_to_le32(params->format); + cmd_p->width = cpu_to_le32(params->width); + cmd_p->height = cpu_to_le32(params->height); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); + bo->created = true; } -void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, - uint32_t resource_id) +static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { - struct virtio_gpu_resource_unref *cmd_p; - struct virtio_gpu_vbuffer *vbuf; - - cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); - memset(cmd_p, 0, sizeof(*cmd_p)); + struct virtio_gpu_object *bo; - cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF); - cmd_p->resource_id = cpu_to_le32(resource_id); + bo = vbuf->resp_cb_data; + vbuf->resp_cb_data = NULL; - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_cleanup_object(bo); } -void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev, - uint32_t resource_id) +void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) { - struct virtio_gpu_resource_detach_backing *cmd_p; + struct virtio_gpu_resource_unref *cmd_p; struct virtio_gpu_vbuffer *vbuf; + int ret; - cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p), + virtio_gpu_cmd_unref_cb); memset(cmd_p, 0, sizeof(*cmd_p)); - cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + vbuf->resp_cb_data = bo; + ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + if (ret < 0) + virtio_gpu_cleanup_object(bo); } void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, @@ -457,16 +667,42 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } +/* For drm_panic */ +void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev, + uint32_t resource_id, + uint32_t x, uint32_t y, + uint32_t width, uint32_t height) +{ + struct virtio_gpu_resource_flush *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = NULL; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH); + cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + 
virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf); +} + void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, uint32_t resource_id, uint32_t x, uint32_t y, - uint32_t width, uint32_t height) + uint32_t width, uint32_t height, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_flush *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH); cmd_p->resource_id = cpu_to_le32(resource_id); @@ -475,30 +711,69 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, cmd_p->r.x = cpu_to_le32(x); cmd_p->r.y = cpu_to_le32(y); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +/* For drm_panic */ +int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, + uint64_t offset, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_transfer_to_host_2d *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); + + if (virtio_gpu_is_shmem(bo) && use_dma_api) + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + bo->base.sgt, DMA_TO_DEVICE); + + cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->offset = cpu_to_le64(offset); + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf); } void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, - struct virtio_gpu_fence **fence) + uint64_t offset, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); + + if (virtio_gpu_is_shmem(bo) && use_dma_api) + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + bo->base.sgt, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); cmd_p->offset = cpu_to_le64(offset); - cmd_p->r.width = width; - cmd_p->r.height = height; - cmd_p->r.x = x; - cmd_p->r.y = y; + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } static void @@ -506,7 +781,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev, uint32_t resource_id, struct virtio_gpu_mem_entry *ents, uint32_t nents, - struct virtio_gpu_fence **fence) + struct 
virtio_gpu_fence *fence) { struct virtio_gpu_resource_attach_backing *cmd_p; struct virtio_gpu_vbuffer *vbuf; @@ -521,7 +796,24 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev, vbuf->data_buf = ents; vbuf->data_size = sizeof(*ents) * nents; - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +static void +virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev, + uint32_t resource_id, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_resource_detach_backing *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING); + cmd_p->resource_id = cpu_to_le32(resource_id); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev, @@ -563,9 +855,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, int i = le32_to_cpu(cmd->capset_index); spin_lock(&vgdev->display_info_lock); - vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); - vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); - vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + if (vgdev->capsets) { + vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); + vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); + vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + } else { + DRM_ERROR("invalid capset memory."); + } spin_unlock(&vgdev->display_info_lock); wake_up(&vgdev->resp_wq); } @@ -585,14 +881,54 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev, cache_ent->id == le32_to_cpu(cmd->capset_id)) { memcpy(cache_ent->caps_cache, resp->capset_data, cache_ent->size); + /* Copy must occur before is_valid is signalled. 
*/ + smp_wmb(); atomic_set(&cache_ent->is_valid, 1); break; } } spin_unlock(&vgdev->display_info_lock); - wake_up(&vgdev->resp_wq); + wake_up_all(&vgdev->resp_wq); } +static int virtio_get_edid_block(void *data, u8 *buf, + unsigned int block, size_t len) +{ + struct virtio_gpu_resp_edid *resp = data; + size_t start = block * EDID_LENGTH; + + if (start + len > le32_to_cpu(resp->size)) + return -EINVAL; + memcpy(buf, resp->edid + start, len); + return 0; +} + +static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_cmd_get_edid *cmd = + (struct virtio_gpu_cmd_get_edid *)vbuf->buf; + struct virtio_gpu_resp_edid *resp = + (struct virtio_gpu_resp_edid *)vbuf->resp_buf; + uint32_t scanout = le32_to_cpu(cmd->scanout); + struct virtio_gpu_output *output; + const struct drm_edid *new_edid, *old_edid; + + if (scanout >= vgdev->num_scanouts) + return; + output = vgdev->outputs + scanout; + + new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp); + drm_edid_connector_update(&output->conn, new_edid); + + spin_lock(&vgdev->display_info_lock); + old_edid = output->drm_edid; + output->drm_edid = new_edid; + spin_unlock(&vgdev->display_info_lock); + + drm_edid_free(old_edid); + wake_up(&vgdev->resp_wq); +} int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev) { @@ -646,11 +982,14 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, { struct virtio_gpu_get_capset *cmd_p; struct virtio_gpu_vbuffer *vbuf; - int max_size = vgdev->capsets[idx].max_size; + int max_size; struct virtio_gpu_drv_cap_cache *cache_ent; + struct virtio_gpu_drv_cap_cache *search_ent; void *resp_buf; - if (idx > vgdev->num_capsets) + *cache_p = NULL; + + if (idx >= vgdev->num_capsets) return -EINVAL; if (version > vgdev->capsets[idx].max_version) @@ -660,6 +999,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, if (!cache_ent) return -ENOMEM; + max_size = vgdev->capsets[idx].max_size; cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL); if (!cache_ent->caps_cache) { kfree(cache_ent); @@ -679,9 +1019,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, atomic_set(&cache_ent->is_valid, 0); cache_ent->size = max_size; spin_lock(&vgdev->display_info_lock); - list_add_tail(&cache_ent->head, &vgdev->cap_cache); + /* Search while under lock in case it was added by another task. */ + list_for_each_entry(search_ent, &vgdev->cap_cache, head) { + if (search_ent->id == vgdev->capsets[idx].id && + search_ent->version == version) { + *cache_p = search_ent; + break; + } + } + if (!*cache_p) + list_add_tail(&cache_ent->head, &vgdev->cap_cache); spin_unlock(&vgdev->display_info_lock); + if (*cache_p) { + /* Entry was found, so free everything that was just created. 
*/ + kfree(resp_buf); + kfree(cache_ent->caps_cache); + kfree(cache_ent); + return 0; + } + cmd_p = virtio_gpu_alloc_cmd_resp (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset) + max_size, @@ -695,8 +1052,37 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, return 0; } +int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev) +{ + struct virtio_gpu_cmd_get_edid *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + void *resp_buf; + int scanout; + + if (WARN_ON(!vgdev->has_edid)) + return -EINVAL; + + for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) { + resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid), + GFP_KERNEL); + if (!resp_buf) + return -ENOMEM; + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf, + sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid), + resp_buf); + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID); + cmd_p->scanout = cpu_to_le32(scanout); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + } + + return 0; +} + void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id, - uint32_t nlen, const char *name) + uint32_t context_init, uint32_t nlen, + const char *name) { struct virtio_gpu_ctx_create *cmd_p; struct virtio_gpu_vbuffer *vbuf; @@ -707,8 +1093,8 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id, cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE); cmd_p->hdr.ctx_id = cpu_to_le32(id); cmd_p->nlen = cpu_to_le32(nlen); - strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1); - cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0; + cmd_p->context_init = cpu_to_le32(context_init); + strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } @@ -728,102 +1114,143 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id) + struct virtio_gpu_object_array *objs) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_ctx_resource *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); - } void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id) + struct virtio_gpu_object_array *objs) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_ctx_resource *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_resource_create_3d *rc_3d, - struct virtio_gpu_fence **fence) + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence 
*fence) { struct virtio_gpu_resource_create_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; - *cmd_p = *rc_3d; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D); - cmd_p->hdr.flags = 0; - - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->format = cpu_to_le32(params->format); + cmd_p->width = cpu_to_le32(params->width); + cmd_p->height = cpu_to_le32(params->height); + + cmd_p->target = cpu_to_le32(params->target); + cmd_p->bind = cpu_to_le32(params->bind); + cmd_p->depth = cpu_to_le32(params->depth); + cmd_p->array_size = cpu_to_le32(params->array_size); + cmd_p->last_level = cpu_to_le32(params->last_level); + cmd_p->nr_samples = cpu_to_le32(params->nr_samples); + cmd_p->flags = cpu_to_le32(params->flags); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); + + bo->created = true; } void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint32_t ctx_id, + uint32_t ctx_id, uint64_t offset, uint32_t level, - struct virtio_gpu_box *box, - struct virtio_gpu_fence **fence) + uint32_t stride, + uint32_t layer_stride, + struct drm_virtgpu_3d_box *box, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); + + if (virtio_gpu_is_shmem(bo) && use_dma_api) + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + bo->base.sgt, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); - cmd_p->box = *box; + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + convert_to_hw_box(&cmd_p->box, box); cmd_p->offset = cpu_to_le64(offset); cmd_p->level = cpu_to_le32(level); + cmd_p->stride = cpu_to_le32(stride); + cmd_p->layer_stride = cpu_to_le32(layer_stride); - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint32_t ctx_id, + uint32_t ctx_id, uint64_t offset, uint32_t level, - struct virtio_gpu_box *box, - struct virtio_gpu_fence **fence) + uint32_t stride, + uint32_t layer_stride, + struct drm_virtgpu_3d_box *box, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); - cmd_p->box = *box; + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + convert_to_hw_box(&cmd_p->box, box); cmd_p->offset = cpu_to_le64(offset); cmd_p->level = cpu_to_le32(level); + cmd_p->stride = cpu_to_le32(stride); + cmd_p->layer_stride = cpu_to_le32(layer_stride); - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, 
vbuf, &cmd_p->hdr, fence); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, void *data, uint32_t data_size, - uint32_t ctx_id, struct virtio_gpu_fence **fence) + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_cmd_submit *cmd_p; struct virtio_gpu_vbuffer *vbuf; @@ -833,50 +1260,40 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, vbuf->data_buf = data; vbuf->data_size = data_size; + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); cmd_p->size = cpu_to_le32(data_size); - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } -int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *obj, - uint32_t resource_id, - struct virtio_gpu_fence **fence) +void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *obj, + struct virtio_gpu_mem_entry *ents, + unsigned int nents) { - struct virtio_gpu_mem_entry *ents; - struct scatterlist *sg; - int si; + if (obj->attached) + return; - if (!obj->pages) { - int ret; - ret = virtio_gpu_object_get_sg_table(vgdev, obj); - if (ret) - return ret; - } + virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle, + ents, nents, NULL); - /* gets freed when the ring has consumed it */ - ents = kmalloc_array(obj->pages->nents, - sizeof(struct virtio_gpu_mem_entry), - GFP_KERNEL); - if (!ents) { - DRM_ERROR("failed to allocate ent list\n"); - return -ENOMEM; - } + obj->attached = true; +} - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) { - ents[si].addr = cpu_to_le64(sg_phys(sg)); - ents[si].length = cpu_to_le32(sg->length); - ents[si].padding = 0; - } +void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *obj, + struct virtio_gpu_fence *fence) +{ + if (!obj->attached) + return; - virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id, - ents, obj->pages->nents, + virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle, fence); - obj->hw_res_handle = resource_id; - return 0; + + obj->attached = false; } void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, @@ -890,3 +1307,190 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, memcpy(cur_p, &output->cursor, sizeof(output->cursor)); virtio_gpu_queue_cursor(vgdev, vbuf); } + +static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_object *obj = + gem_to_virtio_gpu_obj(vbuf->objs->objs[0]); + struct virtio_gpu_resp_resource_uuid *resp = + (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf; + uint32_t resp_type = le32_to_cpu(resp->hdr.type); + + spin_lock(&vgdev->resource_export_lock); + WARN_ON(obj->uuid_state != STATE_INITIALIZING); + + if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID && + obj->uuid_state == STATE_INITIALIZING) { + import_uuid(&obj->uuid, resp->uuid); + obj->uuid_state = STATE_OK; + } else { + obj->uuid_state = STATE_ERR; + } + spin_unlock(&vgdev->resource_export_lock); + + wake_up_all(&vgdev->resp_wq); +} + +int +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_resource_assign_uuid *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + struct 
virtio_gpu_resp_resource_uuid *resp_buf; + + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); + if (!resp_buf) { + spin_lock(&vgdev->resource_export_lock); + bo->uuid_state = STATE_ERR; + spin_unlock(&vgdev->resource_export_lock); + virtio_gpu_array_put_free(objs); + return -ENOMEM; + } + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + vbuf->objs = objs; + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_object *bo = + gem_to_virtio_gpu_obj(vbuf->objs->objs[0]); + struct virtio_gpu_resp_map_info *resp = + (struct virtio_gpu_resp_map_info *)vbuf->resp_buf; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + uint32_t resp_type = le32_to_cpu(resp->hdr.type); + + spin_lock(&vgdev->host_visible_lock); + + if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) { + vram->map_info = resp->map_info; + vram->map_state = STATE_OK; + } else { + vram->map_state = STATE_ERR; + } + + spin_unlock(&vgdev->host_visible_lock); + wake_up_all(&vgdev->resp_wq); +} + +int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs, uint64_t offset) +{ + struct virtio_gpu_resource_map_blob *cmd_p; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_vbuffer *vbuf; + struct virtio_gpu_resp_map_info *resp_buf; + + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); + if (!resp_buf) + return -ENOMEM; + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_map_info), resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->offset = cpu_to_le64(offset); + vbuf->objs = objs; + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + struct virtio_gpu_resource_unmap_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void +virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_mem_entry *ents, + uint32_t nents) +{ + struct virtio_gpu_resource_create_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB); + cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->blob_mem = cpu_to_le32(params->blob_mem); + cmd_p->blob_flags = cpu_to_le32(params->blob_flags); + cmd_p->blob_id = cpu_to_le64(params->blob_id); + cmd_p->size = cpu_to_le64(params->size); + cmd_p->nr_entries = cpu_to_le32(nents); + + vbuf->data_buf = ents; + vbuf->data_size = 
sizeof(*ents) * nents; + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + bo->created = true; + + if (nents) + bo->attached = true; +} + +void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, + struct virtio_gpu_object *bo, + struct drm_framebuffer *fb, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y) +{ + uint32_t i; + struct virtio_gpu_set_scanout_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + uint32_t format = virtio_gpu_translate_format(fb->format->format); + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->scanout_id = cpu_to_le32(scanout_id); + + cmd_p->format = cpu_to_le32(format); + cmd_p->width = cpu_to_le32(fb->width); + cmd_p->height = cpu_to_le32(fb->height); + + for (i = 0; i < 4; i++) { + cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]); + cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]); + } + + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} |
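
For reference, a minimal caller-side sketch of the reworked submission path in this patch: a command is allocated inline in a vbuffer, filled in little-endian byte order, queued with an optional fence, and the virtqueue kick is deferred until virtio_gpu_notify(). The function below is hypothetical (not part of the patch) and assumes it lives in virtgpu_vq.c so it can reach the file-local virtio_gpu_alloc_cmd() and virtio_gpu_queue_fenced_ctrl_buffer() helpers shown above.

/* Hypothetical example, not part of the patch: submit a RESOURCE_FLUSH for a
 * resource and kick the host once, mirroring virtio_gpu_cmd_resource_flush().
 */
static void example_flush_and_notify(struct virtio_gpu_device *vgdev,
				     uint32_t resource_id,
				     uint32_t width, uint32_t height,
				     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	/* vbuf allocation uses __GFP_NOFAIL, so no error check is needed. */
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);

	/* The fence id is emitted inside the queue helper, once the buffer's
	 * position in the virtqueue is known; passing NULL skips fencing.
	 */
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	/* Queued commands are only kicked to the host when the caller
	 * notifies, allowing several commands to be batched per kick.
	 */
	virtio_gpu_notify(vgdev);
}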
