Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 273
1 file changed, 230 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index cf84d382dd41..0c194b4e9488 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -31,6 +31,9 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
@@ -84,6 +87,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
vgdev->vbufs = NULL;
}
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+ vbuf->resp_cb = NULL;
+ vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+ vbuf->resp_buf = (void *)vbuf->buf + size;
+ return vbuf;
+}
+
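/*
 * Annotation (not part of the patch): both this helper and
 * virtio_gpu_get_vbuf() below rely on each vbufs kmem_cache object being
 * one contiguous allocation, laid out as
 *
 *   [ struct virtio_gpu_vbuffer | inline cmd buffer | inline resp buffer ]
 *
 * so vbuf->buf points just past the header and vbuf->resp_buf just past
 * the command. The panic variant differs only in using GFP_ATOMIC and a
 * fixed ctrl_hdr-sized response, since it runs from the panic handler.
 */
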
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
int size, int resp_size, void *resp_buf,
@@ -91,9 +110,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
BUG_ON(size > MAX_INLINE_CMD_SIZE ||
size < sizeof(struct virtio_gpu_ctrl_hdr));
@@ -137,6 +154,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
virtio_gpu_resp_cb cb,
struct virtio_gpu_vbuffer **vbuffer_p,
@@ -147,10 +176,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
}
@@ -205,7 +230,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
struct list_head reclaim_list;
struct virtio_gpu_vbuffer *entry, *tmp;
struct virtio_gpu_ctrl_hdr *resp;
- u64 fence_id = 0;
+ u64 fence_id;
INIT_LIST_HEAD(&reclaim_list);
spin_lock(&vgdev->ctrlq.qlock);
@@ -219,11 +244,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
- trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+ trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
+
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -232,23 +258,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
- u64 f = le64_to_cpu(resp->fence_id);
-
- if (fence_id > f) {
- DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
- __func__, fence_id, f);
- } else {
- fence_id = f;
- }
+ fence_id = le64_to_cpu(resp->fence_id);
+ virtio_gpu_fence_event_process(vgdev, fence_id);
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
- if (fence_id)
- virtio_gpu_fence_event_process(vgdev, fence_id);
-
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
if (entry->objs)
virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
@@ -274,6 +291,10 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
spin_unlock(&vgdev->cursorq.qlock);
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ struct virtio_gpu_ctrl_hdr *resp =
+ (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+ trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
list_del(&entry->list);
free_vbuf(vgdev, entry);
}
@@ -320,6 +341,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ int ret;
+
+ if (vgdev->has_indirect)
+ elemcnt = 1;
+
+ if (vq->num_free < elemcnt)
+ return -ENOMEM;
+
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ WARN_ON(ret);
+
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+ atomic_inc(&vgdev->pending_commands);
+
+ return 0;
+}
+
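/*
 * Annotation: unlike virtio_gpu_queue_ctrl_sgs() below, which waits on
 * ctrlq.ack_queue until the dequeue work frees descriptors and then
 * retries, the panic variant must not block, so it fails with -ENOMEM
 * when the ring is full and relies on GFP_ATOMIC in virtqueue_add_sgs().
 */
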
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence,
@@ -335,7 +384,7 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
free_vbuf(vgdev, vbuf);
- return -1;
+ return -ENODEV;
}
if (vgdev->has_indirect)
@@ -366,7 +415,8 @@ again:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
WARN_ON(ret);
- trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
atomic_inc(&vgdev->pending_commands);
@@ -376,6 +426,32 @@ again:
return 0;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct scatterlist *sgs[3], vcmd, vresp;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
+ return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+ elemcnt, sgs,
+ outcnt, incnt);
+}
+
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence)
@@ -394,12 +470,13 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
+
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
- return -1;
+ return -ENOMEM;
}
elemcnt += sg_ents;
@@ -430,6 +507,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
return ret;
}
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+ bool notify;
+
+ if (!atomic_read(&vgdev->pending_commands))
+ return;
+
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
+
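/*
 * Usage sketch (annotation, not part of the patch): the virtio_gpu_panic_*
 * helpers above are intended to be chained from a drm_panic flush path,
 * where sleeping, fences and normal error handling are unavailable. The
 * wrapper function and its parameters below are illustrative only.
 */
static void example_panic_flush(struct virtio_gpu_device *vgdev,
                                struct virtio_gpu_object *bo,
                                struct virtio_gpu_object_array *objs,
                                uint32_t width, uint32_t height)
{
	/* Copy the freshly drawn panic image from guest memory to the host. */
	if (virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, 0, width, height,
						     0, 0, objs))
		return;

	/* Ask the host to present the updated resource... */
	virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle,
					    0, 0, width, height);

	/* ...and kick the control queue without taking ctrlq.qlock. */
	virtio_gpu_panic_notify(vgdev);
}
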
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
bool notify;
@@ -478,8 +570,10 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+ vbuf->seqno = ++vgdev->cursorq.seqno;
trace_virtio_gpu_cmd_queue(vq,
- virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ vbuf->seqno);
notify = virtqueue_kick_prepare(vq);
}
@@ -573,16 +667,42 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct virtio_gpu_resource_flush *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = NULL;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
- uint32_t width, uint32_t height)
+ uint32_t width, uint32_t height,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_flush *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
cmd_p->resource_id = cpu_to_le32(resource_id);
@@ -591,7 +711,38 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
cmd_p->r.x = cpu_to_le32(x);
cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
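/*
 * Annotation: virtio_gpu_cmd_resource_flush() above now takes the object
 * array and an optional fence and goes through the fenced queue path,
 * presumably so a plane update can keep the backing object alive via
 * vbuf->objs and order the flush against the fence; callers that need
 * neither can still pass NULL for both.
 */
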
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_transfer_to_host_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -605,11 +756,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -649,6 +799,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -731,7 +898,7 @@ static int virtio_get_edid_block(void *data, u8 *buf,
size_t start = block * EDID_LENGTH;
if (start + len > le32_to_cpu(resp->size))
- return -1;
+ return -EINVAL;
memcpy(buf, resp->edid + start, len);
return 0;
}
@@ -745,21 +912,21 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
uint32_t scanout = le32_to_cpu(cmd->scanout);
struct virtio_gpu_output *output;
- struct edid *new_edid, *old_edid;
+ const struct drm_edid *new_edid, *old_edid;
if (scanout >= vgdev->num_scanouts)
return;
output = vgdev->outputs + scanout;
- new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
- drm_connector_update_edid_property(&output->conn, new_edid);
+ new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
+ drm_edid_connector_update(&output->conn, new_edid);
spin_lock(&vgdev->display_info_lock);
- old_edid = output->edid;
- output->edid = new_edid;
+ old_edid = output->drm_edid;
+ output->drm_edid = new_edid;
spin_unlock(&vgdev->display_info_lock);
- kfree(old_edid);
+ drm_edid_free(old_edid);
wake_up(&vgdev->resp_wq);
}
@@ -914,7 +1081,8 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name)
+ uint32_t context_init, uint32_t nlen,
+ const char *name)
{
struct virtio_gpu_ctx_create *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -925,8 +1093,8 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
+ cmd_p->context_init = cpu_to_le32(context_init);
+ strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -1027,11 +1195,9 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- if (virtio_gpu_is_shmem(bo) && use_dma_api) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
- }
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1108,8 +1274,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry *ents,
unsigned int nents)
{
+ if (obj->attached)
+ return;
+
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
+
+ obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence)
+{
+ if (!obj->attached)
+ return;
+
+ virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+ fence);
+
+ obj->attached = false;
}
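/*
 * Hypothetical caller sketch (not from this patch): detaching backing
 * storage before it is released. The fence, if supplied, signals once the
 * host has processed the detach, so the old pages are safe to free.
 */
static void example_release_backing(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_fence *fence)
{
	virtio_gpu_object_detach(vgdev, bo, fence);
	virtio_gpu_notify(vgdev);

	if (fence) {
		dma_fence_wait(&fence->f, false);
		dma_fence_put(&fence->f);
	}
	/* bo's backing pages may now be reused or freed. */
}
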
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1270,6 +1454,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
bo->created = true;
+
+ if (nents)
+ bo->attached = true;
}
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,