Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_kms.c')
| -rw-r--r-- | drivers/gpu/drm/virtio/virtgpu_kms.c | 250 |
1 file changed, 151 insertions, 99 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 3af6181c05a8..f3594695bb82 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -25,13 +25,13 @@
 
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
+#include <linux/virtio_ring.h>
 
-static int virtio_gpu_fbdev = 1;
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
 
-MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console");
-module_param_named(fbdev, virtio_gpu_fbdev, int, 0400);
+#include "virtgpu_drv.h"
 
 static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 {
@@ -41,36 +41,20 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
         u32 events_read, events_clear = 0;
 
         /* read the config space */
-        virtio_cread(vgdev->vdev, struct virtio_gpu_config,
-                     events_read, &events_read);
+        virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+                        events_read, &events_read);
         if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
-                if (vgdev->has_edid)
-                        virtio_gpu_cmd_get_edids(vgdev);
-                virtio_gpu_cmd_get_display_info(vgdev);
-                drm_helper_hpd_irq_event(vgdev->ddev);
+                if (vgdev->num_scanouts) {
+                        if (vgdev->has_edid)
+                                virtio_gpu_cmd_get_edids(vgdev);
+                        virtio_gpu_cmd_get_display_info(vgdev);
+                        virtio_gpu_notify(vgdev);
+                        drm_helper_hpd_irq_event(vgdev->ddev);
+                }
                 events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
         }
-        virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
-                      events_clear, &events_clear);
-}
-
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
-                                     uint32_t nlen, const char *name)
-{
-        int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
-        if (handle < 0)
-                return handle;
-        handle += 1;
-        virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
-        return handle;
-}
-
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
-                                       uint32_t ctx_id)
-{
-        virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-        ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
+        virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
+                         events_clear, &events_clear);
 }
 
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -85,58 +69,78 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
                                    int num_capsets)
 {
         int i, ret;
+        bool invalid_capset_id = false;
+        struct drm_device *drm = vgdev->ddev;
 
-        vgdev->capsets = kcalloc(num_capsets,
-                                 sizeof(struct virtio_gpu_drv_capset),
-                                 GFP_KERNEL);
+        vgdev->capsets = drmm_kcalloc(drm, num_capsets,
+                                      sizeof(struct virtio_gpu_drv_capset),
+                                      GFP_KERNEL);
         if (!vgdev->capsets) {
                 DRM_ERROR("failed to allocate cap sets\n");
                 return;
         }
 
         for (i = 0; i < num_capsets; i++) {
                 virtio_gpu_cmd_get_capset_info(vgdev, i);
+                virtio_gpu_notify(vgdev);
                 ret = wait_event_timeout(vgdev->resp_wq,
                                          vgdev->capsets[i].id > 0, 5 * HZ);
-                if (ret == 0) {
+                /*
+                 * Capability ids are defined in the virtio-gpu spec and are
+                 * between 1 to 63, inclusive.
+                 */
+                if (!vgdev->capsets[i].id ||
+                    vgdev->capsets[i].id > MAX_CAPSET_ID)
+                        invalid_capset_id = true;
+
+                if (ret == 0)
                         DRM_ERROR("timed out waiting for cap set %d\n", i);
-                        kfree(vgdev->capsets);
+                else if (invalid_capset_id)
+                        DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
+
+                if (ret == 0 || invalid_capset_id) {
+                        spin_lock(&vgdev->display_info_lock);
+                        drmm_kfree(drm, vgdev->capsets);
                         vgdev->capsets = NULL;
+                        spin_unlock(&vgdev->display_info_lock);
                         return;
                 }
+
+                vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id;
                 DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
                          i, vgdev->capsets[i].id,
                          vgdev->capsets[i].max_version,
                          vgdev->capsets[i].max_size);
         }
+
         vgdev->num_capsets = num_capsets;
 }
 
-int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 {
-        static vq_callback_t *callbacks[] = {
-                virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+        struct virtqueue_info vqs_info[] = {
+                { "control", virtio_gpu_ctrl_ack },
+                { "cursor", virtio_gpu_cursor_ack },
         };
-        static const char * const names[] = { "control", "cursor" };
-
         struct virtio_gpu_device *vgdev;
         /* this will expand later */
         struct virtqueue *vqs[2];
         u32 num_scanouts, num_capsets;
-        int ret;
+        int ret = 0;
 
-        if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
+        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                 return -ENODEV;
 
-        vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
+        vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
         if (!vgdev)
                 return -ENOMEM;
 
         vgdev->ddev = dev;
         dev->dev_private = vgdev;
-        vgdev->vdev = dev_to_virtio(dev->dev);
-        vgdev->dev = dev->dev;
+        vgdev->vdev = vdev;
 
         spin_lock_init(&vgdev->display_info_lock);
+        spin_lock_init(&vgdev->resource_export_lock);
+        spin_lock_init(&vgdev->host_visible_lock);
         ida_init(&vgdev->ctx_id_ida);
         ida_init(&vgdev->resource_ida);
         init_waitqueue_head(&vgdev->resp_wq);
@@ -150,20 +154,60 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
         INIT_WORK(&vgdev->config_changed_work,
                   virtio_gpu_config_changed_work_func);
 
+        INIT_WORK(&vgdev->obj_free_work,
+                  virtio_gpu_array_put_free_work);
+        INIT_LIST_HEAD(&vgdev->obj_free_list);
+        spin_lock_init(&vgdev->obj_free_lock);
+
 #ifdef __LITTLE_ENDIAN
         if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
                 vgdev->has_virgl_3d = true;
-        DRM_INFO("virgl 3d acceleration %s\n",
-                 vgdev->has_virgl_3d ? "enabled" : "not supported by host");
-#else
-        DRM_INFO("virgl 3d acceleration not supported by guest\n");
 #endif
-        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
                 vgdev->has_edid = true;
-                DRM_INFO("EDID support available.\n");
+
+        if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
+                vgdev->has_indirect = true;
+
+        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
+                vgdev->has_resource_assign_uuid = true;
+
+        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
+                vgdev->has_resource_blob = true;
+
+        if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
+                                  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
+                if (!devm_request_mem_region(&vgdev->vdev->dev,
+                                             vgdev->host_visible_region.addr,
+                                             vgdev->host_visible_region.len,
+                                             dev_name(&vgdev->vdev->dev))) {
+                        DRM_ERROR("Could not reserve host visible region\n");
+                        ret = -EBUSY;
+                        goto err_vqs;
+                }
+
+                DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
+                         (unsigned long)vgdev->host_visible_region.addr,
+                         (unsigned long)vgdev->host_visible_region.len);
+                vgdev->has_host_visible = true;
+                drm_mm_init(&vgdev->host_visible_mm,
+                            (unsigned long)vgdev->host_visible_region.addr,
+                            (unsigned long)vgdev->host_visible_region.len);
         }
 
-        ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
+                vgdev->has_context_init = true;
+
+        DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
+                 vgdev->has_virgl_3d ? '+' : '-',
+                 vgdev->has_edid ? '+' : '-',
+                 vgdev->has_resource_blob ? '+' : '-',
+                 vgdev->has_host_visible ? '+' : '-');
+
+        DRM_INFO("features: %ccontext_init\n",
+                 vgdev->has_context_init ? '+' : '-');
+
+        ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
         if (ret) {
                 DRM_ERROR("failed to find virt queues\n");
                 goto err_vqs;
@@ -176,56 +220,51 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
                 goto err_vbufs;
         }
 
-        ret = virtio_gpu_ttm_init(vgdev);
-        if (ret) {
-                DRM_ERROR("failed to init ttm %d\n", ret);
-                goto err_ttm;
-        }
-
         /* get display info */
-        virtio_cread(vgdev->vdev, struct virtio_gpu_config,
-                     num_scanouts, &num_scanouts);
+        virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+                        num_scanouts, &num_scanouts);
         vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
                                     VIRTIO_GPU_MAX_SCANOUTS);
-        if (!vgdev->num_scanouts) {
-                DRM_ERROR("num_scanouts is zero\n");
-                ret = -EINVAL;
-                goto err_scanouts;
+
+        if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
+                DRM_INFO("KMS disabled\n");
+                vgdev->num_scanouts = 0;
+                vgdev->has_edid = false;
+                dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+        } else {
+                DRM_INFO("number of scanouts: %d\n", num_scanouts);
         }
-        DRM_INFO("number of scanouts: %d\n", num_scanouts);
 
-        virtio_cread(vgdev->vdev, struct virtio_gpu_config,
-                     num_capsets, &num_capsets);
+        virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+                        num_capsets, &num_capsets);
         DRM_INFO("number of cap sets: %d\n", num_capsets);
 
         ret = virtio_gpu_modeset_init(vgdev);
-        if (ret)
-                goto err_modeset;
+        if (ret) {
+                DRM_ERROR("modeset init failed\n");
+                goto err_scanouts;
+        }
 
         virtio_device_ready(vgdev->vdev);
-        vgdev->vqs_ready = true;
 
         if (num_capsets)
                 virtio_gpu_get_capsets(vgdev, num_capsets);
 
-        if (vgdev->has_edid)
-                virtio_gpu_cmd_get_edids(vgdev);
-        virtio_gpu_cmd_get_display_info(vgdev);
-        wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
-                           5 * HZ);
-        if (virtio_gpu_fbdev)
-                virtio_gpu_fbdev_init(vgdev);
-
+        if (vgdev->num_scanouts) {
+                if (vgdev->has_edid)
+                        virtio_gpu_cmd_get_edids(vgdev);
+                virtio_gpu_cmd_get_display_info(vgdev);
+                virtio_gpu_notify(vgdev);
+                wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+                                   5 * HZ);
+        }
         return 0;
 
-err_modeset:
 err_scanouts:
-        virtio_gpu_ttm_fini(vgdev);
-err_ttm:
         virtio_gpu_free_vbufs(vgdev);
 err_vbufs:
         vgdev->vdev->config->del_vqs(vgdev->vdev);
 err_vqs:
-        kfree(vgdev);
+        dev->dev_private = NULL;
         return ret;
 }
 
@@ -239,30 +278,38 @@ static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
         }
 }
 
-void virtio_gpu_driver_unload(struct drm_device *dev)
+void virtio_gpu_deinit(struct drm_device *dev)
 {
         struct virtio_gpu_device *vgdev = dev->dev_private;
 
-        vgdev->vqs_ready = false;
+        flush_work(&vgdev->obj_free_work);
         flush_work(&vgdev->ctrlq.dequeue_work);
         flush_work(&vgdev->cursorq.dequeue_work);
         flush_work(&vgdev->config_changed_work);
+        virtio_reset_device(vgdev->vdev);
         vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+        struct virtio_gpu_device *vgdev = dev->dev_private;
+
+        if (!vgdev)
+                return;
 
         virtio_gpu_modeset_fini(vgdev);
-        virtio_gpu_ttm_fini(vgdev);
         virtio_gpu_free_vbufs(vgdev);
         virtio_gpu_cleanup_cap_cache(vgdev);
-        kfree(vgdev->capsets);
-        kfree(vgdev);
+
+        if (vgdev->has_host_visible)
+                drm_mm_takedown(&vgdev->host_visible_mm);
 }
 
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 {
         struct virtio_gpu_device *vgdev = dev->dev_private;
         struct virtio_gpu_fpriv *vfpriv;
-        int id;
-        char dbgname[TASK_COMM_LEN];
+        int handle;
 
         /* can't create contexts without 3d renderer */
         if (!vgdev->has_virgl_3d)
@@ -273,14 +320,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
         if (!vfpriv)
                 return -ENOMEM;
 
-        get_task_comm(dbgname, current);
-        id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
-        if (id < 0) {
+        mutex_init(&vfpriv->context_lock);
+
+        handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+        if (handle < 0) {
                 kfree(vfpriv);
-                return id;
+                return handle;
         }
 
-        vfpriv->ctx_id = id;
+        vfpriv->ctx_id = handle + 1;
         file->driver_priv = vfpriv;
         return 0;
 }
@@ -288,14 +336,18 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
         struct virtio_gpu_device *vgdev = dev->dev_private;
-        struct virtio_gpu_fpriv *vfpriv;
+        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 
         if (!vgdev->has_virgl_3d)
                 return;
 
-        vfpriv = file->driver_priv;
+        if (vfpriv->context_created) {
+                virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+                virtio_gpu_notify(vgdev);
+        }
 
-        virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+        ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
+        mutex_destroy(&vfpriv->context_lock);
         kfree(vfpriv);
         file->driver_priv = NULL;
 }
