Diffstat (limited to 'drivers/gpu/drm/virtio')
 drivers/gpu/drm/virtio/Kconfig           |   1
 drivers/gpu/drm/virtio/virtgpu_display.c |  12
 drivers/gpu/drm/virtio/virtgpu_drv.c     |  66
 drivers/gpu/drm/virtio/virtgpu_drv.h     |  34
 drivers/gpu/drm/virtio/virtgpu_fence.c   |  16
 drivers/gpu/drm/virtio/virtgpu_gem.c     |  46
 drivers/gpu/drm/virtio/virtgpu_ioctl.c   |   6
 drivers/gpu/drm/virtio/virtgpu_kms.c     |   9
 drivers/gpu/drm/virtio/virtgpu_object.c  |  24
 drivers/gpu/drm/virtio/virtgpu_plane.c   | 237
 drivers/gpu/drm/virtio/virtgpu_prime.c   | 182
 drivers/gpu/drm/virtio/virtgpu_submit.c  |   8
 drivers/gpu/drm/virtio/virtgpu_trace.h   |   2
 drivers/gpu/drm/virtio/virtgpu_vq.c      | 201
 drivers/gpu/drm/virtio/virtgpu_vram.c    |   9
15 files changed, 752 insertions, 101 deletions
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ea06ff2aa4b4..fc884fb57b7e 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU
 	tristate "Virtio GPU driver"
 	depends on DRM && VIRTIO_MENU && MMU
 	select VIRTIO
+	select DRM_CLIENT_SELECTION
 	select DRM_KMS_HELPER
 	select DRM_GEM_SHMEM_HELPER
 	select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index ad924a8502e9..59a45e74a641 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -164,11 +164,9 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
 	struct drm_display_mode *mode = NULL;
 	int count, width, height;
 
-	if (output->edid) {
-		count = drm_add_edid_modes(connector, output->edid);
-		if (count)
-			return count;
-	}
+	count = drm_edid_connector_add_modes(connector);
+	if (count)
+		return count;
 
 	width = le32_to_cpu(output->info.r.width);
 	height = le32_to_cpu(output->info.r.height);
@@ -191,7 +189,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
 }
 
 static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
-						       struct drm_display_mode *mode)
+						       const struct drm_display_mode *mode)
 {
 	struct virtio_gpu_output *output =
 		drm_connector_to_virtio_gpu_output(connector);
@@ -369,5 +367,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
 		return;
 
 	for (i = 0 ; i < vgdev->num_scanouts; ++i)
-		kfree(vgdev->outputs[i].edid);
+		drm_edid_free(vgdev->outputs[i].drm_edid);
 }
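
Note: the virtgpu_display.c hunks complete the driver's move from the raw struct edid to the opaque drm_edid API, where parsed mode data is cached on the connector. For context, the usual lifecycle looks roughly like this (a sketch; connector, read_block_fn and ctx are placeholders, not names from this patch):

    const struct drm_edid *drm_edid;

    /* read the EDID through a device-specific block reader */
    drm_edid = drm_edid_read_custom(connector, read_block_fn, ctx);
    /* update display_info and the EDID connector property */
    drm_edid_connector_update(connector, drm_edid);
    /* in .get_modes(), add modes from the EDID cached on the connector */
    count = drm_edid_connector_add_modes(connector);
    /* ownership stays with the driver; free with the matching helper */
    drm_edid_free(drm_edid);
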
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f8e9abe647b9..e32e680c7197 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -26,20 +26,24 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/aperture.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
+#include <linux/vgaarb.h>
 #include <linux/wait.h>
 
+#include <drm/clients/drm_client_setup.h>
 #include <drm/drm.h>
-#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_shmem.h>
 #include <drm/drm_file.h>
 
 #include "virtgpu_drv.h"
 
+#define PCI_DEVICE_ID_VIRTIO_GPU 0x1050
+
 static const struct drm_driver driver;
 
 static int virtio_gpu_modeset = -1;
@@ -58,7 +62,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev)
 		 vga ? "virtio-vga" : "virtio-gpu-pci", pname);
 
 	if (vga) {
-		ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+		ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
 		if (ret)
 			return ret;
 	}
@@ -94,6 +98,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
 			goto err_free;
 	}
 
+	dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
 	ret = virtio_gpu_init(vdev, dev);
 	if (ret)
 		goto err_free;
@@ -102,7 +107,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
 	if (ret)
 		goto err_deinit;
 
-	drm_fbdev_generic_setup(vdev->priv, 32);
+	drm_client_setup(vdev->priv, NULL);
+
 	return 0;
 
 err_deinit:
@@ -122,6 +128,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
 	drm_dev_put(dev);
 }
 
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+	/*
+	 * drm does its own synchronization on shutdown.
+	 * Do nothing here, opt out of device reset.
+	 */
+}
+
 static void virtio_gpu_config_changed(struct virtio_device *vdev)
 {
 	struct drm_device *dev = vdev->priv;
@@ -153,14 +167,50 @@ static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name = KBUILD_MODNAME,
-	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
 	.probe = virtio_gpu_probe,
 	.remove = virtio_gpu_remove,
+	.shutdown = virtio_gpu_shutdown,
 	.config_changed = virtio_gpu_config_changed
 };
 
-module_virtio_driver(virtio_gpu_driver);
+static int __init virtio_gpu_driver_init(void)
+{
+	struct pci_dev *pdev;
+	int ret;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_REDHAT_QUMRANET,
+			      PCI_DEVICE_ID_VIRTIO_GPU,
+			      NULL);
+	if (pdev && pci_is_vga(pdev)) {
+		ret = vga_get_interruptible(pdev,
+					    VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+		if (ret) {
+			pci_dev_put(pdev);
+			return ret;
+		}
+	}
+
+	ret = register_virtio_driver(&virtio_gpu_driver);
+
+	if (pdev) {
+		if (pci_is_vga(pdev))
+			vga_put(pdev,
+				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+
+		pci_dev_put(pdev);
+	}
+
+	return ret;
+}
+
+static void __exit virtio_gpu_driver_exit(void)
+{
+	unregister_virtio_driver(&virtio_gpu_driver);
+}
+
+module_init(virtio_gpu_driver_init);
+module_exit(virtio_gpu_driver_exit);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio GPU driver");
@@ -182,7 +232,8 @@ static const struct drm_driver driver = {
 	.postclose = virtio_gpu_driver_postclose,
 
 	.dumb_create = virtio_gpu_mode_dumb_create,
-	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+
+	DRM_FBDEV_SHMEM_DRIVER_OPS,
 
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = virtio_gpu_debugfs_init,
@@ -198,7 +249,6 @@ static const struct drm_driver driver = {
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
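
Note: module_virtio_driver() is only shorthand for registering the driver at module init and unregistering it at exit; it expands to roughly the code below, so the open-coded version above changes nothing except wrapping register_virtio_driver() in a VGA-arbiter get/put so virtio-vga does not race with the firmware console over legacy VGA resources.

    static int __init virtio_gpu_driver_init(void)
    {
            return register_virtio_driver(&virtio_gpu_driver);
    }
    module_init(virtio_gpu_driver_init);

    static void __exit virtio_gpu_driver_exit(void)
    {
            unregister_virtio_driver(&virtio_gpu_driver);
    }
    module_exit(virtio_gpu_driver_exit);
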
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index bb7d86a0c6a1..f17660a71a3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -45,7 +45,6 @@
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
 
 #define DRIVER_MAJOR 0
 #define DRIVER_MINOR 1
@@ -89,9 +88,11 @@ struct virtio_gpu_object_params {
 
 struct virtio_gpu_object {
 	struct drm_gem_shmem_object base;
+	struct sg_table *sgt;
 	uint32_t hw_res_handle;
 	bool dumb;
 	bool created;
+	bool attached;
 	bool host3d_blob, guest_blob;
 	uint32_t blob_mem, blob_flags;
 
@@ -179,7 +180,7 @@ struct virtio_gpu_output {
 	struct drm_encoder enc;
 	struct virtio_gpu_display_one info;
 	struct virtio_gpu_update_cursor cursor;
-	struct edid *edid;
+	const struct drm_edid *drm_edid;
 	int cur_x;
 	int cur_y;
 	bool needs_modeset;
@@ -194,6 +195,13 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_plane_state {
+	struct drm_plane_state base;
+	struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+	container_of(x, struct virtio_gpu_plane_state, base)
+
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
 	spinlock_t qlock;
@@ -301,10 +309,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 				struct drm_device *dev,
 				struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
-			      struct drm_device *dev,
-			      uint32_t handle, uint64_t *offset_p);
 
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
 struct virtio_gpu_object_array*
 virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles,
 			      u32 nents);
@@ -329,12 +335,21 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 				   struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+					     uint64_t offset,
+					     uint32_t width, uint32_t height,
+					     uint32_t x, uint32_t y,
+					     struct virtio_gpu_object_array *objs);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
 					uint32_t x, uint32_t y,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+					 uint32_t resource_id,
+					 uint32_t x, uint32_t y,
+					 uint32_t width, uint32_t height);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -349,6 +364,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj,
 			      struct virtio_gpu_mem_entry *ents,
 			      unsigned int nents);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj,
+			      struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output);
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -399,6 +418,7 @@ void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
 int
@@ -468,6 +488,10 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+			       unsigned int *nents,
+			       struct virtio_gpu_object *bo,
+			       struct dma_buf_attachment *attach);
 
 /* virtgpu_debugfs.c */
 void virtio_gpu_debugfs_init(struct drm_minor *minor);
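
Note: virtio_gpu_plane_state subclasses drm_plane_state with base as the first member, so the generic drm_atomic_helper_plane_destroy_state() (which does __drm_atomic_helper_plane_destroy_state() plus kfree()) still frees the container correctly; only the duplicate hook needs a driver override, added in the virtgpu_plane.c hunks below. An explicit destroy hook, if one were ever needed, would look like this sketch (example_plane_destroy_state is illustrative, not part of the patch):

    static void example_plane_destroy_state(struct drm_plane *plane,
                                            struct drm_plane_state *state)
    {
            struct virtio_gpu_plane_state *s = to_virtio_gpu_plane_state(state);

            __drm_atomic_helper_plane_destroy_state(&s->base);
            kfree(s);
    }
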
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
 	return false;
 }
 
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
-	snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
-					  int size)
-{
-	struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
-	snprintf(str, size, "%llu",
-		 (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
 static const struct dma_fence_ops virtio_gpu_fence_ops = {
 	.get_driver_name = virtio_gpu_get_driver_name,
 	.get_timeline_name = virtio_gpu_get_timeline_name,
 	.signaled = virtio_gpu_fence_signaled,
-	.fence_value_str = virtio_gpu_fence_value_str,
-	.timeline_value_str = virtio_gpu_timeline_value_str,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
 	return ret;
 }
 
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
-			      struct drm_device *dev,
-			      uint32_t handle, uint64_t *offset_p)
-{
-	struct drm_gem_object *gobj;
-
-	BUG_ON(!offset_p);
-	gobj = drm_gem_object_lookup(file_priv, handle);
-	if (gobj == NULL)
-		return -ENOENT;
-	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
-	drm_gem_object_put(gobj);
-	return 0;
-}
-
 int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 			       struct drm_file *file)
 {
@@ -127,15 +112,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 	/* the context might still be missing when the first ioctl is
 	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
 	 */
-	virtio_gpu_create_context(obj->dev, file);
+	if (!vgdev->has_context_init)
+		virtio_gpu_create_context(obj->dev, file);
 
-	objs = virtio_gpu_array_alloc(1);
-	if (!objs)
-		return -ENOMEM;
-	virtio_gpu_array_add_obj(objs, obj);
+	if (vfpriv->context_created) {
+		objs = virtio_gpu_array_alloc(1);
+		if (!objs)
+			return -ENOMEM;
+		virtio_gpu_array_add_obj(objs, obj);
+
+		virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+	}
 
-	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
-					       objs);
 out_notify:
 	virtio_gpu_notify(vgdev);
 	return 0;
@@ -161,6 +149,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 	virtio_gpu_notify(vgdev);
 }
 
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+	struct virtio_gpu_object_array *objs;
+
+	objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+	if (!objs)
+		return NULL;
+
+	objs->nents = 0;
+	objs->total = 1;
+	return objs;
+}
+
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
 {
 	struct virtio_gpu_object_array *objs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index e4f76f315550..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -80,9 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_map *virtio_gpu_map = data;
 
-	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
-					 virtio_gpu_map->handle,
-					 &virtio_gpu_map->offset);
+	return drm_gem_dumb_map_offset(file, vgdev->ddev,
+				       virtio_gpu_map->handle,
+				       &virtio_gpu_map->offset);
 }
 
 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
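
Note: drm_gem_dumb_map_offset() is the core helper the removed virtio_gpu_mode_dumb_mmap() duplicated. Unlike the driver copy, it also creates the mmap offset on demand and refuses imported buffers. Simplified sketch of the core's logic (see drivers/gpu/drm/drm_gem.c):

    obj = drm_gem_object_lookup(file, handle);
    if (!obj)
            return -ENOENT;
    if (obj->import_attach)         /* imported buffers map via the exporter */
            ret = -EINVAL;
    else if (!(ret = drm_gem_create_mmap_offset(obj)))
            *offset = drm_vma_node_offset_addr(&obj->vma_node);
    drm_gem_object_put(obj);
    return ret;
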
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 5a3b5aaed1f3..7dfb2006c561 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -116,11 +116,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 {
-	static vq_callback_t *callbacks[] = {
-		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+	struct virtqueue_info vqs_info[] = {
+		{ "control", virtio_gpu_ctrl_ack },
+		{ "cursor", virtio_gpu_cursor_ack },
 	};
-	static const char * const names[] = { "control", "cursor" };
-
 	struct virtio_gpu_device *vgdev;
 	/* this will expand later */
 	struct virtqueue *vqs[2];
@@ -207,7 +206,7 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 	DRM_INFO("features: %ccontext_init\n",
 		 vgdev->has_context_init ? '+' : '-');
 
-	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
 	if (ret) {
 		DRM_ERROR("failed to find virt queues\n");
 		goto err_vqs;
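
Note: this follows the virtio core's replacement of the parallel callbacks[]/names[] arrays with a single struct virtqueue_info array. The structure (from include/linux/virtio_config.h) bundles the per-queue data:

    struct virtqueue_info {
            const char *name;
            vq_callback_t *callback;
            bool ctx;
    };
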
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index c7e74cf13022..5517cff8715c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -80,6 +80,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 			drm_gem_free_mmap_offset(&vram->base.base.base);
 			drm_gem_object_release(&vram->base.base.base);
 			kfree(vram);
+		} else {
+			drm_gem_object_release(&bo->base.base);
+			kfree(bo);
 		}
 	}
 }
@@ -97,6 +100,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
 	virtio_gpu_cleanup_object(bo);
 }
 
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+	struct virtio_gpu_fence *fence;
+
+	if (!bo->attached)
+		return 0;
+
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+	if (!fence)
+		return -ENOMEM;
+
+	virtio_gpu_object_detach(vgdev, bo, fence);
+	virtio_gpu_notify(vgdev);
+
+	dma_fence_wait(&fence->f, false);
+	dma_fence_put(&fence->f);
+
+	return 0;
+}
+
 static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
 	.free = virtio_gpu_free_object,
 	.open = virtio_gpu_gem_object_open,
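
Note: the fenced detach exists so the dma-buf import code can unmap safely: DETACH_BACKING is queued with a fence and the caller blocks until the host acknowledges it has dropped the pages; only then may the sg_table be unmapped. The consumer is virtgpu_dma_buf_unmap() in the virtgpu_prime.c hunks below, which relies on this ordering:

    virtio_gpu_detach_object_fenced(bo);    /* waits for the host's ack */
    dma_buf_unmap_attachment(attach, bo->sgt, DMA_BIDIRECTIONAL);
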
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a72a2dbda031..698ea7adb951 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,10 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
 
 #include "virtgpu_drv.h"
 
@@ -66,11 +70,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
 	return format;
 }
 
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct virtio_gpu_plane_state *new;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+	return &new->base;
+}
+
 static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
 	.update_plane		= drm_atomic_helper_update_plane,
 	.disable_plane		= drm_atomic_helper_disable_plane,
 	.reset			= drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 };
 
@@ -108,6 +129,30 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	return ret;
 }
 
+/* For drm panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+					   struct drm_plane_state *state,
+					   struct drm_rect *rect)
+{
+	struct virtio_gpu_object *bo =
+		gem_to_virtio_gpu_obj(state->fb->obj[0]);
+	struct virtio_gpu_object_array *objs;
+	uint32_t w = rect->x2 - rect->x1;
+	uint32_t h = rect->y2 - rect->y1;
+	uint32_t x = rect->x1;
+	uint32_t y = rect->y1;
+	uint32_t off = x * state->fb->format->cpp[0] +
+		y * state->fb->pitches[0];
+
+	objs = virtio_gpu_panic_array_alloc();
+	if (!objs)
+		return -ENOMEM;
+	virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+	return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+							objs);
+}
+
 static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
 				      struct drm_plane_state *state,
 				      struct drm_rect *rect)
@@ -131,6 +176,24 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
 					   objs, NULL);
 }
 
+/* For drm_panic */
+static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+					    uint32_t x, uint32_t y,
+					    uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_object *bo;
+
+	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+	virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+					    width, height);
+	virtio_gpu_panic_notify(vgdev);
+}
+
 static void virtio_gpu_resource_flush(struct drm_plane *plane,
 				      uint32_t x, uint32_t y,
 				      uint32_t width, uint32_t height)
@@ -138,11 +201,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo;
 
 	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(plane->state);
 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-	if (vgfb->fence) {
+	if (vgplane_st->fence) {
 		struct virtio_gpu_object_array *objs;
 
 		objs = virtio_gpu_array_alloc(1);
@@ -151,13 +216,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
 		virtio_gpu_array_lock_resv(objs);
 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
-					      width, height, objs, vgfb->fence);
+					      width, height, objs,
+					      vgplane_st->fence);
 		virtio_gpu_notify(vgdev);
-
-		dma_fence_wait_timeout(&vgfb->fence->f, true,
+		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
 				       msecs_to_jiffies(50));
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
 	} else {
 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
 					      width, height, NULL, NULL);
@@ -241,45 +304,121 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 				  rect.y2 - rect.y1);
 }
 
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+					   struct drm_plane_state *new_state,
+					   struct drm_gem_object *obj)
+{
+	struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct dma_buf_attachment *attach = obj->import_attach;
+	struct dma_resv *resv = attach->dmabuf->resv;
+	struct virtio_gpu_mem_entry *ents = NULL;
+	unsigned int nents;
+	int ret;
+
+	dma_resv_lock(resv, NULL);
+
+	ret = dma_buf_pin(attach);
+	if (ret) {
+		dma_resv_unlock(resv);
+		return ret;
+	}
+
+	if (!bo->sgt) {
+		ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+						 bo, attach);
+		if (ret)
+			goto err;
+
+		virtio_gpu_object_attach(vgdev, bo, ents, nents);
+	}
+
+	dma_resv_unlock(resv);
+	return 0;
+
+err:
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
+	return ret;
+}
+
 static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 				       struct drm_plane_state *new_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo;
+	struct drm_gem_object *obj;
+	int ret;
 
 	if (!new_state->fb)
 		return 0;
 
 	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(new_state);
 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+	drm_gem_plane_helper_prepare_fb(plane, new_state);
+
 	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
 		return 0;
 
-	if (bo->dumb && (plane->state->fb != new_state->fb)) {
-		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+	obj = new_state->fb->obj[0];
+	if (bo->dumb || drm_gem_is_imported(obj)) {
+		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+							   vgdev->fence_drv.context,
 							   0);
-		if (!vgfb->fence)
+		if (!vgplane_st->fence)
 			return -ENOMEM;
 	}
 
+	if (drm_gem_is_imported(obj)) {
+		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+		if (ret)
+			goto err_fence;
+	}
+
 	return 0;
+
+err_fence:
+	if (vgplane_st->fence) {
+		dma_fence_put(&vgplane_st->fence->f);
+		vgplane_st->fence = NULL;
+	}
+
+	return ret;
+}
+
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+	struct dma_buf_attachment *attach = obj->import_attach;
+	struct dma_resv *resv = attach->dmabuf->resv;
+
+	dma_resv_lock(resv, NULL);
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
 }
 
 static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
 					struct drm_plane_state *state)
 {
-	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
+	struct drm_gem_object *obj;
 
 	if (!state->fb)
 		return;
 
-	vgfb = to_virtio_gpu_framebuffer(state->fb);
-	if (vgfb->fence) {
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
+	vgplane_st = to_virtio_gpu_plane_state(state);
+	if (vgplane_st->fence) {
+		dma_fence_put(&vgplane_st->fence->f);
+		vgplane_st->fence = NULL;
 	}
+
+	obj = state->fb->obj[0];
+	if (drm_gem_is_imported(obj))
+		virtio_gpu_cleanup_imported_obj(obj);
 }
 
 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -291,6 +430,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo = NULL;
 	uint32_t handle;
 
@@ -303,6 +443,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
 	if (plane->state->fb) {
 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+		vgplane_st = to_virtio_gpu_plane_state(plane->state);
 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 		handle = bo->hw_res_handle;
 	} else {
@@ -322,11 +463,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, 0,
 			 plane->state->crtc_w,
 			 plane->state->crtc_h,
-			 0, 0, objs, vgfb->fence);
+			 0, 0, objs, vgplane_st->fence);
 		virtio_gpu_notify(vgdev);
-		dma_fence_wait(&vgfb->fence->f, true);
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
+		dma_fence_wait(&vgplane_st->fence->f, true);
 	}
 
 	if (plane->state->fb != old_state->fb) {
@@ -359,11 +498,71 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	virtio_gpu_cursor_ping(vgdev, output);
 }
 
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+					 struct drm_scanout_buffer *sb)
+{
+	struct virtio_gpu_object *bo;
+
+	if (!plane->state || !plane->state->fb || !plane->state->visible)
+		return -ENODEV;
+
+	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+	if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
+		return -ENODEV;
+
+	if (bo->base.vaddr) {
+		iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+	} else {
+		struct drm_gem_shmem_object *shmem = &bo->base;
+
+		if (!shmem->pages)
+			return -ENODEV;
+		/* map scanout buffer later */
+		sb->pages = shmem->pages;
+	}
+
+	sb->format = plane->state->fb->format;
+	sb->height = plane->state->fb->height;
+	sb->width = plane->state->fb->width;
+	sb->pitch[0] = plane->state->fb->pitches[0];
+	return 0;
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+	struct virtio_gpu_object *bo;
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_rect rect;
+
+	rect.x1 = 0;
+	rect.y1 = 0;
+	rect.x2 = plane->state->fb->width;
+	rect.y2 = plane->state->fb->height;
+
+	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+	if (bo->dumb) {
+		if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
+						    &rect))
+			return;
+	}
+
+	virtio_gpu_panic_resource_flush(plane,
+					plane->state->src_x >> 16,
+					plane->state->src_y >> 16,
+					plane->state->src_w >> 16,
+					plane->state->src_h >> 16);
+}
+
 static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
 	.prepare_fb		= virtio_gpu_plane_prepare_fb,
 	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
 	.atomic_check		= virtio_gpu_plane_atomic_check,
 	.atomic_update		= virtio_gpu_primary_plane_update,
+	.get_scanout_buffer	= virtio_drm_get_scanout_buffer,
+	.panic_flush		= virtio_panic_flush,
 };
 
 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
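
Note: .get_scanout_buffer and .panic_flush implement the drm_panic contract: they run from panic context and therefore must not sleep, take blocking locks, or allocate with GFP_KERNEL (hence the GFP_ATOMIC command paths added to virtgpu_vq.c below). The handler describes the framebuffer via struct drm_scanout_buffer, which looks roughly like this (abridged sketch of include/drm/drm_panic.h):

    struct drm_scanout_buffer {
            const struct drm_format_info *format;
            struct iosys_map map[DRM_FORMAT_MAX_PLANES];
            unsigned int width;
            unsigned int height;
            unsigned int pitch[DRM_FORMAT_MAX_PLANES];
            struct page **pages;    /* for buffers without a kernel mapping */
    };
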
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 44425f20d91a..1118a0250279 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
 
 #include "virtgpu_drv.h"
 
+MODULE_IMPORT_NS("DMA_BUF");
+
 static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 				   uuid_t *uuid)
 {
@@ -73,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 
 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
 	.ops = {
-		.cache_sgt_mapping = true,
 		.attach = virtio_dma_buf_attach,
 		.detach = drm_gem_map_detach,
 		.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -142,10 +143,161 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 	return buf;
 }
 
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+			       unsigned int *nents,
+			       struct virtio_gpu_object *bo,
+			       struct dma_buf_attachment *attach)
+{
+	struct scatterlist *sl;
+	struct sg_table *sgt;
+	long i, ret;
+
+	dma_resv_assert_held(attach->dmabuf->resv);
+
+	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+				    DMA_RESV_USAGE_KERNEL,
+				    false, MAX_SCHEDULE_TIMEOUT);
+	if (ret <= 0)
+		return ret < 0 ? ret : -ETIMEDOUT;
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	*ents = kvmalloc_array(sgt->nents,
+			       sizeof(struct virtio_gpu_mem_entry),
+			       GFP_KERNEL);
+	if (!(*ents)) {
+		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+		return -ENOMEM;
+	}
+
+	*nents = sgt->nents;
+	for_each_sgtable_dma_sg(sgt, sl, i) {
+		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+		(*ents)[i].padding = 0;
+	}
+
+	bo->sgt = sgt;
+	return 0;
+}
+
+static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
+{
+	struct dma_buf_attachment *attach = bo->base.base.import_attach;
+
+	dma_resv_assert_held(attach->dmabuf->resv);
+
+	if (bo->created) {
+		virtio_gpu_detach_object_fenced(bo);
+
+		if (bo->sgt)
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		bo->sgt = NULL;
+	}
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+
+	if (drm_gem_is_imported(obj)) {
+		struct dma_buf *dmabuf = obj->dma_buf;
+
+		dma_resv_lock(dmabuf->resv, NULL);
+		virtgpu_dma_buf_unmap(bo);
+		dma_resv_unlock(dmabuf->resv);
+
+		dma_buf_detach(dmabuf, obj->import_attach);
+		dma_buf_put(dmabuf);
+	}
+
+	if (bo->created) {
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+				    struct virtio_gpu_object *bo,
+				    struct dma_buf_attachment *attach)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_object_params params = { 0 };
+	struct dma_resv *resv = attach->dmabuf->resv;
+	struct virtio_gpu_mem_entry *ents = NULL;
+	unsigned int nents;
+	int ret;
+
+	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+	if (ret) {
+		virtgpu_dma_buf_free_obj(&bo->base.base);
+		return ret;
+	}
+
+	dma_resv_lock(resv, NULL);
+
+	ret = dma_buf_pin(attach);
+	if (ret)
+		goto err_pin;
+
+	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+	if (ret)
+		goto err_import;
+
+	params.blob = true;
+	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+	params.size = attach->dmabuf->size;
+
+	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+					    ents, nents);
+	bo->guest_blob = true;
+
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
+
+	return 0;
+
+err_import:
+	dma_buf_unpin(attach);
+err_pin:
+	dma_resv_unlock(resv);
+	virtgpu_dma_buf_free_obj(&bo->base.base);
+	return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+	.free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	virtgpu_dma_buf_unmap(bo);
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+	.allow_peer2peer = true,
+	.move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct dma_buf_attachment *attach;
+	struct virtio_gpu_object *bo;
 	struct drm_gem_object *obj;
+	int ret;
 
 	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
 		obj = buf->priv;
@@ -159,7 +311,33 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 		}
 	}
 
-	return drm_gem_prime_import(dev, buf);
+	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+		return drm_gem_prime_import(dev, buf);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = &bo->base.base;
+	obj->resv = buf->resv;
+	obj->funcs = &virtgpu_gem_dma_buf_funcs;
+	drm_gem_private_object_init(dev, obj, buf->size);
+
+	attach = dma_buf_dynamic_attach(buf, dev->dev,
+					&virtgpu_dma_buf_attach_ops, obj);
+	if (IS_ERR(attach)) {
+		kfree(bo);
+		return ERR_CAST(attach);
+	}
+
+	obj->import_attach = attach;
+	get_dma_buf(buf);
+
+	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
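
Note: this turns virtio-gpu into a dynamic dma-buf importer: the attachment registers a move_notify callback, mappings are made only under the exporter's reservation lock, and the buffer is pinned while the device depends on it. The MODULE_IMPORT_NS("DMA_BUF") above is needed because the pin/unpin and dynamic-attach entry points are exported into the DMA_BUF symbol namespace, e.g.:

    /* drivers/dma-buf/dma-buf.c */
    EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
    EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
    EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
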
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 5c514946bbad..7d34cf83f5f2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
 static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
 				    struct dma_fence *in_fence)
 {
-	u32 context = submit->fence_ctx + submit->ring_idx;
+	u64 context = submit->fence_ctx + submit->ring_idx;
 
 	if (dma_fence_match_context(in_fence, context))
 		return 0;
@@ -99,8 +99,8 @@ virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
 		return 0;
 
 	/*
-	 * kvalloc at first tries to allocate memory using kmalloc and
-	 * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+	 * kvmalloc() at first tries to allocate memory using kmalloc() and
+	 * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
 	 * internally for allocations larger than a page size, preventing
 	 * storm of KMSG warnings.
 	 */
@@ -529,7 +529,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	virtio_gpu_submit(&submit);
 
 	/*
-	 * Set up usr-out data after submitting the job to optimize
+	 * Set up user-out data after submitting the job to optimize
 	 * the job submission path.
 	 */
 	virtio_gpu_install_out_fence_fd(&submit);
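
Note: the u32 -> u64 change in virtio_gpu_do_fence_wait() fixes a silent truncation: fence contexts are 64-bit, and dma_fence_match_context() compares the full width (from include/linux/dma-fence.h):

    static inline bool dma_fence_match_context(struct dma_fence *fence, u64 context)
    {
            return fence->context == context;
    }
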
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 031bc77689d5..227bf0ae7ed5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -25,7 +25,7 @@ DECLARE_EVENT_CLASS(virtio_gpu_cmd,
 	TP_fast_assign(
 			__entry->dev = vq->vdev->index;
 			__entry->vq = vq->index;
-			__assign_str(name, vq->name);
+			__assign_str(name);
 			__entry->type = le32_to_cpu(hdr->type);
 			__entry->flags = le32_to_cpu(hdr->flags);
 			__entry->fence_id = le64_to_cpu(hdr->fence_id);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index b1a00c0c25a7..55a15e247dd1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -86,6 +86,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 	vgdev->vbufs = NULL;
 }
 
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+	vbuf->size = size;
+	vbuf->resp_cb = NULL;
+	vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+	vbuf->resp_buf = (void *)vbuf->buf + size;
+	return vbuf;
+}
+
 static struct virtio_gpu_vbuffer*
 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 		    int size, int resp_size, void *resp_buf,
@@ -137,6 +153,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 }
 
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+					     struct virtio_gpu_vbuffer **vbuffer_p,
+					     int cmd_size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+	*vbuffer_p = vbuf;
+	return (struct virtio_gpu_command *)vbuf->buf;
+}
+
 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 				       virtio_gpu_resp_cb cb,
 				       struct virtio_gpu_vbuffer **vbuffer_p,
@@ -311,6 +339,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 	return sgt;
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+					   struct virtio_gpu_vbuffer *vbuf,
+					   int elemcnt,
+					   struct scatterlist **sgs,
+					   int outcnt,
+					   int incnt)
+{
+	struct virtqueue *vq = vgdev->ctrlq.vq;
+	int ret;
+
+	if (vgdev->has_indirect)
+		elemcnt = 1;
+
+	if (vq->num_free < elemcnt)
+		return -ENOMEM;
+
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+	WARN_ON(ret);
+
+	vbuf->seqno = ++vgdev->ctrlq.seqno;
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+	atomic_inc(&vgdev->pending_commands);
+
+	return 0;
+}
+
 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 				     struct virtio_gpu_vbuffer *vbuf,
 				     struct virtio_gpu_fence *fence,
@@ -368,6 +424,32 @@ again:
 	return 0;
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					      struct virtio_gpu_vbuffer *vbuf)
+{
+	struct scatterlist *sgs[3], vcmd, vresp;
+	int elemcnt = 0, outcnt = 0, incnt = 0;
+
+	/* set up vcmd */
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	elemcnt++;
+	sgs[outcnt] = &vcmd;
+	outcnt++;
+
+	/* set up vresp */
+	if (vbuf->resp_size) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		elemcnt++;
+		sgs[outcnt + incnt] = &vresp;
+		incnt++;
+	}
+
+	return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+					       elemcnt, sgs,
+					       outcnt, incnt);
+}
+
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_fence *fence)
@@ -422,6 +504,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	return ret;
 }
 
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+	bool notify;
+
+	if (!atomic_read(&vgdev->pending_commands))
+		return;
+
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
+}
+
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
 	bool notify;
@@ -567,6 +664,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+					 uint32_t resource_id,
+					 uint32_t x, uint32_t y,
+					 uint32_t width, uint32_t height)
+{
+	struct virtio_gpu_resource_flush *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = NULL;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -591,6 +711,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+					     uint64_t offset,
+					     uint32_t width, uint32_t height,
+					     uint32_t x, uint32_t y,
+					     struct virtio_gpu_object_array *objs)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	struct virtio_gpu_transfer_to_host_2d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+					    bo->base.sgt, DMA_TO_DEVICE);
+
+	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
@@ -645,6 +796,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+				       uint32_t resource_id,
+				       struct virtio_gpu_fence *fence)
+{
+	struct virtio_gpu_resource_detach_backing *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf)
 {
@@ -741,21 +909,21 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
 	uint32_t scanout = le32_to_cpu(cmd->scanout);
 	struct virtio_gpu_output *output;
-	struct edid *new_edid, *old_edid;
+	const struct drm_edid *new_edid, *old_edid;
 
 	if (scanout >= vgdev->num_scanouts)
 		return;
 	output = vgdev->outputs + scanout;
 
-	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
-	drm_connector_update_edid_property(&output->conn, new_edid);
+	new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
+	drm_edid_connector_update(&output->conn, new_edid);
 
 	spin_lock(&vgdev->display_info_lock);
-	old_edid = output->edid;
-	output->edid = new_edid;
+	old_edid = output->drm_edid;
+	output->drm_edid = new_edid;
 	spin_unlock(&vgdev->display_info_lock);
 
-	kfree(old_edid);
+	drm_edid_free(old_edid);
 	wake_up(&vgdev->resp_wq);
 }
 
@@ -1103,8 +1271,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_mem_entry *ents,
 			      unsigned int nents)
 {
+	if (obj->attached)
+		return;
+
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
 					       ents, nents, NULL);
+
+	obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj,
+			      struct virtio_gpu_fence *fence)
+{
+	if (!obj->attached)
+		return;
+
+	virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+					       fence);
+
+	obj->attached = false;
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1265,6 +1451,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	bo->created = true;
+
+	if (nents)
+		bo->attached = true;
 }
 
 void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 25df81c02783..5ad3b7c6f73c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -37,6 +37,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
 	unsigned long vm_size = vma->vm_end - vma->vm_start;
+	unsigned long vm_end;
 
 	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
 		return -EINVAL;
@@ -56,12 +57,14 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
 	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	/* Partial mappings of GEM buffers don't happen much in practice. */
-	if (vm_size != vram->vram_node.size)
+	if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
+		return -EINVAL;
+
+	if (vm_end > vram->vram_node.size)
 		return -EINVAL;
 
 	ret = io_remap_pfn_range(vma, vma->vm_start,
-				 vram->vram_node.start >> PAGE_SHIFT,
+				 (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
 				 vm_size, vma->vm_page_prot);
 	return ret;
 }
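
Note: the virtgpu_vram.c change replaces the "whole buffer only" check with overflow-safe bounds checking, so partial mappings at an offset become legal. A worked example, assuming a 1 MiB VRAM node and 4 KiB pages: mmap()ing 64 KiB at vm_pgoff == 16 gives vm_end = 16 * 4096 + 65536 = 131072, which is within 1 MiB, so the mapping is accepted and io_remap_pfn_range() starts 16 pages into the node:

    pfn = (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff;
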