Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig            |   1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c  |   1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  |  56
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c      |  71
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h      |  48
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c    |  16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c      |  46
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c    |  47
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c      |  30
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c   |  30
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c    | 258
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c    | 183
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_submit.c   |   9
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_trace.h    |   2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c       | 204
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c     |   9
16 files changed, 867 insertions, 144 deletions
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ea06ff2aa4b4..fc884fb57b7e 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU
 	tristate "Virtio GPU driver"
 	depends on DRM && VIRTIO_MENU && MMU
 	select VIRTIO
+	select DRM_CLIENT_SELECTION
 	select DRM_KMS_HELPER
 	select DRM_GEM_SHMEM_HELPER
 	select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 853dd9aa397e..3a68a16b58ae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -27,6 +27,7 @@
 
 #include <drm/drm_debugfs.h>
 #include <drm/drm_file.h>
+#include <drm/drm_print.h>
 
 #include "virtgpu_drv.h"
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index ad924a8502e9..6a962c1d6e95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,8 +30,11 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
 
 #include "virtgpu_drv.h"
 
@@ -55,6 +58,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	DRM_CRTC_VBLANK_TIMER_FUNCS,
 };
 
 static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
@@ -66,6 +70,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
 static int
 virtio_gpu_framebuffer_init(struct drm_device *dev,
 			    struct virtio_gpu_framebuffer *vgfb,
+			    const struct drm_format_info *info,
 			    const struct drm_mode_fb_cmd2 *mode_cmd,
 			    struct drm_gem_object *obj)
 {
@@ -73,7 +78,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
 	int ret;
 
 	vgfb->base.obj[0] = obj;
 
-	drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+	drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd);
 
 	ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
 	if (ret) {
@@ -98,6 +103,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
 					  struct drm_atomic_state *state)
 {
+	drm_crtc_vblank_on(crtc);
 }
 
 static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -107,6 +113,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
+	drm_crtc_vblank_off(crtc);
+
 	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
 	virtio_gpu_notify(vgdev);
 }
@@ -120,9 +128,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
 static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 					 struct drm_atomic_state *state)
 {
-	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
-									  crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+	struct drm_pending_vblank_event *event;
 
 	/*
 	 * virtio-gpu can't do modeset and plane update operations
@@ -130,9 +139,22 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * in the plane update callback, and here we just check
 	 * whenever we must force the modeset.
 	 */
-	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+	if (drm_atomic_crtc_needs_modeset(crtc_state))
 		output->needs_modeset = true;
+
+	spin_lock_irq(&dev->event_lock);
+
+	event = crtc_state->event;
+	crtc_state->event = NULL;
+
+	if (event) {
+		if (drm_crtc_vblank_get(crtc) == 0)
+			drm_crtc_arm_vblank_event(crtc, event);
+		else
+			drm_crtc_send_vblank_event(crtc, event);
 	}
+
+	spin_unlock_irq(&dev->event_lock);
 }
 
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -164,11 +186,9 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
 	struct drm_display_mode *mode = NULL;
 	int count, width, height;
 
-	if (output->edid) {
-		count = drm_add_edid_modes(connector, output->edid);
-		if (count)
-			return count;
-	}
+	count = drm_edid_connector_add_modes(connector);
+	if (count)
+		return count;
 
 	width = le32_to_cpu(output->info.r.width);
 	height = le32_to_cpu(output->info.r.height);
@@ -191,7 +211,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
 }
 
 static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
-				      struct drm_display_mode *mode)
+				      const struct drm_display_mode *mode)
 {
 	struct virtio_gpu_output *output =
 		drm_connector_to_virtio_gpu_output(connector);
@@ -259,6 +279,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
 	struct drm_encoder *encoder = &output->enc;
 	struct drm_crtc *crtc = &output->crtc;
 	struct drm_plane *primary, *cursor;
+	int ret;
 
 	output->index = index;
 	if (index == 0) {
@@ -273,8 +294,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
 	cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
 	if (IS_ERR(cursor))
 		return PTR_ERR(cursor);
-	drm_crtc_init_with_planes(dev, crtc, primary, cursor,
-				  &virtio_gpu_crtc_funcs, NULL);
+	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+					&virtio_gpu_crtc_funcs, NULL);
+	if (ret)
+		return ret;
 	drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
 
 	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
@@ -295,6 +318,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
 static struct drm_framebuffer *
 virtio_gpu_user_framebuffer_create(struct drm_device *dev,
 				   struct drm_file *file_priv,
+				   const struct drm_format_info *info,
 				   const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj = NULL;
@@ -316,7 +340,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj);
 	if (ret) {
 		kfree(virtio_gpu_fb);
 		drm_gem_object_put(obj);
@@ -357,6 +381,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
 	for (i = 0 ; i < vgdev->num_scanouts; ++i)
 		vgdev_output_init(vgdev, i);
 
+	ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
+	if (ret)
+		return ret;
+
 	drm_mode_config_reset(vgdev->ddev);
 	return 0;
 }
@@ -369,5 +397,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
 		return;
 
 	for (i = 0 ; i < vgdev->num_scanouts; ++i)
-		kfree(vgdev->outputs[i].edid);
+		drm_edid_free(vgdev->outputs[i].drm_edid);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 644b8ee51009..a5ce96fb8a1d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -26,20 +26,25 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/aperture.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
+#include <linux/vgaarb.h>
 #include <linux/wait.h>
 
+#include <drm/clients/drm_client_setup.h>
 #include <drm/drm.h>
-#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_shmem.h>
 #include <drm/drm_file.h>
+#include <drm/drm_print.h>
 
 #include "virtgpu_drv.h"
 
+#define PCI_DEVICE_ID_VIRTIO_GPU 0x1050
+
 static const struct drm_driver driver;
 
 static int virtio_gpu_modeset = -1;
@@ -51,14 +56,14 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	const char *pname = dev_name(&pdev->dev);
-	bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+	bool vga = pci_is_vga(pdev);
 	int ret;
 
 	DRM_INFO("pci: %s detected at %s\n",
 		 vga ? "virtio-vga" : "virtio-gpu-pci", pname);
 	if (vga) {
-		ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+		ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
 		if (ret)
 			return ret;
 	}
@@ -94,6 +99,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
 			goto err_free;
 	}
 
+	dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
 	ret = virtio_gpu_init(vdev, dev);
 	if (ret)
 		goto err_free;
@@ -102,7 +108,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
 	if (ret)
 		goto err_deinit;
 
-	drm_fbdev_generic_setup(vdev->priv, 32);
+	drm_client_setup(vdev->priv, NULL);
+
 	return 0;
 
 err_deinit:
@@ -122,6 +129,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
 	drm_dev_put(dev);
 }
 
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+	struct drm_device *dev = vdev->priv;
+
+	/* stop talking to the device */
+	drm_dev_unplug(dev);
+}
+
 static void virtio_gpu_config_changed(struct virtio_device *vdev)
 {
 	struct drm_device *dev = vdev->priv;
@@ -153,14 +168,50 @@ static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name = KBUILD_MODNAME,
-	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
 	.probe = virtio_gpu_probe,
 	.remove = virtio_gpu_remove,
+	.shutdown = virtio_gpu_shutdown,
 	.config_changed = virtio_gpu_config_changed
 };
 
-module_virtio_driver(virtio_gpu_driver);
+static int __init virtio_gpu_driver_init(void)
+{
+	struct pci_dev *pdev;
+	int ret;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_REDHAT_QUMRANET,
+			      PCI_DEVICE_ID_VIRTIO_GPU,
+			      NULL);
+	if (pdev && pci_is_vga(pdev)) {
+		ret = vga_get_interruptible(pdev,
+					    VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+		if (ret) {
+			pci_dev_put(pdev);
+			return ret;
+		}
+	}
+
+	ret = register_virtio_driver(&virtio_gpu_driver);
+
+	if (pdev) {
+		if (pci_is_vga(pdev))
+			vga_put(pdev,
+				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+
+		pci_dev_put(pdev);
+	}
+
+	return ret;
+}
+
+static void __exit virtio_gpu_driver_exit(void)
+{
+	unregister_virtio_driver(&virtio_gpu_driver);
+}
+
+module_init(virtio_gpu_driver_init);
+module_exit(virtio_gpu_driver_exit);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio GPU driver");
@@ -177,12 +228,13 @@ static const struct drm_driver driver = {
 	 * out via drm_device::driver_features:
 	 */
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
-			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
 	.open = virtio_gpu_driver_open,
 	.postclose = virtio_gpu_driver_postclose,
 
 	.dumb_create = virtio_gpu_mode_dumb_create,
-	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+
+	DRM_FBDEV_SHMEM_DRIVER_OPS,
 
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = virtio_gpu_debugfs_init,
@@ -198,7 +250,6 @@ static const struct drm_driver driver = {
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 4126c384286b..f17660a71a3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -45,7 +45,6 @@
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
 
 #define DRIVER_MAJOR 0
 #define DRIVER_MINOR 1
@@ -58,6 +57,9 @@
 #define MAX_CAPSET_ID 63
 #define MAX_RINGS 64
 
+/* See virtio_gpu_ctx_create. One additional character for NULL terminator. */
+#define DEBUG_NAME_MAX_LEN 65
+
 struct virtio_gpu_object_params {
 	unsigned long size;
 	bool dumb;
@@ -86,9 +88,11 @@ struct virtio_gpu_object_params {
 
 struct virtio_gpu_object {
 	struct drm_gem_shmem_object base;
+	struct sg_table *sgt;
 	uint32_t hw_res_handle;
 	bool dumb;
 	bool created;
+	bool attached;
 	bool host3d_blob, guest_blob;
 	uint32_t blob_mem, blob_flags;
 
@@ -119,7 +123,7 @@ struct virtio_gpu_object_array {
 	struct ww_acquire_ctx ticket;
 	struct list_head next;
 	u32 nents, total;
-	struct drm_gem_object *objs[];
+	struct drm_gem_object *objs[] __counted_by(total);
 };
 
 struct virtio_gpu_vbuffer;
@@ -176,7 +180,7 @@ struct virtio_gpu_output {
 	struct drm_encoder enc;
 	struct virtio_gpu_display_one info;
 	struct virtio_gpu_update_cursor cursor;
-	struct edid *edid;
+	const struct drm_edid *drm_edid;
 	int cur_x;
 	int cur_y;
 	bool needs_modeset;
@@ -191,6 +195,13 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
 
+struct virtio_gpu_plane_state {
+	struct drm_plane_state base;
+	struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+	container_of(x, struct virtio_gpu_plane_state, base)
+
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
 	spinlock_t qlock;
@@ -274,6 +285,8 @@ struct virtio_gpu_fpriv {
 	uint64_t base_fence_ctx;
 	uint64_t ring_idx_mask;
 	struct mutex context_lock;
+	char debug_name[DEBUG_NAME_MAX_LEN];
+	bool explicit_debug_name;
 };
 
 /* virtgpu_ioctl.c */
@@ -296,10 +309,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 				struct drm_device *dev,
 				struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
-			      struct drm_device *dev,
-			      uint32_t handle, uint64_t *offset_p);
 
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
 struct virtio_gpu_object_array*
 virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
@@ -324,12 +335,21 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 				   struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+					     uint64_t offset,
+					     uint32_t width, uint32_t height,
+					     uint32_t x, uint32_t y,
+					     struct virtio_gpu_object_array *objs);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
 					uint32_t x, uint32_t y,
 					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+					 uint32_t resource_id,
+					 uint32_t x, uint32_t y,
+					 uint32_t width, uint32_t height);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -344,8 +364,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj,
 			      struct virtio_gpu_mem_entry *ents,
 			      unsigned int nents);
-int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
-int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj,
+			      struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output);
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -394,11 +416,9 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
-void virtio_gpu_fence_ack(struct virtqueue *vq);
 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
-void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
 int
@@ -465,11 +485,13 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 					 int flags);
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf);
-int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
-			       uuid_t *uuid);
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+			       unsigned int *nents,
+			       struct virtio_gpu_object *bo,
+			       struct dma_buf_attachment *attach);
 
 /* virtgpu_debugfs.c */
 void virtio_gpu_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
 	return false;
 }
 
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
-	snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
-					  int size)
-{
-	struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
-	snprintf(str, size, "%llu",
-		 (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
 static const struct dma_fence_ops virtio_gpu_fence_ops = {
 	.get_driver_name = virtio_gpu_get_driver_name,
 	.get_timeline_name = virtio_gpu_get_timeline_name,
 	.signaled = virtio_gpu_fence_signaled,
-	.fence_value_str = virtio_gpu_fence_value_str,
-	.timeline_value_str = virtio_gpu_timeline_value_str,
 };
 
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
 	return ret;
 }
 
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
-			      struct drm_device *dev,
-			      uint32_t handle, uint64_t *offset_p)
-{
-	struct drm_gem_object *gobj;
-
-	BUG_ON(!offset_p);
-	gobj = drm_gem_object_lookup(file_priv, handle);
-	if (gobj == NULL)
-		return -ENOENT;
-	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
-	drm_gem_object_put(gobj);
-	return 0;
-}
-
 int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 			       struct drm_file *file)
 {
@@ -127,15 +112,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 	/* the context might still be missing when the first ioctl is
 	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
 	 */
-	virtio_gpu_create_context(obj->dev, file);
+	if (!vgdev->has_context_init)
+		virtio_gpu_create_context(obj->dev, file);
 
-	objs = virtio_gpu_array_alloc(1);
-	if (!objs)
-		return -ENOMEM;
-	virtio_gpu_array_add_obj(objs, obj);
+	if (vfpriv->context_created) {
+		objs = virtio_gpu_array_alloc(1);
+		if (!objs)
+			return -ENOMEM;
+		virtio_gpu_array_add_obj(objs, obj);
+
+		virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+	}
 
-	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
-					       objs);
 out_notify:
 	virtio_gpu_notify(vgdev);
 	return 0;
@@ -161,6 +149,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 	virtio_gpu_notify(vgdev);
 }
 
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+	struct virtio_gpu_object_array *objs;
+
+	objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+	if (!objs)
+		return NULL;
+
+	objs->nents = 0;
+	objs->total = 1;
+	return objs;
+}
+
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
 {
 	struct virtio_gpu_object_array *objs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b24b11f25197..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -42,12 +42,19 @@
 static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
 					     struct virtio_gpu_fpriv *vfpriv)
 {
-	char dbgname[TASK_COMM_LEN];
+	if (vfpriv->explicit_debug_name) {
+		virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+					      vfpriv->context_init,
+					      strlen(vfpriv->debug_name),
+					      vfpriv->debug_name);
+	} else {
+		char dbgname[TASK_COMM_LEN];
 
-	get_task_comm(dbgname, current);
-	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
-				      vfpriv->context_init, strlen(dbgname),
-				      dbgname);
+		get_task_comm(dbgname, current);
+		virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+					      vfpriv->context_init, strlen(dbgname),
+					      dbgname);
+	}
 
 	vfpriv->context_created = true;
 }
@@ -73,9 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_map *virtio_gpu_map = data;
 
-	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
-					 virtio_gpu_map->handle,
-					 &virtio_gpu_map->offset);
+	return drm_gem_dumb_map_offset(file, vgdev->ddev,
+				       virtio_gpu_map->handle,
+				       &virtio_gpu_map->offset);
 }
 
 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
@@ -107,6 +114,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
 		value = vgdev->capset_id_mask;
 		break;
+	case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
+		value = vgdev->has_context_init ? 1 : 0;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -565,8 +575,8 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
 				void *data, struct drm_file *file)
 {
 	int ret = 0;
-	uint32_t num_params, i, param, value;
-	uint64_t valid_ring_mask;
+	uint32_t num_params, i;
+	uint64_t valid_ring_mask, param, value;
 	size_t len;
 	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -580,7 +590,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
 		return -EINVAL;
 
 	/* Number of unique parameters supported at this time. */
-	if (num_params > 3)
+	if (num_params > 4)
 		return -EINVAL;
 
 	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
@@ -642,6 +652,21 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
 
 			vfpriv->ring_idx_mask = value;
 			break;
+		case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
+			if (vfpriv->explicit_debug_name) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			ret = strncpy_from_user(vfpriv->debug_name,
+						u64_to_user_ptr(value),
+						DEBUG_NAME_MAX_LEN - 1);
+			if (ret < 0)
+				goto out_unlock;
+
+			vfpriv->explicit_debug_name = true;
+			ret = 0;
+			break;
 		default:
 			ret = -EINVAL;
 			goto out_unlock;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 5a3b5aaed1f3..f3594695bb82 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -29,6 +29,7 @@
 
 #include <drm/drm_file.h>
 #include <drm/drm_managed.h>
+#include <drm/drm_print.h>
 
 #include "virtgpu_drv.h"
 
@@ -116,11 +117,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 {
-	static vq_callback_t *callbacks[] = {
-		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+	struct virtqueue_info vqs_info[] = {
+		{ "control", virtio_gpu_ctrl_ack },
+		{ "cursor", virtio_gpu_cursor_ack },
 	};
-	static const char * const names[] = { "control", "cursor" };
-
 	struct virtio_gpu_device *vgdev;
 	/* this will expand later */
 	struct virtqueue *vqs[2];
@@ -163,18 +163,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
 		vgdev->has_virgl_3d = true;
 #endif
-	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
 		vgdev->has_edid = true;
-	}
-	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
 		vgdev->has_indirect = true;
-	}
-	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
 		vgdev->has_resource_assign_uuid = true;
-	}
-	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
 		vgdev->has_resource_blob = true;
-	}
+
 	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
 				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
 		if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -194,9 +194,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 			 (unsigned long)vgdev->host_visible_region.addr,
 			 (unsigned long)vgdev->host_visible_region.len);
 	}
-	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
 		vgdev->has_context_init = true;
-	}
 
 	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
 		 vgdev->has_virgl_3d ? '+' : '-',
@@ -207,7 +207,7 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
 	DRM_INFO("features: %ccontext_init\n",
 		 vgdev->has_context_init ? '+' : '-');
 
-	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
 	if (ret) {
 		DRM_ERROR("failed to find virt queues\n");
 		goto err_vqs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index c7e74cf13022..4270bfede7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -26,6 +26,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
+#include <drm/drm_print.h>
+
 #include "virtgpu_drv.h"
 
 static int virtio_gpu_virglrenderer_workaround = 1;
@@ -47,6 +49,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
 		*resid = handle + 1;
 	} else {
 		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
 		if (handle < 0)
 			return handle;
 		*resid = handle + 1;
@@ -56,9 +59,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
 
 static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
 {
-	if (!virtio_gpu_virglrenderer_workaround) {
+	if (!virtio_gpu_virglrenderer_workaround)
 		ida_free(&vgdev->resource_ida, id - 1);
-	}
 }
 
 void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
@@ -80,6 +82,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 		drm_gem_free_mmap_offset(&vram->base.base.base);
 		drm_gem_object_release(&vram->base.base.base);
 		kfree(vram);
+	} else {
+		drm_gem_object_release(&bo->base.base);
+		kfree(bo);
 	}
 }
 
@@ -97,6 +102,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
 	virtio_gpu_cleanup_object(bo);
 }
 
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+	struct virtio_gpu_fence *fence;
+
+	if (!bo->attached)
+		return 0;
+
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+	if (!fence)
+		return -ENOMEM;
+
+	virtio_gpu_object_detach(vgdev, bo, fence);
+	virtio_gpu_notify(vgdev);
+
+	dma_fence_wait(&fence->f, false);
+	dma_fence_put(&fence->f);
+
+	return 0;
+}
+
 static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
 	.free = virtio_gpu_free_object,
 	.open = virtio_gpu_gem_object_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a000..a7863f8ee4ee 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,11 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
 
 #include "virtgpu_drv.h"
 
@@ -66,11 +71,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
 	return format;
 }
 
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct virtio_gpu_plane_state *new;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+	return &new->base;
+}
+
 static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.reset = drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
@@ -79,6 +101,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 										 plane);
+	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+										 plane);
 	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
 	struct drm_crtc_state *crtc_state;
 	int ret;
@@ -86,10 +110,18 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
 		return 0;
 
+	/*
+	 * Ignore damage clips if the framebuffer attached to the plane's state
+	 * has changed since the last plane update (page-flip). In this case, a
+	 * full plane update should happen because uploads are done per-buffer.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		new_plane_state->ignore_damage_clips = true;
+
 	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
 	if (IS_ERR(crtc_state))
-                return PTR_ERR(crtc_state);
+		return PTR_ERR(crtc_state);
 
 	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
 						  DRM_PLANE_NO_SCALING,
@@ -98,6 +130,30 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	return ret;
 }
 
+/* For drm panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+					   struct drm_plane_state *state,
+					   struct drm_rect *rect)
+{
+	struct virtio_gpu_object *bo =
+		gem_to_virtio_gpu_obj(state->fb->obj[0]);
+	struct virtio_gpu_object_array *objs;
+	uint32_t w = rect->x2 - rect->x1;
+	uint32_t h = rect->y2 - rect->y1;
+	uint32_t x = rect->x1;
+	uint32_t y = rect->y1;
+	uint32_t off = x * state->fb->format->cpp[0] +
+		y * state->fb->pitches[0];
+
+	objs = virtio_gpu_panic_array_alloc();
+	if (!objs)
+		return -ENOMEM;
+	virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+	return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+							objs);
+}
+
 static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
 				      struct drm_plane_state *state,
 				      struct drm_rect *rect)
@@ -121,6 +177,24 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
 					   objs, NULL);
 }
 
+/* For drm_panic */
+static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+					    uint32_t x, uint32_t y,
+					    uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_object *bo;
+
+	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+	virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+					    width, height);
+	virtio_gpu_panic_notify(vgdev);
+}
+
 static void virtio_gpu_resource_flush(struct drm_plane *plane,
 				      uint32_t x, uint32_t y,
 				      uint32_t width, uint32_t height)
@@ -128,11 +202,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo;
 
 	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(plane->state);
 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
-	if (vgfb->fence) {
+	if (vgplane_st->fence) {
 		struct virtio_gpu_object_array *objs;
 
 		objs = virtio_gpu_array_alloc(1);
@@ -141,13 +217,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
 		virtio_gpu_array_lock_resv(objs);
 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
-					      width, height, objs, vgfb->fence);
+					      width, height, objs,
+					      vgplane_st->fence);
 		virtio_gpu_notify(vgdev);
-
-		dma_fence_wait_timeout(&vgfb->fence->f, true,
+		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
 				       msecs_to_jiffies(50));
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
 	} else {
 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
 					      width, height, NULL, NULL);
@@ -231,45 +305,121 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 				  rect.y2 - rect.y1);
 }
 
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+					   struct drm_plane_state *new_state,
+					   struct drm_gem_object *obj)
+{
+	struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct dma_buf_attachment *attach = obj->import_attach;
+	struct dma_resv *resv = attach->dmabuf->resv;
+	struct virtio_gpu_mem_entry *ents = NULL;
+	unsigned int nents;
+	int ret;
+
+	dma_resv_lock(resv, NULL);
+
+	ret = dma_buf_pin(attach);
+	if (ret) {
+		dma_resv_unlock(resv);
+		return ret;
+	}
+
+	if (!bo->sgt) {
+		ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+						 bo, attach);
+		if (ret)
+			goto err;
+
+		virtio_gpu_object_attach(vgdev, bo, ents, nents);
+	}
+
+	dma_resv_unlock(resv);
+	return 0;
+
+err:
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
+	return ret;
+}
+
 static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
 				       struct drm_plane_state *new_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo;
+	struct drm_gem_object *obj;
+	int ret;
 
 	if (!new_state->fb)
 		return 0;
 
 	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(new_state);
 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+	drm_gem_plane_helper_prepare_fb(plane, new_state);
+
 	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
 		return 0;
 
-	if (bo->dumb && (plane->state->fb != new_state->fb)) {
-		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+	obj = new_state->fb->obj[0];
+	if (bo->dumb || drm_gem_is_imported(obj)) {
+		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+							   vgdev->fence_drv.context,
 						     0);
-		if (!vgfb->fence)
+		if (!vgplane_st->fence)
 			return -ENOMEM;
 	}
 
+	if (drm_gem_is_imported(obj)) {
+		ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+		if (ret)
+			goto err_fence;
+	}
+
 	return 0;
+
+err_fence:
+	if (vgplane_st->fence) {
+		dma_fence_put(&vgplane_st->fence->f);
+		vgplane_st->fence = NULL;
+	}
+
+	return ret;
+}
+
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+	struct dma_buf_attachment *attach = obj->import_attach;
+	struct dma_resv *resv = attach->dmabuf->resv;
+
+	dma_resv_lock(resv, NULL);
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
 }
 
 static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
 					struct drm_plane_state *state)
 {
-	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
+	struct drm_gem_object *obj;
 
 	if (!state->fb)
 		return;
 
-	vgfb = to_virtio_gpu_framebuffer(state->fb);
-	if (vgfb->fence) {
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
+	vgplane_st = to_virtio_gpu_plane_state(state);
+	if (vgplane_st->fence) {
+		dma_fence_put(&vgplane_st->fence->f);
+		vgplane_st->fence = NULL;
 	}
+
+	obj = state->fb->obj[0];
+	if (drm_gem_is_imported(obj))
+		virtio_gpu_cleanup_imported_obj(obj);
 }
 
 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -281,6 +431,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
 	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
 	struct virtio_gpu_object *bo = NULL;
 	uint32_t handle;
 
@@ -293,6 +444,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
 	if (plane->state->fb) {
 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+		vgplane_st = to_virtio_gpu_plane_state(plane->state);
 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 		handle = bo->hw_res_handle;
 	} else {
@@ -312,27 +464,25 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, 0,
 			 plane->state->crtc_w,
 			 plane->state->crtc_h,
-			 0, 0, objs, vgfb->fence);
+			 0, 0, objs, vgplane_st->fence);
 		virtio_gpu_notify(vgdev);
-		dma_fence_wait(&vgfb->fence->f, true);
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
+		dma_fence_wait(&vgplane_st->fence->f, true);
 	}
 
 	if (plane->state->fb != old_state->fb) {
 		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
 			  plane->state->crtc_x,
 			  plane->state->crtc_y,
-			  plane->state->fb ? plane->state->fb->hot_x : 0,
-			  plane->state->fb ? plane->state->fb->hot_y : 0);
+			  plane->state->hotspot_x,
+			  plane->state->hotspot_y);
 		output->cursor.hdr.type =
 			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
 		output->cursor.resource_id = cpu_to_le32(handle);
 		if (plane->state->fb) {
 			output->cursor.hot_x =
-				cpu_to_le32(plane->state->fb->hot_x);
+				cpu_to_le32(plane->state->hotspot_x);
 			output->cursor.hot_y =
-				cpu_to_le32(plane->state->fb->hot_y);
+				cpu_to_le32(plane->state->hotspot_y);
 		} else {
 			output->cursor.hot_x = cpu_to_le32(0);
 			output->cursor.hot_y = cpu_to_le32(0);
@@ -349,11 +499,71 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	virtio_gpu_cursor_ping(vgdev, output);
 }
 
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+					 struct drm_scanout_buffer *sb)
+{
+	struct virtio_gpu_object *bo;
+
+	if (!plane->state || !plane->state->fb || !plane->state->visible)
+		return -ENODEV;
+
+	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+	if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
+		return -ENODEV;
+
+	if (bo->base.vaddr) {
+		iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+	} else {
+		struct drm_gem_shmem_object *shmem = &bo->base;
+
+		if (!shmem->pages)
+			return -ENODEV;
+		/* map scanout buffer later */
+		sb->pages = shmem->pages;
+	}
+
+	sb->format = plane->state->fb->format;
+	sb->height = plane->state->fb->height;
+	sb->width = plane->state->fb->width;
+	sb->pitch[0] = plane->state->fb->pitches[0];
+	return 0;
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+	struct virtio_gpu_object *bo;
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_rect rect;
+
+	rect.x1 = 0;
+	rect.y1 = 0;
+	rect.x2 = plane->state->fb->width;
+	rect.y2 = plane->state->fb->height;
+
+	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+	if (bo->dumb) {
+		if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
+						    &rect))
+			return;
+	}
+
+	virtio_gpu_panic_resource_flush(plane,
+					plane->state->src_x >> 16,
+					plane->state->src_y >> 16,
+					plane->state->src_w >> 16,
+					plane->state->src_h >> 16);
+}
+
 static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
 	.prepare_fb = virtio_gpu_plane_prepare_fb,
 	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
 	.atomic_check = virtio_gpu_plane_atomic_check,
 	.atomic_update = virtio_gpu_primary_plane_update,
+	.get_scanout_buffer = virtio_drm_get_scanout_buffer,
+	.panic_flush = virtio_panic_flush,
 };
 
 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 44425f20d91a..ce49282198cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
 
 #include "virtgpu_drv.h"
 
+MODULE_IMPORT_NS("DMA_BUF");
+
 static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 				   uuid_t *uuid)
 {
@@ -73,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 
 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
 	.ops = {
-		.cache_sgt_mapping = true,
 		.attach = virtio_dma_buf_attach,
 		.detach = drm_gem_map_detach,
 		.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -142,10 +143,162 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 	return buf;
 }
 
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+			       unsigned int *nents,
+			       struct virtio_gpu_object *bo,
+			       struct dma_buf_attachment *attach)
+{
+	struct scatterlist *sl;
+	struct sg_table *sgt;
+	long i, ret;
+
+	dma_resv_assert_held(attach->dmabuf->resv);
+
+	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+				    DMA_RESV_USAGE_KERNEL,
+				    false, MAX_SCHEDULE_TIMEOUT);
+	if (ret <= 0)
+		return ret < 0 ? ret : -ETIMEDOUT;
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	*ents = kvmalloc_array(sgt->nents,
+			       sizeof(struct virtio_gpu_mem_entry),
+			       GFP_KERNEL);
+	if (!(*ents)) {
+		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+		return -ENOMEM;
+	}
+
+	*nents = sgt->nents;
+	for_each_sgtable_dma_sg(sgt, sl, i) {
+		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+		(*ents)[i].padding = 0;
+	}
+
+	bo->sgt = sgt;
+	return 0;
+}
+
+static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
+{
+	struct dma_buf_attachment *attach = bo->base.base.import_attach;
+
+	dma_resv_assert_held(attach->dmabuf->resv);
+
+	if (bo->created) {
+		virtio_gpu_detach_object_fenced(bo);
+
+		if (bo->sgt)
+			dma_buf_unmap_attachment(attach, bo->sgt,
+						 DMA_BIDIRECTIONAL);
+
+		bo->sgt = NULL;
+	}
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct dma_buf_attachment *attach = obj->import_attach;
+
+	if (drm_gem_is_imported(obj)) {
+		struct dma_buf *dmabuf = attach->dmabuf;
+
+		dma_resv_lock(dmabuf->resv, NULL);
+		virtgpu_dma_buf_unmap(bo);
+		dma_resv_unlock(dmabuf->resv);
+
+		dma_buf_detach(dmabuf, attach);
+		dma_buf_put(dmabuf);
+	}
+
+	if (bo->created) {
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+				    struct virtio_gpu_object *bo,
+				    struct dma_buf_attachment *attach)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_object_params params = { 0 };
+	struct dma_resv *resv = attach->dmabuf->resv;
+	struct virtio_gpu_mem_entry *ents = NULL;
+	unsigned int nents;
+	int ret;
+
+	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+	if (ret) {
+		virtgpu_dma_buf_free_obj(&bo->base.base);
+		return ret;
+	}
+
+	dma_resv_lock(resv, NULL);
+
+	ret = dma_buf_pin(attach);
+	if (ret)
+		goto err_pin;
+
+	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+	if (ret)
+		goto err_import;
+
+	params.blob = true;
+	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+	params.size = attach->dmabuf->size;
+
+	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+					    ents, nents);
+	bo->guest_blob = true;
+
+	dma_buf_unpin(attach);
+	dma_resv_unlock(resv);
+
+	return 0;
+
+err_import:
+	dma_buf_unpin(attach);
+err_pin:
+	dma_resv_unlock(resv);
+	virtgpu_dma_buf_free_obj(&bo->base.base);
+	return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+	.free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	virtgpu_dma_buf_unmap(bo);
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+	.allow_peer2peer = true,
+	.move_notify = virtgpu_dma_buf_move_notify
+};
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 						struct dma_buf *buf)
 {
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct dma_buf_attachment *attach;
+	struct virtio_gpu_object *bo;
 	struct drm_gem_object *obj;
+	int ret;
 
 	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
 		obj = buf->priv;
@@ -159,7 +312,33 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
 		}
 	}
 
-	return drm_gem_prime_import(dev, buf);
+	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+		return drm_gem_prime_import(dev, buf);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	obj = &bo->base.base;
+	obj->resv = buf->resv;
+	obj->funcs = &virtgpu_gem_dma_buf_funcs;
+	drm_gem_private_object_init(dev, obj, buf->size);
+
+	attach = dma_buf_dynamic_attach(buf, dev->dev,
+					&virtgpu_dma_buf_attach_ops, obj);
+	if (IS_ERR(attach)) {
+		kfree(bo);
+		return ERR_CAST(attach);
+	}
+
+	obj->import_attach = attach;
+	get_dma_buf(buf);
+
+	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return obj;
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 3c00135ead45..7d34cf83f5f2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
 static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
 				    struct dma_fence *in_fence)
 {
-	u32 context = submit->fence_ctx + submit->ring_idx;
+	u64 context = submit->fence_ctx + submit->ring_idx;
 
 	if (dma_fence_match_context(in_fence, context))
 		return 0;
@@ -99,8 +99,8 @@ virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
 		return 0;
 
 	/*
-	 * kvalloc at first tries to allocate memory using kmalloc and
-	 * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+	 * kvmalloc() at first tries to allocate memory using kmalloc() and
+	 * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
 	 * internally for allocations larger than a page size, preventing
 	 * storm of KMSG warnings.
 	 */
@@ -361,7 +361,6 @@ static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
 	submit->buf = NULL;
 	submit->buflist = NULL;
 	submit->sync_file = NULL;
-	submit->out_fence = NULL;
 	submit->out_fence_fd = -1;
 }
 
@@ -530,7 +529,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	virtio_gpu_submit(&submit);
 
 	/*
-	 * Set up usr-out data after submitting the job to optimize
+	 * Set up user-out data after submitting the job to optimize
 	 * the job submission path.
 	 */
 	virtio_gpu_install_out_fence_fd(&submit);
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 031bc77689d5..227bf0ae7ed5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -25,7 +25,7 @@ DECLARE_EVENT_CLASS(virtio_gpu_cmd,
 	TP_fast_assign(
 			__entry->dev = vq->vdev->index;
 			__entry->vq = vq->index;
-			__assign_str(name, vq->name);
+			__assign_str(name);
 			__entry->type = le32_to_cpu(hdr->type);
 			__entry->flags = le32_to_cpu(hdr->flags);
 			__entry->fence_id = le64_to_cpu(hdr->fence_id);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index b1a00c0c25a7..0c194b4e9488 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -32,6 +32,7 @@
 #include <linux/virtio_ring.h>
 
 #include <drm/drm_edid.h>
+#include <drm/drm_print.h>
 
 #include "virtgpu_drv.h"
 #include "virtgpu_trace.h"
@@ -86,6 +87,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 	vgdev->vbufs = NULL;
 }
 
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+	vbuf->size = size;
+	vbuf->resp_cb = NULL;
+	vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+	vbuf->resp_buf = (void *)vbuf->buf + size;
+	return vbuf;
+}
+
 static struct virtio_gpu_vbuffer*
 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 		    int size, int resp_size, void *resp_buf,
@@ -137,6 +154,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_update_cursor *)vbuf->buf;
 }
 
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+					     struct virtio_gpu_vbuffer **vbuffer_p,
+					     int cmd_size)
+{
+	struct virtio_gpu_vbuffer *vbuf;
+
+	vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+	*vbuffer_p = vbuf;
+	return (struct virtio_gpu_command *)vbuf->buf;
+}
+
 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 				       virtio_gpu_resp_cb cb,
 				       struct virtio_gpu_vbuffer **vbuffer_p,
@@ -220,6 +249,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
 			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
 				struct virtio_gpu_ctrl_hdr *cmd;
+
 				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
 				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
 						      le32_to_cpu(resp->type),
@@ -311,6 +341,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 	return sgt;
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+					   struct virtio_gpu_vbuffer *vbuf,
+					   int elemcnt,
+					   struct scatterlist **sgs,
+					   int outcnt,
+					   int incnt)
+{
+	struct virtqueue *vq = vgdev->ctrlq.vq;
+	int ret;
+
+	if (vgdev->has_indirect)
+		elemcnt = 1;
+
+	if (vq->num_free < elemcnt)
+		return -ENOMEM;
+
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+	WARN_ON(ret);
+
+	vbuf->seqno = ++vgdev->ctrlq.seqno;
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+	atomic_inc(&vgdev->pending_commands);
+
+	return 0;
+}
+
 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 				     struct virtio_gpu_vbuffer *vbuf,
 				     struct virtio_gpu_fence *fence,
@@ -368,6 +426,32 @@ again:
 	return 0;
 }
 
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					      struct virtio_gpu_vbuffer *vbuf)
+{
+	struct scatterlist *sgs[3], vcmd, vresp;
+	int elemcnt = 0, outcnt = 0, incnt = 0;
+
+	/* set up vcmd */
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	elemcnt++;
+	sgs[outcnt] = &vcmd;
+	outcnt++;
+
+	/* set up vresp */
+	if (vbuf->resp_size) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		elemcnt++;
+		sgs[outcnt + incnt] = &vresp;
+		incnt++;
+	}
+
+	return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+					       elemcnt, sgs,
+					       outcnt, incnt);
+}
+
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_fence *fence)
@@ -386,6 +470,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	if (vbuf->data_size) {
 		if (is_vmalloc_addr(vbuf->data_buf)) {
 			int sg_ents;
+
 			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
 					     &sg_ents);
 			if (!sgt) {
@@ -422,6 +507,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	return ret;
 }
 
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+	bool notify;
+
+	if (!atomic_read(&vgdev->pending_commands))
+		return;
+
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
+}
+
 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
 	bool notify;
@@ -567,6 +667,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+					 uint32_t resource_id,
+					 uint32_t x, uint32_t y,
+					 uint32_t width, uint32_t height)
+{
+	struct virtio_gpu_resource_flush *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = NULL;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -591,6 +714,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+					     uint64_t offset,
+					     uint32_t width, uint32_t height,
+					     uint32_t x, uint32_t y,
+					     struct virtio_gpu_object_array *objs)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	struct virtio_gpu_transfer_to_host_2d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+					    bo->base.sgt, DMA_TO_DEVICE);
+
+	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
+
+	return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
@@ -645,6 +799,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+				       uint32_t resource_id,
+				       struct virtio_gpu_fence *fence)
+{
+	struct virtio_gpu_resource_detach_backing *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf)
 {
@@ -741,21 +912,21 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
 	uint32_t scanout = le32_to_cpu(cmd->scanout);
 	struct virtio_gpu_output *output;
-	struct edid *new_edid, *old_edid;
+	const struct drm_edid *new_edid, *old_edid;
 
 	if (scanout >= vgdev->num_scanouts)
 		return;
 	output = vgdev->outputs + scanout;
 
-	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
-	drm_connector_update_edid_property(&output->conn, new_edid);
+	new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
+	drm_edid_connector_update(&output->conn, new_edid);
 
 	spin_lock(&vgdev->display_info_lock);
-	old_edid = output->edid;
-	output->edid = new_edid;
+	old_edid = output->drm_edid;
+	output->drm_edid = new_edid;
 	spin_unlock(&vgdev->display_info_lock);
 
-	kfree(old_edid);
+	drm_edid_free(old_edid);
 	wake_up(&vgdev->resp_wq);
 }
 
@@ -1103,8 +1274,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_mem_entry *ents,
 			      unsigned int nents)
 {
+	if (obj->attached)
+		return;
+
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
 					       ents, nents, NULL);
+
+	obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj,
+			      struct virtio_gpu_fence *fence)
+{
+	if (!obj->attached)
+		return;
+
+	virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+					       fence);
+
+	obj->attached = false;
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1265,6 +1454,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
 
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	bo->created = true;
+
+	if (nents)
+		bo->attached = true;
 }
 
 void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 25df81c02783..5ad3b7c6f73c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -37,6 +37,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
 	unsigned long vm_size = vma->vm_end - vma->vm_start;
+	unsigned long vm_end;
 
 	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
 		return -EINVAL;
@@ -56,12 +57,14 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
 	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	/* Partial mappings of GEM buffers don't happen much in practice. */
-	if (vm_size != vram->vram_node.size)
+	if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
+		return -EINVAL;
+
+	if (vm_end > vram->vram_node.size)
 		return -EINVAL;
 
 	ret = io_remap_pfn_range(vma, vma->vm_start,
-				 vram->vram_node.start >> PAGE_SHIFT,
+				 (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
 				 vm_size, vma->vm_page_prot);
 	return ret;
 }
