Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig            |  12
-rw-r--r--  drivers/gpu/drm/virtio/Makefile           |   2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c  |   1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  |  62
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c      |  79
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h      |  52
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c    |  16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c      |  46
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c    | 232
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c      |  69
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c   |  30
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c    | 262
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c    | 183
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_submit.c   | 542
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_trace.h    |   2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c       | 211
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c     |  11
17 files changed, 1463 insertions, 349 deletions
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 51ec7c3240c9..fc884fb57b7e 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO_MENU && MMU
select VIRTIO
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select VIRTIO_DMA_SHARED_BUFFER
@@ -11,3 +12,14 @@ config DRM_VIRTIO_GPU
QEMU based VMMs (like KVM or Xen).
If unsure say M.
+
+config DRM_VIRTIO_GPU_KMS
+ bool "Virtio GPU driver modesetting support"
+ depends on DRM_VIRTIO_GPU
+ default y
+ help
+ Enable modesetting support for virtio GPU driver. This can be
+ disabled in cases where only "headless" usage of the GPU is
+ required.
+
+ If unsure, say Y.
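
The new DRM_VIRTIO_GPU_KMS option is meant to be consumed at run time with IS_ENABLED() rather than an #ifdef, so the same objects are built either way and only the KMS feature bits are masked when the option is off (that is what the virtgpu_kms.c hunk further down does). A minimal sketch of the pattern, assuming only a struct drm_device and the scanout count from the device config:

#include <drm/drm_device.h>
#include <drm/drm_drv.h>

/* Sketch: run headless when KMS support is configured out or the host
 * exposes no scanouts, by clearing the modeset/atomic feature bits so
 * DRM never advertises KMS ioctls for this device. */
static void example_mask_kms(struct drm_device *dev, u32 num_scanouts)
{
	if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !num_scanouts)
		dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
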
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index b99fa4a73b68..d2e1788a8227 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -6,6 +6,6 @@
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \
virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
- virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
+ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o virtgpu_submit.o
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 853dd9aa397e..3a68a16b58ae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -27,6 +27,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 9ea7611a9e0f..6a962c1d6e95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,8 +30,11 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "virtgpu_drv.h"
@@ -55,6 +58,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
@@ -66,6 +70,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -73,7 +78,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd);
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
@@ -98,6 +103,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(crtc);
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -107,6 +113,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ drm_crtc_vblank_off(crtc);
+
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
}
@@ -120,9 +128,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
- crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ struct drm_pending_vblank_event *event;
/*
* virtio-gpu can't do modeset and plane update operations
@@ -130,9 +139,22 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
* in the plane update callback, and here we just check
* whenever we must force the modeset.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
}
+
+ spin_unlock_irq(&dev->event_lock);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -164,11 +186,9 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = NULL;
int count, width, height;
- if (output->edid) {
- count = drm_add_edid_modes(connector, output->edid);
- if (count)
- return count;
- }
+ count = drm_edid_connector_add_modes(connector);
+ if (count)
+ return count;
width = le32_to_cpu(output->info.r.width);
height = le32_to_cpu(output->info.r.height);
@@ -191,7 +211,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct virtio_gpu_output *output =
drm_connector_to_virtio_gpu_output(connector);
@@ -259,6 +279,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor;
+ int ret;
output->index = index;
if (index == 0) {
@@ -273,8 +294,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
- drm_crtc_init_with_planes(dev, crtc, primary, cursor,
- &virtio_gpu_crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &virtio_gpu_crtc_funcs, NULL);
+ if (ret)
+ return ret;
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
@@ -295,6 +318,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj = NULL;
@@ -316,7 +340,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+ ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
drm_gem_object_put(obj);
@@ -336,6 +360,9 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
+ if (!vgdev->num_scanouts)
+ return 0;
+
ret = drmm_mode_config_init(vgdev->ddev);
if (ret)
return ret;
@@ -354,6 +381,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
+ ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(vgdev->ddev);
return 0;
}
@@ -362,6 +393,9 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
int i;
+ if (!vgdev->num_scanouts)
+ return;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
- kfree(vgdev->outputs[i].edid);
+ drm_edid_free(vgdev->outputs[i].drm_edid);
}
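
The EDID handling above switches from the raw struct edid to the opaque struct drm_edid API: .get_modes now only calls drm_edid_connector_add_modes(), which assumes the EDID was previously attached to the connector with drm_edid_connector_update() (for virtio that happens in the virtgpu_vq.c response handler, outside this hunk), and modeset teardown releases it with drm_edid_free(). A hedged sketch of that producer/consumer pairing, with a hypothetical handler that receives an EDID blob from the host:

#include <drm/drm_edid.h>

/* Producer side (hypothetical response handler): wrap the raw blob and
 * attach it to the connector; drm_edid_alloc() copies the data. */
static void example_store_edid(struct drm_connector *connector,
			       const void *blob, size_t size,
			       const struct drm_edid **slot)
{
	drm_edid_free(*slot);			/* drop any previous EDID */
	*slot = drm_edid_alloc(blob, size);
	drm_edid_connector_update(connector, *slot);
}

/* Consumer side (.get_modes): add whatever modes the attached EDID has. */
static int example_get_modes(struct drm_connector *connector)
{
	return drm_edid_connector_add_modes(connector);
}
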
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ae97b98750b6..a5ce96fb8a1d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -26,20 +26,25 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
+#include <linux/vgaarb.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
+#define PCI_DEVICE_ID_VIRTIO_GPU 0x1050
+
static const struct drm_driver driver;
static int virtio_gpu_modeset = -1;
@@ -51,14 +56,14 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
const char *pname = dev_name(&pdev->dev);
- bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+ bool vga = pci_is_vga(pdev);
int ret;
DRM_INFO("pci: %s detected at %s\n",
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
if (vga) {
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (ret)
return ret;
}
@@ -94,6 +99,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
goto err_free;
}
+ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
@@ -102,7 +108,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
if (ret)
goto err_deinit;
- drm_fbdev_generic_setup(vdev->priv, 32);
+ drm_client_setup(vdev->priv, NULL);
+
return 0;
err_deinit:
@@ -122,6 +129,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_put(dev);
}
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+ struct drm_device *dev = vdev->priv;
+
+ /* stop talking to the device */
+ drm_dev_unplug(dev);
+}
+
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
@@ -153,14 +168,50 @@ static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
+ .shutdown = virtio_gpu_shutdown,
.config_changed = virtio_gpu_config_changed
};
-module_virtio_driver(virtio_gpu_driver);
+static int __init virtio_gpu_driver_init(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ PCI_DEVICE_ID_VIRTIO_GPU,
+ NULL);
+ if (pdev && pci_is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+ }
+
+ ret = register_virtio_driver(&virtio_gpu_driver);
+
+ if (pdev) {
+ if (pci_is_vga(pdev))
+ vga_put(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+
+ pci_dev_put(pdev);
+ }
+
+ return ret;
+}
+
+static void __exit virtio_gpu_driver_exit(void)
+{
+ unregister_virtio_driver(&virtio_gpu_driver);
+}
+
+module_init(virtio_gpu_driver_init);
+module_exit(virtio_gpu_driver_exit);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio GPU driver");
@@ -172,19 +223,22 @@ MODULE_AUTHOR("Alon Levy");
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
+ /*
+ * If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
+ * out via drm_device::driver_features:
+ */
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
.dumb_create = virtio_gpu_mode_dumb_create,
- .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
@@ -196,7 +250,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index af6ffb696086..f17660a71a3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -45,7 +45,6 @@
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -58,6 +57,9 @@
#define MAX_CAPSET_ID 63
#define MAX_RINGS 64
+/* See virtio_gpu_ctx_create. One additional character for NULL terminator. */
+#define DEBUG_NAME_MAX_LEN 65
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -86,9 +88,11 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgt;
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool attached;
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
@@ -119,7 +123,7 @@ struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
u32 nents, total;
- struct drm_gem_object *objs[];
+ struct drm_gem_object *objs[] __counted_by(total);
};
struct virtio_gpu_vbuffer;
@@ -176,7 +180,7 @@ struct virtio_gpu_output {
struct drm_encoder enc;
struct virtio_gpu_display_one info;
struct virtio_gpu_update_cursor cursor;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int cur_x;
int cur_y;
bool needs_modeset;
@@ -191,6 +195,13 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
+struct virtio_gpu_plane_state {
+ struct drm_plane_state base;
+ struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+ container_of(x, struct virtio_gpu_plane_state, base)
+
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -274,6 +285,8 @@ struct virtio_gpu_fpriv {
uint64_t base_fence_ctx;
uint64_t ring_idx_mask;
struct mutex context_lock;
+ char debug_name[DEBUG_NAME_MAX_LEN];
+ bool explicit_debug_name;
};
/* virtgpu_ioctl.c */
@@ -296,10 +309,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
@@ -324,12 +335,21 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -344,8 +364,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
struct virtio_gpu_mem_entry *ents,
unsigned int nents);
-int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
-int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -394,11 +416,9 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
-void virtio_gpu_fence_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
-void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
int
@@ -465,11 +485,13 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
int flags);
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf);
-int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
- uuid_t *uuid);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach);
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
@@ -486,4 +508,8 @@ void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
struct sg_table *sgt,
enum dma_data_direction dir);
+/* virtgpu_submit.c */
+int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- struct drm_gem_object *gobj;
-
- BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
- *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
- drm_gem_object_put(gobj);
- return 0;
-}
-
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file)
{
@@ -127,15 +112,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
/* the context might still be missing when the first ioctl is
* DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
*/
- virtio_gpu_create_context(obj->dev, file);
+ if (!vgdev->has_context_init)
+ virtio_gpu_create_context(obj->dev, file);
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return -ENOMEM;
- virtio_gpu_array_add_obj(objs, obj);
+ if (vfpriv->context_created) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
+
+ virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+ }
- virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- objs);
out_notify:
virtio_gpu_notify(vgdev);
return 0;
@@ -161,6 +149,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_notify(vgdev);
}
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+ struct virtio_gpu_object_array *objs;
+
+ objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = 1;
+ return objs;
+}
+
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9f4a90493aea..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,46 +38,23 @@
VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
-static int virtio_gpu_fence_event_create(struct drm_device *dev,
- struct drm_file *file,
- struct virtio_gpu_fence *fence,
- uint32_t ring_idx)
-{
- struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_fence_event *e = NULL;
- int ret;
-
- if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
- return 0;
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
- e->event.length = sizeof(e->event);
-
- ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
- if (ret)
- goto free;
-
- fence->e = e;
- return 0;
-free:
- kfree(e);
- return ret;
-}
-
/* Must be called with &virtio_gpu_fpriv.struct_mutex held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fpriv *vfpriv)
{
- char dbgname[TASK_COMM_LEN];
+ if (vfpriv->explicit_debug_name) {
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init,
+ strlen(vfpriv->debug_name),
+ vfpriv->debug_name);
+ } else {
+ char dbgname[TASK_COMM_LEN];
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- vfpriv->context_init, strlen(dbgname),
- dbgname);
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+ }
vfpriv->context_created = true;
}
@@ -103,164 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
- virtio_gpu_map->handle,
- &virtio_gpu_map->offset);
-}
-
-/*
- * Usage of execbuffer:
- * Relocations need to take into account the full VIRTIO_GPUDrawable size.
- * However, the command as passed from user space must *not* contain the initial
- * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
- */
-static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_virtgpu_execbuffer *exbuf = data;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_fence *out_fence;
- int ret;
- uint32_t *bo_handles = NULL;
- void __user *user_bo_handles = NULL;
- struct virtio_gpu_object_array *buflist = NULL;
- struct sync_file *sync_file;
- int in_fence_fd = exbuf->fence_fd;
- int out_fence_fd = -1;
- void *buf;
- uint64_t fence_ctx;
- uint32_t ring_idx;
-
- fence_ctx = vgdev->fence_drv.context;
- ring_idx = 0;
-
- if (vgdev->has_virgl_3d == false)
- return -ENOSYS;
-
- if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
- return -EINVAL;
-
- if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
- if (exbuf->ring_idx >= vfpriv->num_rings)
- return -EINVAL;
-
- if (!vfpriv->base_fence_ctx)
- return -EINVAL;
-
- fence_ctx = vfpriv->base_fence_ctx;
- ring_idx = exbuf->ring_idx;
- }
-
- exbuf->fence_fd = -1;
-
- virtio_gpu_create_context(dev, file);
- if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
- struct dma_fence *in_fence;
-
- in_fence = sync_file_get_fence(in_fence_fd);
-
- if (!in_fence)
- return -EINVAL;
-
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- ret = 0;
- if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx))
- ret = dma_fence_wait(in_fence, true);
-
- dma_fence_put(in_fence);
- if (ret)
- return ret;
- }
-
- if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
- out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
- if (out_fence_fd < 0)
- return out_fence_fd;
- }
-
- if (exbuf->num_bo_handles) {
- bo_handles = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(uint32_t), GFP_KERNEL);
- if (!bo_handles) {
- ret = -ENOMEM;
- goto out_unused_fd;
- }
-
- user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
- if (copy_from_user(bo_handles, user_bo_handles,
- exbuf->num_bo_handles * sizeof(uint32_t))) {
- ret = -EFAULT;
- goto out_unused_fd;
- }
-
- buflist = virtio_gpu_array_from_handles(file, bo_handles,
- exbuf->num_bo_handles);
- if (!buflist) {
- ret = -ENOENT;
- goto out_unused_fd;
- }
- kvfree(bo_handles);
- bo_handles = NULL;
- }
-
- buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
- goto out_unused_fd;
- }
-
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_memdup;
- }
-
- out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
- if(!out_fence) {
- ret = -ENOMEM;
- goto out_unresv;
- }
-
- ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
- if (ret)
- goto out_unresv;
-
- if (out_fence_fd >= 0) {
- sync_file = sync_file_create(&out_fence->f);
- if (!sync_file) {
- dma_fence_put(&out_fence->f);
- ret = -ENOMEM;
- goto out_unresv;
- }
-
- exbuf->fence_fd = out_fence_fd;
- fd_install(out_fence_fd, sync_file->file);
- }
-
- virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, buflist, out_fence);
- dma_fence_put(&out_fence->f);
- virtio_gpu_notify(vgdev);
- return 0;
-
-out_unresv:
- if (buflist)
- virtio_gpu_array_unlock_resv(buflist);
-out_memdup:
- kvfree(buf);
-out_unused_fd:
- kvfree(bo_handles);
- if (buflist)
- virtio_gpu_array_put_free(buflist);
-
- if (out_fence_fd >= 0)
- put_unused_fd(out_fence_fd);
-
- return ret;
+ return drm_gem_dumb_map_offset(file, vgdev->ddev,
+ virtio_gpu_map->handle,
+ &virtio_gpu_map->offset);
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
@@ -292,6 +114,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
value = vgdev->capset_id_mask;
break;
+ case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -750,8 +575,8 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
int ret = 0;
- uint32_t num_params, i, param, value;
- uint64_t valid_ring_mask;
+ uint32_t num_params, i;
+ uint64_t valid_ring_mask, param, value;
size_t len;
struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -765,7 +590,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
return -EINVAL;
/* Number of unique parameters supported at this time. */
- if (num_params > 3)
+ if (num_params > 4)
return -EINVAL;
ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
@@ -827,6 +652,21 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
vfpriv->ring_idx_mask = value;
break;
+ case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
+ if (vfpriv->explicit_debug_name) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = strncpy_from_user(vfpriv->debug_name,
+ u64_to_user_ptr(value),
+ DEBUG_NAME_MAX_LEN - 1);
+ if (ret < 0)
+ goto out_unlock;
+
+ vfpriv->explicit_debug_name = true;
+ ret = 0;
+ break;
default:
ret = -EINVAL;
goto out_unlock;
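
Userspace opts into the per-context debug name through the existing CONTEXT_INIT ioctl; the kernel side above copies the string with strncpy_from_user() (bounded by DEBUG_NAME_MAX_LEN) and later sends it to the host instead of the task comm, while the new VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME getparam lets userspace probe for the feature first. A hedged userspace sketch, assuming the VIRTGPU_CONTEXT_PARAM_DEBUG_NAME constant from the matching uapi header update (not part of this directory-limited diff):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

/* Give the virtio-gpu context an explicit debug name instead of the
 * process comm.  Must run before any other ioctl implicitly creates
 * the context (e.g. the first EXECBUFFER or dumb-buffer ioctl). */
static int example_set_debug_name(int fd, const char *name)
{
	struct drm_virtgpu_context_set_param p = {
		.param = VIRTGPU_CONTEXT_PARAM_DEBUG_NAME,
		.value = (uintptr_t)name,	/* pointer to a NUL-terminated string */
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 1,
		.ctx_set_params = (uintptr_t)&p,
	};

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}
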
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 27b7f14dae89..f3594695bb82 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -29,6 +29,7 @@
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
@@ -43,11 +44,13 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- drm_helper_hpd_irq_event(vgdev->ddev);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ drm_helper_hpd_irq_event(vgdev->ddev);
+ }
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
@@ -114,11 +117,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
- static vq_callback_t *callbacks[] = {
- virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+ struct virtqueue_info vqs_info[] = {
+ { "control", virtio_gpu_ctrl_ack },
+ { "cursor", virtio_gpu_cursor_ack },
};
- static const char * const names[] = { "control", "cursor" };
-
struct virtio_gpu_device *vgdev;
/* this will expand later */
struct virtqueue *vqs[2];
@@ -161,18 +163,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
#endif
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
vgdev->has_edid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
vgdev->has_indirect = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
vgdev->has_resource_assign_uuid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
vgdev->has_resource_blob = true;
- }
+
if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -192,9 +194,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
vgdev->has_context_init = true;
- }
DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
@@ -205,7 +207,7 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
DRM_INFO("features: %ccontext_init\n",
vgdev->has_context_init ? '+' : '-');
- ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+ ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
goto err_vqs;
@@ -223,12 +225,15 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
- if (!vgdev->num_scanouts) {
- DRM_ERROR("num_scanouts is zero\n");
- ret = -EINVAL;
- goto err_scanouts;
+
+ if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
+ DRM_INFO("KMS disabled\n");
+ vgdev->num_scanouts = 0;
+ vgdev->has_edid = false;
+ dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ } else {
+ DRM_INFO("number of scanouts: %d\n", num_scanouts);
}
- DRM_INFO("number of scanouts: %d\n", num_scanouts);
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
num_capsets, &num_capsets);
@@ -244,12 +249,14 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
- 5 * HZ);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+ 5 * HZ);
+ }
return 0;
err_scanouts:
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index c7e74cf13022..4270bfede7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -26,6 +26,8 @@
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
+#include <drm/drm_print.h>
+
#include "virtgpu_drv.h"
static int virtio_gpu_virglrenderer_workaround = 1;
@@ -47,6 +49,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
if (handle < 0)
return handle;
*resid = handle + 1;
@@ -56,9 +59,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
- if (!virtio_gpu_virglrenderer_workaround) {
+ if (!virtio_gpu_virglrenderer_workaround)
ida_free(&vgdev->resource_ida, id - 1);
- }
}
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
@@ -80,6 +82,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
drm_gem_free_mmap_offset(&vram->base.base.base);
drm_gem_object_release(&vram->base.base.base);
kfree(vram);
+ } else {
+ drm_gem_object_release(&bo->base.base);
+ kfree(bo);
}
}
@@ -97,6 +102,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
virtio_gpu_cleanup_object(bo);
}
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_fence *fence;
+
+ if (!bo->attached)
+ return 0;
+
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+ if (!fence)
+ return -ENOMEM;
+
+ virtio_gpu_object_detach(vgdev, bo, fence);
+ virtio_gpu_notify(vgdev);
+
+ dma_fence_wait(&fence->f, false);
+ dma_fence_put(&fence->f);
+
+ return 0;
+}
+
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4c09e313bebc..a7863f8ee4ee 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
@@ -66,11 +71,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct virtio_gpu_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
@@ -79,6 +101,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
struct drm_crtc_state *crtc_state;
int ret;
@@ -86,10 +110,18 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
return 0;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ new_plane_state->ignore_damage_clips = true;
+
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
@@ -98,6 +130,30 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return ret;
}
+/* For drm panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+ struct drm_plane_state *state,
+ struct drm_rect *rect)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(state->fb->obj[0]);
+ struct virtio_gpu_object_array *objs;
+ uint32_t w = rect->x2 - rect->x1;
+ uint32_t h = rect->y2 - rect->y1;
+ uint32_t x = rect->x1;
+ uint32_t y = rect->y1;
+ uint32_t off = x * state->fb->format->cpp[0] +
+ y * state->fb->pitches[0];
+
+ objs = virtio_gpu_panic_array_alloc();
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ objs);
+}
+
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
struct drm_plane_state *state,
struct drm_rect *rect)
@@ -121,6 +177,24 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
objs, NULL);
}
+/* For drm_panic */
+static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height);
+ virtio_gpu_panic_notify(vgdev);
+}
+
static void virtio_gpu_resource_flush(struct drm_plane *plane,
uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
@@ -128,11 +202,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (vgfb->fence) {
+ if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -141,13 +217,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
virtio_gpu_notify(vgdev);
-
- dma_fence_wait_timeout(&vgfb->fence->f, true,
+ dma_fence_wait_timeout(&vgplane_st->fence->f, true,
msecs_to_jiffies(50));
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
} else {
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
width, height, NULL, NULL);
@@ -231,45 +305,121 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y2 - rect.y1);
}
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+ struct drm_plane_state *new_state,
+ struct drm_gem_object *obj)
+{
+ struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret) {
+ dma_resv_unlock(resv);
+ return ret;
+ }
+
+ if (!bo->sgt) {
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+ bo, attach);
+ if (ret)
+ goto err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
+
+ dma_resv_unlock(resv);
+ return 0;
+
+err:
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+ return ret;
+}
+
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+ int ret;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
- if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ obj = new_state->fb->obj[0];
+ if (bo->dumb || drm_gem_is_imported(obj)) {
+ vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+ vgdev->fence_drv.context,
0);
- if (!vgfb->fence)
+ if (!vgplane_st->fence)
return -ENOMEM;
}
+ if (drm_gem_is_imported(obj)) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ goto err_fence;
+ }
+
return 0;
+
+err_fence:
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
+ }
+
+ return ret;
+}
+
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
}
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct drm_gem_object *obj;
if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(state->fb);
- if (vgfb->fence) {
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ vgplane_st = to_virtio_gpu_plane_state(state);
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
}
+
+ obj = state->fb->obj[0];
+ if (drm_gem_is_imported(obj))
+ virtio_gpu_cleanup_imported_obj(obj);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -281,6 +431,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
@@ -293,6 +444,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
@@ -312,27 +464,25 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, 0,
plane->state->crtc_w,
plane->state->crtc_h,
- 0, 0, objs, vgfb->fence);
+ 0, 0, objs, vgplane_st->fence);
virtio_gpu_notify(vgdev);
- dma_fence_wait(&vgfb->fence->f, true);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ dma_fence_wait(&vgplane_st->fence->f, true);
}
if (plane->state->fb != old_state->fb) {
DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
plane->state->crtc_x,
plane->state->crtc_y,
- plane->state->fb ? plane->state->fb->hot_x : 0,
- plane->state->fb ? plane->state->fb->hot_y : 0);
+ plane->state->hotspot_x,
+ plane->state->hotspot_y);
output->cursor.hdr.type =
cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
output->cursor.resource_id = cpu_to_le32(handle);
if (plane->state->fb) {
output->cursor.hot_x =
- cpu_to_le32(plane->state->fb->hot_x);
+ cpu_to_le32(plane->state->hotspot_x);
output->cursor.hot_y =
- cpu_to_le32(plane->state->fb->hot_y);
+ cpu_to_le32(plane->state->hotspot_y);
} else {
output->cursor.hot_x = cpu_to_le32(0);
output->cursor.hot_y = cpu_to_le32(0);
@@ -349,11 +499,71 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
virtio_gpu_cursor_ping(vgdev, output);
}
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct virtio_gpu_object *bo;
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
+ return -ENODEV;
+
+ if (bo->base.vaddr) {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ } else {
+ struct drm_gem_shmem_object *shmem = &bo->base;
+
+ if (!shmem->pages)
+ return -ENODEV;
+ /* map scanout buffer later */
+ sb->pages = shmem->pages;
+ }
+
+ sb->format = plane->state->fb->format;
+ sb->height = plane->state->fb->height;
+ sb->width = plane->state->fb->width;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ return 0;
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+ struct virtio_gpu_object *bo;
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_rect rect;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ if (bo->dumb) {
+ if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
+ &rect))
+ return;
+ }
+
+ virtio_gpu_panic_resource_flush(plane,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+}
+
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
.prepare_fb = virtio_gpu_plane_prepare_fb,
.cleanup_fb = virtio_gpu_plane_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_primary_plane_update,
+ .get_scanout_buffer = virtio_drm_get_scanout_buffer,
+ .panic_flush = virtio_panic_flush,
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
@@ -390,5 +600,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
return plane;
drm_plane_helper_add(plane, funcs);
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_enable_fb_damage_clips(plane);
+
return plane;
}
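
With drm_plane_enable_fb_damage_clips() on the primary plane, userspace may now pass partial-update rectangles, and the atomic_check change above deliberately sets ignore_damage_clips whenever the framebuffer changes: transfers to the host are per-buffer, so a newly flipped buffer has to be uploaded in full regardless of the clips. A minimal sketch of how an update path typically consumes the merged damage (virtio's real update code does the equivalent with its transfer/flush commands; the helper shown is the generic one from drm_damage_helper.h):

#include <drm/drm_damage_helper.h>
#include <drm/drm_plane.h>

/* Sketch: compute one merged damage rectangle for this update and only
 * upload/flush that region; the helper falls back to the full plane
 * when no clips were supplied. */
static void example_flush_damage(struct drm_plane_state *old_state,
				 struct drm_plane_state *new_state)
{
	struct drm_rect rect;

	if (!drm_atomic_helper_damage_merged(old_state, new_state, &rect))
		return;		/* nothing visible changed */

	/* rect.x1/y1 .. rect.x2/y2 are framebuffer coordinates:
	 * transfer that region to the host, then flush it. */
}
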
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 44425f20d91a..ce49282198cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
#include "virtgpu_drv.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
uuid_t *uuid)
{
@@ -73,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -142,10 +143,162 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
return buf;
}
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct scatterlist *sl;
+ struct sg_table *sgt;
+ long i, ret;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return ret < 0 ? ret : -ETIMEDOUT;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ *ents = kvmalloc_array(sgt->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+
+ *nents = sgt->nents;
+ for_each_sgtable_dma_sg(sgt, sl, i) {
+ (*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+ (*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+ (*ents)[i].padding = 0;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
+{
+ struct dma_buf_attachment *attach = bo->base.base.import_attach;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (bo->created) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
+
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ virtgpu_dma_buf_unmap(bo);
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ dma_buf_put(dmabuf);
+ }
+
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object_params params = { 0 };
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret) {
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+ }
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_pin;
+
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+ if (ret)
+ goto err_import;
+
+ params.blob = true;
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.size = attach->dmabuf->size;
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+ ents, nents);
+ bo->guest_blob = true;
+
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+
+ return 0;
+
+err_import:
+ dma_buf_unpin(attach);
+err_pin:
+ dma_resv_unlock(resv);
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+ .free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ virtgpu_dma_buf_unmap(bo);
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = virtgpu_dma_buf_move_notify
+};
+
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct dma_buf_attachment *attach;
+ struct virtio_gpu_object *bo;
struct drm_gem_object *obj;
+ int ret;
if (buf->ops == &virtgpu_dmabuf_ops.ops) {
obj = buf->priv;
@@ -159,7 +312,33 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, buf);
+ if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+ return drm_gem_prime_import(dev, buf);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &bo->base.base;
+ obj->resv = buf->resv;
+ obj->funcs = &virtgpu_gem_dma_buf_funcs;
+ drm_gem_private_object_init(dev, obj, buf->size);
+
+ attach = dma_buf_dynamic_attach(buf, dev->dev,
+ &virtgpu_dma_buf_attach_ops, obj);
+ if (IS_ERR(attach)) {
+ kfree(bo);
+ return ERR_CAST(attach);
+ }
+
+ obj->import_attach = attach;
+ get_dma_buf(buf);
+
+ ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return obj;
}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
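
The import path above follows the dynamic dma-buf importer contract: attach with a move_notify callback, and only pin/map the attachment while holding the exporter's reservation lock; when the exporter calls move_notify, the cached sg_table has to be torn down so the next prepare_fb re-imports it. A condensed sketch of that contract (the example_* names are illustrative, the dma-buf calls are the real API):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

static void example_move_notify(struct dma_buf_attachment *attach)
{
	/* Exporter is about to move the buffer; called with
	 * attach->dmabuf->resv already held.  Invalidate any cached
	 * sg_table and detach the backing from the device here. */
}

/* Passed to dma_buf_dynamic_attach() at import time. */
static const struct dma_buf_attach_ops example_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = example_move_notify,
};

/* Pin and map under the reservation lock so the mapping stays valid
 * until move_notify or an explicit unpin. */
static struct sg_table *example_map_pinned(struct dma_buf *buf,
					   struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;
	int ret;

	dma_resv_lock(buf->resv, NULL);
	ret = dma_buf_pin(attach);
	if (ret) {
		dma_resv_unlock(buf->resv);
		return ERR_PTR(ret);
	}
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_unpin(attach);
	dma_resv_unlock(buf->resv);
	return sgt;
}
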
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
new file mode 100644
index 000000000000..7d34cf83f5f2
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie
+ * Alon Levy
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#include <linux/file.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+#include <drm/virtgpu_drm.h>
+
+#include "virtgpu_drv.h"
+
+struct virtio_gpu_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
+struct virtio_gpu_submit {
+ struct virtio_gpu_submit_post_dep *post_deps;
+ unsigned int num_out_syncobjs;
+
+ struct drm_syncobj **in_syncobjs;
+ unsigned int num_in_syncobjs;
+
+ struct virtio_gpu_object_array *buflist;
+ struct drm_virtgpu_execbuffer *exbuf;
+ struct virtio_gpu_fence *out_fence;
+ struct virtio_gpu_fpriv *vfpriv;
+ struct virtio_gpu_device *vgdev;
+ struct sync_file *sync_file;
+ struct drm_file *file;
+ int out_fence_fd;
+ u64 fence_ctx;
+ u32 ring_idx;
+ void *buf;
+};
+
+static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
+ struct dma_fence *in_fence)
+{
+ u64 context = submit->fence_ctx + submit->ring_idx;
+
+ if (dma_fence_match_context(in_fence, context))
+ return 0;
+
+ return dma_fence_wait(in_fence, true);
+}
+
+static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
+ struct dma_fence *fence)
+{
+ struct dma_fence_unwrap itr;
+ struct dma_fence *f;
+ int err;
+
+ dma_fence_unwrap_for_each(f, &itr, fence) {
+ err = virtio_gpu_do_fence_wait(submit, f);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ if (syncobjs[i])
+ drm_syncobj_put(syncobjs[i]);
+ }
+
+ kvfree(syncobjs);
+}
+
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+ struct drm_syncobj **syncobjs;
+ int ret = 0, i;
+
+ if (!num_in_syncobjs)
+ return 0;
+
+ /*
+	 * kvmalloc() first tries to allocate memory using kmalloc() and
+	 * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
+	 * internally for allocations larger than a page, preventing a
+	 * storm of kmsg warnings.
+ */
+ syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+ if (!syncobjs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_in_syncobjs; i++) {
+ u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_syncobjs(syncobjs, i);
+ return ret;
+ }
+
+ submit->num_in_syncobjs = num_in_syncobjs;
+ submit->in_syncobjs = syncobjs;
+
+ return ret;
+}
+
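
Editor's note: virtio_gpu_parse_deps() reads an array of struct drm_virtgpu_execbuffer_syncobj descriptors from userspace, stepping by exbuf->syncobj_stride. A hedged userspace sketch of one binary in-syncobj dependency that is reset after the wait; "syncobj_handle" and "exbuf" are placeholders:

    struct drm_virtgpu_execbuffer_syncobj in_dep = {
            .handle = syncobj_handle,
            .flags  = VIRTGPU_EXECBUF_SYNCOBJ_RESET,
            .point  = 0,                    /* binary syncobj, not a timeline */
    };

    exbuf.in_syncobjs     = (uintptr_t)&in_dep;
    exbuf.num_in_syncobjs = 1;
    exbuf.syncobj_stride  = sizeof(in_dep);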
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i;
+
+ for (i = 0; i < nr_syncobjs; i++) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+
+ kvfree(post_deps);
+}
+
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ struct virtio_gpu_submit_post_dep *post_deps;
+ u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ int ret = 0, i;
+
+ if (!num_out_syncobjs)
+ return 0;
+
+ post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+ if (!post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_out_syncobjs; i++) {
+ u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ kfree(post_deps[i].chain);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_post_deps(post_deps, i);
+ return ret;
+ }
+
+ submit->num_out_syncobjs = num_out_syncobjs;
+ submit->post_deps = post_deps;
+
+ return 0;
+}
+
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+ if (post_deps) {
+ struct dma_fence *fence = &submit->out_fence->f;
+ u32 i;
+
+ for (i = 0; i < submit->num_out_syncobjs; i++) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+ }
+}
+
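
Editor's note: for out-syncobjs with a non-zero point, virtio_gpu_process_post_deps() attaches the submit fence as a new timeline point via a dma_fence_chain node. A hedged userspace sketch of waiting on that point with libdrm; "card_fd", "out_syncobj_handle" and "out_point" are placeholders:

    uint32_t handle = out_syncobj_handle;
    uint64_t point  = out_point;

    /* Block until the timeline point signalled by the submission is reached. */
    drmSyncobjTimelineWait(card_fd, &handle, &point, 1,
                           INT64_MAX, /* effectively no timeout */
                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, NULL);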
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct virtio_gpu_fence *fence,
+ u32 ring_idx)
+{
+ struct virtio_gpu_fence_event *e = NULL;
+ int ret;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
+ e->event.length = sizeof(e->event);
+
+ ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+ if (ret) {
+ kfree(e);
+ return ret;
+ }
+
+ fence->e = e;
+
+ return 0;
+}
+
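
Editor's note: the fence event created here is delivered through the normal DRM event queue once the per-ring fence signals. A hedged userspace sketch of draining such events, assuming the DRM uapi headers are available and with "card_fd" and handle_ring_fence() as placeholders:

    static void drain_fence_events(int card_fd)
    {
            char buf[128];
            ssize_t len = read(card_fd, buf, sizeof(buf));

            for (char *p = buf; len > 0 && p < buf + len; ) {
                    struct drm_event *ev = (struct drm_event *)p;

                    if (ev->type == VIRTGPU_EVENT_FENCE_SIGNALED)
                            handle_ring_fence();    /* placeholder callback */

                    p += ev->length;
            }
    }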
+static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ u32 *bo_handles;
+
+ if (!exbuf->num_bo_handles)
+ return 0;
+
+ bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
+ GFP_KERNEL);
+ if (!bo_handles)
+ return -ENOMEM;
+
+ if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
+ exbuf->num_bo_handles * sizeof(*bo_handles))) {
+ kvfree(bo_handles);
+ return -EFAULT;
+ }
+
+ submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
+ exbuf->num_bo_handles);
+ if (!submit->buflist) {
+ kvfree(bo_handles);
+ return -ENOENT;
+ }
+
+ kvfree(bo_handles);
+
+ return 0;
+}
+
+static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
+{
+ virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
+ if (!IS_ERR(submit->buf))
+ kvfree(submit->buf);
+
+ if (submit->buflist)
+ virtio_gpu_array_put_free(submit->buflist);
+
+ if (submit->out_fence_fd >= 0)
+ put_unused_fd(submit->out_fence_fd);
+
+ if (submit->out_fence)
+ dma_fence_put(&submit->out_fence->f);
+
+ if (submit->sync_file)
+ fput(submit->sync_file->file);
+}
+
+static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
+{
+ virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
+ submit->vfpriv->ctx_id, submit->buflist,
+ submit->out_fence);
+ virtio_gpu_notify(submit->vgdev);
+}
+
+static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
+{
+ submit->buf = NULL;
+ submit->buflist = NULL;
+ submit->sync_file = NULL;
+ submit->out_fence_fd = -1;
+}
+
+static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
+ struct drm_virtgpu_execbuffer *exbuf,
+ struct drm_device *dev,
+ struct drm_file *file,
+ u64 fence_ctx, u32 ring_idx)
+{
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fence *out_fence;
+ bool drm_fence_event;
+ int err;
+
+ memset(submit, 0, sizeof(*submit));
+
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+ (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+ drm_fence_event = true;
+ else
+ drm_fence_event = false;
+
+ if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+ exbuf->num_out_syncobjs ||
+ exbuf->num_bo_handles ||
+ drm_fence_event)
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+ else
+ out_fence = NULL;
+
+ if (drm_fence_event) {
+ err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (err) {
+ dma_fence_put(&out_fence->f);
+ return err;
+ }
+ }
+
+ submit->out_fence = out_fence;
+ submit->fence_ctx = fence_ctx;
+ submit->ring_idx = ring_idx;
+ submit->out_fence_fd = -1;
+ submit->vfpriv = vfpriv;
+ submit->vgdev = vgdev;
+ submit->exbuf = exbuf;
+ submit->file = file;
+
+ err = virtio_gpu_init_submit_buflist(submit);
+ if (err)
+ return err;
+
+ submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
+ if (IS_ERR(submit->buf))
+ return PTR_ERR(submit->buf);
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
+ err = get_unused_fd_flags(O_CLOEXEC);
+ if (err < 0)
+ return err;
+
+ submit->out_fence_fd = err;
+
+ submit->sync_file = sync_file_create(&out_fence->f);
+ if (!submit->sync_file)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
+{
+ int ret = 0;
+
+ if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+ struct dma_fence *in_fence =
+ sync_file_get_fence(submit->exbuf->fence_fd);
+ if (!in_fence)
+ return -EINVAL;
+
+ /*
+ * Wait if the fence is from a foreign context, or if the
+ * fence array contains any fence from a foreign context.
+ */
+ ret = virtio_gpu_dma_fence_wait(submit, in_fence);
+
+ dma_fence_put(in_fence);
+ }
+
+ return ret;
+}
+
+static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
+{
+ if (submit->sync_file) {
+ submit->exbuf->fence_fd = submit->out_fence_fd;
+ fd_install(submit->out_fence_fd, submit->sync_file->file);
+ }
+}
+
+static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
+{
+ if (submit->buflist)
+ return virtio_gpu_array_lock_resv(submit->buflist);
+
+ return 0;
+}
+
+int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ u64 fence_ctx = vgdev->fence_drv.context;
+ struct drm_virtgpu_execbuffer *exbuf = data;
+ struct virtio_gpu_submit submit;
+ u32 ring_idx = 0;
+ int ret = -EINVAL;
+
+ if (!vgdev->has_virgl_3d)
+ return -ENOSYS;
+
+ if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
+ return ret;
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
+ if (exbuf->ring_idx >= vfpriv->num_rings)
+ return ret;
+
+ if (!vfpriv->base_fence_ctx)
+ return ret;
+
+ fence_ctx = vfpriv->base_fence_ctx;
+ ring_idx = exbuf->ring_idx;
+ }
+
+ virtio_gpu_create_context(dev, file);
+
+ ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
+ fence_ctx, ring_idx);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_parse_post_deps(&submit);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_parse_deps(&submit);
+ if (ret)
+ goto cleanup;
+
+ /*
+	 * Await in-fences at the end of the job submission path to
+ * optimize the path by proceeding directly to the submission
+ * to virtio after the waits.
+ */
+ ret = virtio_gpu_wait_in_fence(&submit);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_lock_buflist(&submit);
+ if (ret)
+ goto cleanup;
+
+ virtio_gpu_submit(&submit);
+
+ /*
+ * Set up user-out data after submitting the job to optimize
+ * the job submission path.
+ */
+ virtio_gpu_install_out_fence_fd(&submit);
+ virtio_gpu_process_post_deps(&submit);
+ virtio_gpu_complete_submit(&submit);
+cleanup:
+ virtio_gpu_cleanup_submit(&submit);
+
+ return ret;
+}
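
Editor's note: to show how the pieces above fit together from userspace, here is a hedged sketch of a submission that requests an out sync_file fd; "cmd_stream", "bo_handle" and "card_fd" are placeholders, not part of this patch:

    struct drm_virtgpu_execbuffer exbuf = {
            .flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
            .command        = (uintptr_t)cmd_stream,
            .size           = cmd_stream_size,
            .bo_handles     = (uintptr_t)&bo_handle,
            .num_bo_handles = 1,
            .fence_fd       = -1,
    };
    int out_fence_fd = -1;

    if (drmIoctl(card_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf) == 0)
            out_fence_fd = exbuf.fence_fd;  /* sync_file installed by the kernel */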
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 031bc77689d5..227bf0ae7ed5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -25,7 +25,7 @@ DECLARE_EVENT_CLASS(virtio_gpu_cmd,
TP_fast_assign(
__entry->dev = vq->vdev->index;
__entry->vq = vq->index;
- __assign_str(name, vq->name);
+ __assign_str(name);
__entry->type = le32_to_cpu(hdr->type);
__entry->flags = le32_to_cpu(hdr->flags);
__entry->fence_id = le64_to_cpu(hdr->fence_id);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index a04a9b20896d..0c194b4e9488 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -32,6 +32,7 @@
#include <linux/virtio_ring.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
@@ -86,6 +87,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
vgdev->vbufs = NULL;
}
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+ vbuf->resp_cb = NULL;
+ vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+ vbuf->resp_buf = (void *)vbuf->buf + size;
+ return vbuf;
+}
+
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
int size, int resp_size, void *resp_buf,
@@ -137,6 +154,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
virtio_gpu_resp_cb cb,
struct virtio_gpu_vbuffer **vbuffer_p,
@@ -220,6 +249,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
+
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -311,6 +341,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ int ret;
+
+ if (vgdev->has_indirect)
+ elemcnt = 1;
+
+ if (vq->num_free < elemcnt)
+ return -ENOMEM;
+
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ WARN_ON(ret);
+
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+ atomic_inc(&vgdev->pending_commands);
+
+ return 0;
+}
+
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence,
@@ -368,6 +426,32 @@ again:
return 0;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct scatterlist *sgs[3], vcmd, vresp;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
+ return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+ elemcnt, sgs,
+ outcnt, incnt);
+}
+
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence)
@@ -386,6 +470,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
+
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {
@@ -422,6 +507,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
return ret;
}
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+ bool notify;
+
+ if (!atomic_read(&vgdev->pending_commands))
+ return;
+
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
+
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
bool notify;
@@ -567,6 +667,29 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct virtio_gpu_resource_flush *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = NULL;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -591,6 +714,37 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_transfer_to_host_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
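
Editor's note: the *_panic_* helpers are meant to be callable from the drm_panic path, where sleeping and normal allocation are not allowed (hence GFP_ATOMIC and no fences). A minimal sketch, with placeholder names, of how a panic flush handler might chain them once the panic image is already resident on the host (otherwise virtio_gpu_panic_cmd_transfer_to_host_2d() would be queued first):

    static void panic_flush_sketch(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo,
                                   struct drm_rect *rect)
    {
            /* Ask the host to present the damaged rectangle... */
            virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle,
                                                rect->x1, rect->y1,
                                                drm_rect_width(rect),
                                                drm_rect_height(rect));
            /* ...and kick the control queue without taking any locks. */
            virtio_gpu_panic_notify(vgdev);
    }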
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -604,7 +758,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -645,6 +799,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -741,21 +912,21 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
uint32_t scanout = le32_to_cpu(cmd->scanout);
struct virtio_gpu_output *output;
- struct edid *new_edid, *old_edid;
+ const struct drm_edid *new_edid, *old_edid;
if (scanout >= vgdev->num_scanouts)
return;
output = vgdev->outputs + scanout;
- new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
- drm_connector_update_edid_property(&output->conn, new_edid);
+ new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
+ drm_edid_connector_update(&output->conn, new_edid);
spin_lock(&vgdev->display_info_lock);
- old_edid = output->edid;
- output->edid = new_edid;
+ old_edid = output->drm_edid;
+ output->drm_edid = new_edid;
spin_unlock(&vgdev->display_info_lock);
- kfree(old_edid);
+ drm_edid_free(old_edid);
wake_up(&vgdev->resp_wq);
}
@@ -923,8 +1094,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
cmd_p->context_init = cpu_to_le32(context_init);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
+ strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -1026,7 +1196,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1104,8 +1274,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry *ents,
unsigned int nents)
{
+ if (obj->attached)
+ return;
+
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
+
+ obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence)
+{
+ if (!obj->attached)
+ return;
+
+ virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+ fence);
+
+ obj->attached = false;
}
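
Editor's note: detach queues a command that the host completes asynchronously, so a caller releasing backing pages is expected to fence the detach and wait before freeing them. A hedged sketch under that assumption (placeholder context, not code from this patch):

    struct virtio_gpu_fence *fence;

    fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
    if (fence) {
            virtio_gpu_object_detach(vgdev, bo, fence);
            virtio_gpu_notify(vgdev);
            /* Backing pages may be released only after the host acks. */
            dma_fence_wait(&fence->f, false);
            dma_fence_put(&fence->f);
    }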
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1266,6 +1454,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
bo->created = true;
+
+ if (nents)
+ bo->attached = true;
}
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 6b45b0429fef..5ad3b7c6f73c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -37,6 +37,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
unsigned long vm_size = vma->vm_end - vma->vm_start;
+ unsigned long vm_end;
if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
return -EINVAL;
@@ -46,7 +47,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return -EINVAL;
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;
@@ -56,12 +57,14 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- /* Partial mappings of GEM buffers don't happen much in practice. */
- if (vm_size != vram->vram_node.size)
+ if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
+ return -EINVAL;
+
+ if (vm_end > vram->vram_node.size)
return -EINVAL;
ret = io_remap_pfn_range(vma, vma->vm_start,
- vram->vram_node.start >> PAGE_SHIFT,
+ (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
vm_size, vma->vm_page_prot);
return ret;
}
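
Editor's note: with the overflow and range checks above, a mapping with a non-zero page offset is now accepted as long as it stays within the VRAM node, instead of requiring the whole object to be mapped. A hedged userspace sketch mapping only the second page of a mappable VRAM blob, where "map_offset" is the mmap offset obtained for the object (e.g. via the VIRTGPU_MAP ioctl) and "card_fd" is a placeholder:

    void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                     card_fd, map_offset + 4096);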