Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig             12
-rw-r--r--  drivers/gpu/drm/virtio/Makefile             2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c    6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c   71
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c      137
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h      101
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c     46
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c       63
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c    342
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c      116
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c   123
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c    343
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c    223
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_submit.c   542
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_trace.h     26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c       273
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c      72
17 files changed, 1978 insertions(+), 520 deletions(-)
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 51ec7c3240c9..fc884fb57b7e 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO_MENU && MMU
select VIRTIO
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select VIRTIO_DMA_SHARED_BUFFER
@@ -11,3 +12,14 @@ config DRM_VIRTIO_GPU
QEMU based VMMs (like KVM or Xen).
If unsure say M.
+
+config DRM_VIRTIO_GPU_KMS
+ bool "Virtio GPU driver modesetting support"
+ depends on DRM_VIRTIO_GPU
+ default y
+ help
+ Enable modesetting support for virtio GPU driver. This can be
+ disabled in cases where only "headless" usage of the GPU is
+ required.
+
+ If unsure, say Y.
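
The new DRM_VIRTIO_GPU_KMS symbol lets "headless" guests (render/compute only, no scanout) compile the modesetting paths out while keeping the render node. A minimal guest config fragment for such a setup, as a sketch, would be:

        CONFIG_DRM=y
        CONFIG_DRM_VIRTIO_GPU=m
        # CONFIG_DRM_VIRTIO_GPU_KMS is not set

With KMS compiled out, virtio_gpu_init() (virtgpu_kms.c below) additionally clears DRIVER_MODESET and DRIVER_ATOMIC at runtime, so the device behaves as a render-only node.
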
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index b99fa4a73b68..d2e1788a8227 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -6,6 +6,6 @@
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \
virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
- virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
+ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o virtgpu_submit.o
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index c2b20e0ee030..3a68a16b58ae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -23,15 +23,18 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/string_helpers.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
static void virtio_gpu_add_bool(struct seq_file *m, const char *name,
bool value)
{
- seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
+ seq_printf(m, "%-16s : %s\n", name, str_yes_no(value));
}
static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value)
@@ -52,6 +55,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
vgdev->has_resource_assign_uuid);
virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a6caebd4a0dd..6a962c1d6e95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -27,10 +27,14 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "virtgpu_drv.h"
@@ -54,6 +58,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
@@ -65,6 +70,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
static int
virtio_gpu_framebuffer_init(struct drm_device *dev,
struct virtio_gpu_framebuffer *vgfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
@@ -72,7 +78,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd);
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
if (ret) {
@@ -97,6 +103,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(crtc);
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -106,6 +113,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ drm_crtc_vblank_off(crtc);
+
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
}
@@ -119,9 +128,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
- crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ struct drm_pending_vblank_event *event;
/*
* virtio-gpu can't do modeset and plane update operations
@@ -129,9 +139,22 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
* in the plane update callback, and here we just check
* whenever we must force the modeset.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
}
+
+ spin_unlock_irq(&dev->event_lock);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -163,11 +186,9 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = NULL;
int count, width, height;
- if (output->edid) {
- count = drm_add_edid_modes(connector, output->edid);
- if (count)
- return count;
- }
+ count = drm_edid_connector_add_modes(connector);
+ if (count)
+ return count;
width = le32_to_cpu(output->info.r.width);
height = le32_to_cpu(output->info.r.height);
@@ -179,6 +200,8 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
DRM_DEBUG("add mode: %dx%d\n", width, height);
mode = drm_cvt_mode(connector->dev, width, height, 60,
false, false, false);
+ if (!mode)
+ return count;
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
count++;
@@ -188,7 +211,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
}
static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct virtio_gpu_output *output =
drm_connector_to_virtio_gpu_output(connector);
@@ -256,6 +279,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor;
+ int ret;
output->index = index;
if (index == 0) {
@@ -270,8 +294,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
- drm_crtc_init_with_planes(dev, crtc, primary, cursor,
- &virtio_gpu_crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &virtio_gpu_crtc_funcs, NULL);
+ if (ret)
+ return ret;
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
@@ -292,6 +318,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj = NULL;
@@ -308,10 +335,12 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
- if (virtio_gpu_fb == NULL)
+ if (virtio_gpu_fb == NULL) {
+ drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
+ }
- ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
+ ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
drm_gem_object_put(obj);
@@ -331,6 +360,9 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
+ if (!vgdev->num_scanouts)
+ return 0;
+
ret = drmm_mode_config_init(vgdev->ddev);
if (ret)
return ret;
@@ -344,9 +376,15 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
vgdev->ddev->mode_config.max_width = XRES_MAX;
vgdev->ddev->mode_config.max_height = YRES_MAX;
+ vgdev->ddev->mode_config.fb_modifiers_not_supported = true;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
+ ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(vgdev->ddev);
return 0;
}
@@ -355,6 +393,9 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
int i;
+ if (!vgdev->num_scanouts)
+ return;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
- kfree(vgdev->outputs[i].edid);
+ drm_edid_free(vgdev->outputs[i].drm_edid);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ca77edbc5ea0..a5ce96fb8a1d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -26,18 +26,25 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
-#include <linux/console.h>
#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vgaarb.h>
+#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
+#define PCI_DEVICE_ID_VIRTIO_GPU 0x1050
+
static const struct drm_driver driver;
static int virtio_gpu_modeset = -1;
@@ -45,56 +52,23 @@ static int virtio_gpu_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
-static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+static int virtio_gpu_pci_quirk(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
const char *pname = dev_name(&pdev->dev);
- bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
- char unique[20];
+ bool vga = pci_is_vga(pdev);
int ret;
DRM_INFO("pci: %s detected at %s\n",
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
if (vga) {
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "virtiodrmfb");
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (ret)
return ret;
}
- /*
- * Normally the drm_dev_set_unique() call is done by core DRM.
- * The following comment covers, why virtio cannot rely on it.
- *
- * Unlike the other virtual GPU drivers, virtio abstracts the
- * underlying bus type by using struct virtio_device.
- *
- * Hence the dev_is_pci() check, used in core DRM, will fail
- * and the unique returned will be the virtio_device "virtio0",
- * while a "pci:..." one is required.
- *
- * A few other ideas were considered:
- * - Extend the dev_is_pci() check [in drm_set_busid] to
- * consider virtio.
- * Seems like a bigger hack than what we have already.
- *
- * - Point drm_device::dev to the parent of the virtio_device
- * Semantic changes:
- * * Using the wrong device for i2c, framebuffer_alloc and
- * prime import.
- * Visual changes:
- * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
- * will print the wrong information.
- *
- * We could address the latter issues, by introducing
- * drm_device::bus_dev, ... which would be used solely for this.
- *
- * So for the moment keep things as-is, with a bulky comment
- * for the next person who feels like removing this
- * drm_dev_set_unique() quirk.
- */
- snprintf(unique, sizeof(unique), "pci:%s", pname);
- return drm_dev_set_unique(dev, unique);
+ return 0;
}
static int virtio_gpu_probe(struct virtio_device *vdev)
@@ -102,24 +76,31 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
struct drm_device *dev;
int ret;
- if (vgacon_text_force() && virtio_gpu_modeset == -1)
+ if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1)
return -EINVAL;
if (virtio_gpu_modeset == 0)
return -EINVAL;
- dev = drm_dev_alloc(&driver, &vdev->dev);
+ /*
+ * The virtio-gpu device is a virtual device that has no DMA ops
+ * assigned to it, no DMA mask set, etc. Its parent is the actual
+ * GPU device, so use the parent as the DRM device in order to
+ * benefit from the generic DRM APIs.
+ */
+ dev = drm_dev_alloc(&driver, vdev->dev.parent);
if (IS_ERR(dev))
return PTR_ERR(dev);
vdev->priv = dev;
- if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
- ret = virtio_gpu_pci_quirk(dev, vdev);
+ if (dev_is_pci(vdev->dev.parent)) {
+ ret = virtio_gpu_pci_quirk(dev);
if (ret)
goto err_free;
}
- ret = virtio_gpu_init(dev);
+ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
+ ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
@@ -127,7 +108,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
if (ret)
goto err_deinit;
- drm_fbdev_generic_setup(vdev->priv, 32);
+ drm_client_setup(vdev->priv, NULL);
+
return 0;
err_deinit:
@@ -147,6 +129,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_put(dev);
}
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+ struct drm_device *dev = vdev->priv;
+
+ /* stop talking to the device */
+ drm_dev_unplug(dev);
+}
+
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
@@ -172,19 +162,56 @@ static unsigned int features[] = {
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
VIRTIO_GPU_F_RESOURCE_BLOB,
+ VIRTIO_GPU_F_CONTEXT_INIT,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
+ .shutdown = virtio_gpu_shutdown,
.config_changed = virtio_gpu_config_changed
};
-module_virtio_driver(virtio_gpu_driver);
+static int __init virtio_gpu_driver_init(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ PCI_DEVICE_ID_VIRTIO_GPU,
+ NULL);
+ if (pdev && pci_is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+ }
+
+ ret = register_virtio_driver(&virtio_gpu_driver);
+
+ if (pdev) {
+ if (pci_is_vga(pdev))
+ vga_put(pdev,
+ VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+
+ pci_dev_put(pdev);
+ }
+
+ return ret;
+}
+
+static void __exit virtio_gpu_driver_exit(void)
+{
+ unregister_virtio_driver(&virtio_gpu_driver);
+}
+
+module_init(virtio_gpu_driver_init);
+module_exit(virtio_gpu_driver_exit);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio GPU driver");
@@ -196,19 +223,22 @@ MODULE_AUTHOR("Alon Levy");
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
+ /*
+ * If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
+ * out via drm_device::driver_features:
+ */
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
.dumb_create = virtio_gpu_mode_dumb_create,
- .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
@@ -220,7 +250,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
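
Two notes on the virtgpu_drv.c changes above. Module init now briefly holds the VGA arbiter's legacy resources on a virtio-vga device across register_virtio_driver(), serializing the framebuffer takeover against concurrent legacy VGA users. The new shutdown callback only calls drm_dev_unplug(), which marks the device as gone so later entry points bail out instead of touching virtqueues the transport has already torn down. A minimal sketch of the guard this relies on (the function here is illustrative, not from the patch):

        static void virtio_gpu_some_hw_op(struct drm_device *dev)
        {
                int idx;

                if (!drm_dev_enter(dev, &idx))
                        return; /* device was unplugged (shutdown/remove) */

                /* ... safe to queue virtio commands here ... */

                drm_dev_exit(idx);
        }
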
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d9dbc4f258f3..f17660a71a3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -26,6 +26,7 @@
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H
+#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -34,8 +35,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
@@ -44,7 +45,6 @@
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -54,6 +54,12 @@
#define STATE_OK 1
#define STATE_ERR 2
+#define MAX_CAPSET_ID 63
+#define MAX_RINGS 64
+
+/* See virtio_gpu_ctx_create. One additional character for NULL terminator. */
+#define DEBUG_NAME_MAX_LEN 65
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -82,9 +88,11 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgt;
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool attached;
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
@@ -96,8 +104,6 @@ struct virtio_gpu_object {
struct virtio_gpu_object_shmem {
struct virtio_gpu_object base;
- struct sg_table *pages;
- uint32_t mapped;
};
struct virtio_gpu_object_vram {
@@ -117,7 +123,7 @@ struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
u32 nents, total;
- struct drm_gem_object *objs[];
+ struct drm_gem_object *objs[] __counted_by(total);
};
struct virtio_gpu_vbuffer;
@@ -134,9 +140,17 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
+struct virtio_gpu_fence_event {
+ struct drm_pending_event base;
+ struct drm_event event;
+};
+
struct virtio_gpu_fence {
struct dma_fence f;
+ uint32_t ring_idx;
uint64_t fence_id;
+ bool emit_fence_info;
+ struct virtio_gpu_fence_event *e;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
@@ -155,6 +169,8 @@ struct virtio_gpu_vbuffer {
struct virtio_gpu_object_array *objs;
struct list_head list;
+
+ uint32_t seqno;
};
struct virtio_gpu_output {
@@ -164,7 +180,7 @@ struct virtio_gpu_output {
struct drm_encoder enc;
struct virtio_gpu_display_one info;
struct virtio_gpu_update_cursor cursor;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int cur_x;
int cur_y;
bool needs_modeset;
@@ -179,11 +195,19 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
+struct virtio_gpu_plane_state {
+ struct drm_plane_state base;
+ struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+ container_of(x, struct virtio_gpu_plane_state, base)
+
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
wait_queue_head_t ack_queue;
struct work_struct dequeue_work;
+ uint32_t seqno;
};
struct virtio_gpu_drv_capset {
@@ -202,7 +226,6 @@ struct virtio_gpu_drv_cap_cache {
};
struct virtio_gpu_device {
- struct device *dev;
struct drm_device *ddev;
struct virtio_device *vdev;
@@ -233,6 +256,7 @@ struct virtio_gpu_device {
bool has_resource_assign_uuid;
bool has_resource_blob;
bool has_host_visible;
+ bool has_context_init;
struct virtio_shm_region host_visible_region;
struct drm_mm host_visible_mm;
@@ -244,6 +268,7 @@ struct virtio_gpu_device {
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
+ uint64_t capset_id_mask;
struct list_head cap_cache;
/* protects uuid state when exporting */
@@ -254,17 +279,23 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ uint32_t context_init;
bool context_created;
+ uint32_t num_rings;
+ uint64_t base_fence_ctx;
+ uint64_t ring_idx_mask;
struct mutex context_lock;
+ char debug_name[DEBUG_NAME_MAX_LEN];
+ bool explicit_debug_name;
};
/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 11
+#define DRM_VIRTIO_NUM_IOCTLS 12
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
/* virtgpu_kms.c */
-int virtio_gpu_init(struct drm_device *dev);
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -278,10 +309,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
@@ -306,16 +335,27 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo);
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
- uint32_t width, uint32_t height);
+ uint32_t width, uint32_t height,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
@@ -324,8 +364,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
struct virtio_gpu_mem_entry *ents,
unsigned int nents);
-int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
-int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -335,7 +377,8 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name);
+ uint32_t context_init, uint32_t nlen,
+ const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -373,11 +416,9 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
-void virtio_gpu_fence_ack(struct virtqueue *vq);
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
-void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
int
@@ -415,8 +456,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
int index);
/* virtgpu_fence.c */
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(
- struct virtio_gpu_device *vgdev);
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
@@ -443,11 +485,13 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
int flags);
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf);
-int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
- uuid_t *uuid);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach);
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
@@ -457,4 +501,15 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr);
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir);
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* virtgpu_submit.c */
+int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
#endif
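
The new per-file fields in struct virtio_gpu_fpriv fit together as follows: VIRTGPU_CONTEXT_PARAM_NUM_RINGS bounds the valid ring indices (0 .. num_rings-1, with num_rings <= MAX_RINGS), base_fence_ctx is the first of num_rings dma-fence contexts reserved for the file, and ring_idx_mask selects which rings may post poll()-able fence events. A hypothetical helper, not part of the patch, shows the intended test:

        static inline bool virtio_gpu_ring_polls(const struct virtio_gpu_fpriv *vfpriv,
                                                 u32 ring_idx)
        {
                return ring_idx < vfpriv->num_rings &&
                       (vfpriv->ring_idx_mask & BIT_ULL(ring_idx));
        }
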
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index d28e25e8409b..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,44 +49,35 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx)
{
+ uint64_t fence_context = base_fence_ctx + ring_idx;
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
GFP_KERNEL);
+
if (!fence)
return fence;
fence->drv = drv;
+ fence->ring_idx = ring_idx;
+ fence->emit_fence_info = !(base_fence_ctx == drv->context);
/* This only partially initializes the fence because the seqno is
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
- 0);
+
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+ fence_context, 0);
return fence;
}
@@ -108,6 +99,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+ /* Only currently defined fence param. */
+ if (fence->emit_fence_info) {
+ cmd_hdr->flags |=
+ cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+ cmd_hdr->ring_idx = (u8)fence->ring_idx;
+ }
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -138,11 +136,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
continue;
dma_fence_signal_locked(&curr->f);
+ if (curr->e) {
+ drm_send_event(vgdev->ddev, &curr->e->base);
+ curr->e = NULL;
+ }
+
list_del(&curr->node);
dma_fence_put(&curr->f);
}
dma_fence_signal_locked(&signaled->f);
+ if (signaled->e) {
+ drm_send_event(vgdev->ddev, &signaled->e->base);
+ signaled->e = NULL;
+ }
+
list_del(&signaled->node);
dma_fence_put(&signaled->f);
break;
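
The reworked allocator gives each ring its own dma-fence context: the fence is initialized with base_fence_ctx + ring_idx, so only same-ring fences are implicitly ordered against each other. A worked example with assumed numbers:

        /* VIRTGPU_CONTEXT_PARAM_NUM_RINGS = 3 reserves three contexts once: */
        vfpriv->base_fence_ctx = dma_fence_context_alloc(3); /* say, returns 100 */

        /* a fence for ring 2 then lives in dma-fence context 100 + 2 = 102 */
        fence = virtio_gpu_fence_alloc(vgdev, vfpriv->base_fence_ctx, 2);

emit_fence_info is true for such fences (base_fence_ctx != drv->context), which is what makes virtio_gpu_fence_emit() set VIRTIO_GPU_FLAG_INFO_RING_IDX and forward ring_idx to the host.
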
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 2de61b63ef91..90c99d83c4cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- struct drm_gem_object *gobj;
-
- BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
- *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
- drm_gem_object_put(gobj);
- return 0;
-}
-
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file)
{
@@ -127,15 +112,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
/* the context might still be missing when the first ioctl is
* DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
*/
- virtio_gpu_create_context(obj->dev, file);
+ if (!vgdev->has_context_init)
+ virtio_gpu_create_context(obj->dev, file);
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return -ENOMEM;
- virtio_gpu_array_add_obj(objs, obj);
+ if (vfpriv->context_created) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
+
+ virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+ }
- virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- objs);
out_notify:
virtio_gpu_notify(vgdev);
return 0;
@@ -161,6 +149,20 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_notify(vgdev);
}
+/* For drm panic */
+struct virtio_gpu_object_array *virtio_gpu_panic_array_alloc(void)
+{
+ struct virtio_gpu_object_array *objs;
+
+ objs = kmalloc(sizeof(struct virtio_gpu_object_array), GFP_ATOMIC);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = 1;
+ return objs;
+}
+
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
@@ -214,6 +216,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
+ unsigned int i;
int ret;
if (objs->nents == 1) {
@@ -222,6 +225,16 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
ret = drm_gem_lock_reservations(objs->objs, objs->nents,
&objs->ticket);
}
+ if (ret)
+ return ret;
+
+ for (i = 0; i < objs->nents; ++i) {
+ ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
+ if (ret) {
+ virtio_gpu_array_unlock_resv(objs);
+ return ret;
+ }
+ }
return ret;
}
@@ -241,13 +254,17 @@ void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
int i;
for (i = 0; i < objs->nents; i++)
- dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+ dma_resv_add_fence(objs->objs[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
}
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
u32 i;
+ if (!objs)
+ return;
+
for (i = 0; i < objs->nents; i++)
drm_gem_object_put(objs->objs[i]);
virtio_gpu_array_free(objs);
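
virtio_gpu_array_lock_resv() now reserves one fence slot per object while the reservations are locked, which is the contract that makes the later dma_resv_add_fence() in virtio_gpu_array_add_fence() unable to fail. A condensed sketch of a caller:

        ret = virtio_gpu_array_lock_resv(objs); /* lock + reserve 1 slot each */
        if (ret)
                return ret;

        /* ... queue the command that will signal the fence ... */

        virtio_gpu_array_add_fence(objs, &fence->f); /* cannot fail: slot reserved */
        virtio_gpu_array_unlock_resv(objs);
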
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5c1ad1596889..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,20 +38,37 @@
VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+/* Must be called with &virtio_gpu_fpriv.context_lock held. */
+static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ if (vfpriv->explicit_debug_name) {
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init,
+ strlen(vfpriv->debug_name),
+ vfpriv->debug_name);
+ } else {
+ char dbgname[TASK_COMM_LEN];
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+ }
+
+ vfpriv->context_created = true;
+}
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- char dbgname[TASK_COMM_LEN];
mutex_lock(&vfpriv->context_lock);
if (vfpriv->context_created)
goto out_unlock;
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- strlen(dbgname), dbgname);
- vfpriv->context_created = true;
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
out_unlock:
mutex_unlock(&vfpriv->context_lock);
@@ -63,144 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
- virtio_gpu_map->handle,
- &virtio_gpu_map->offset);
-}
-
-/*
- * Usage of execbuffer:
- * Relocations need to take into account the full VIRTIO_GPUDrawable size.
- * However, the command as passed from user space must *not* contain the initial
- * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
- */
-static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_virtgpu_execbuffer *exbuf = data;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_fence *out_fence;
- int ret;
- uint32_t *bo_handles = NULL;
- void __user *user_bo_handles = NULL;
- struct virtio_gpu_object_array *buflist = NULL;
- struct sync_file *sync_file;
- int in_fence_fd = exbuf->fence_fd;
- int out_fence_fd = -1;
- void *buf;
-
- if (vgdev->has_virgl_3d == false)
- return -ENOSYS;
-
- if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
- return -EINVAL;
-
- exbuf->fence_fd = -1;
-
- virtio_gpu_create_context(dev, file);
- if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
- struct dma_fence *in_fence;
-
- in_fence = sync_file_get_fence(in_fence_fd);
-
- if (!in_fence)
- return -EINVAL;
-
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- ret = 0;
- if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
- ret = dma_fence_wait(in_fence, true);
-
- dma_fence_put(in_fence);
- if (ret)
- return ret;
- }
-
- if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
- out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
- if (out_fence_fd < 0)
- return out_fence_fd;
- }
-
- if (exbuf->num_bo_handles) {
- bo_handles = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(uint32_t), GFP_KERNEL);
- if (!bo_handles) {
- ret = -ENOMEM;
- goto out_unused_fd;
- }
-
- user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
- if (copy_from_user(bo_handles, user_bo_handles,
- exbuf->num_bo_handles * sizeof(uint32_t))) {
- ret = -EFAULT;
- goto out_unused_fd;
- }
-
- buflist = virtio_gpu_array_from_handles(file, bo_handles,
- exbuf->num_bo_handles);
- if (!buflist) {
- ret = -ENOENT;
- goto out_unused_fd;
- }
- kvfree(bo_handles);
- bo_handles = NULL;
- }
-
- buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
- goto out_unused_fd;
- }
-
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_memdup;
- }
-
- out_fence = virtio_gpu_fence_alloc(vgdev);
- if(!out_fence) {
- ret = -ENOMEM;
- goto out_unresv;
- }
-
- if (out_fence_fd >= 0) {
- sync_file = sync_file_create(&out_fence->f);
- if (!sync_file) {
- dma_fence_put(&out_fence->f);
- ret = -ENOMEM;
- goto out_unresv;
- }
-
- exbuf->fence_fd = out_fence_fd;
- fd_install(out_fence_fd, sync_file->file);
- }
-
- virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, buflist, out_fence);
- dma_fence_put(&out_fence->f);
- virtio_gpu_notify(vgdev);
- return 0;
-
-out_unresv:
- if (buflist)
- virtio_gpu_array_unlock_resv(buflist);
-out_memdup:
- kvfree(buf);
-out_unused_fd:
- kvfree(bo_handles);
- if (buflist)
- virtio_gpu_array_put_free(buflist);
-
- if (out_fence_fd >= 0)
- put_unused_fd(out_fence_fd);
-
- return ret;
+ return drm_gem_dumb_map_offset(file, vgdev->ddev,
+ virtio_gpu_map->handle,
+ &virtio_gpu_map->offset);
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
@@ -226,6 +108,15 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_CROSS_DEVICE:
value = vgdev->has_resource_assign_uuid ? 1 : 0;
break;
+ case VIRTGPU_PARAM_CONTEXT_INIT:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
+ value = vgdev->capset_id_mask;
+ break;
+ case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -278,7 +169,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (params.size == 0)
params.size = PAGE_SIZE;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence)
return -ENOMEM;
ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -292,10 +183,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
return ret;
}
- drm_gem_object_put(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
+
+ /*
+ * The handle owns the reference now. But we must drop our
+ * remaining reference *after* we no longer need to dereference
+ * the obj. Otherwise userspace could guess the handle and
+ * race closing it from another thread.
+ */
+ drm_gem_object_put(obj);
+
return 0;
}
@@ -357,7 +256,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence) {
ret = -ENOMEM;
goto err_unlock;
@@ -417,7 +316,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
goto err_put_free;
ret = -ENOMEM;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!fence)
goto err_unlock;
@@ -451,9 +351,10 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
} else {
- ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
+ true, timeout);
}
if (ret == 0)
ret = -EBUSY;
@@ -512,8 +413,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
spin_unlock(&vgdev->display_info_lock);
/* not in cache - need to talk to hw */
- virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
- &cache_ent);
+ ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
+ &cache_ent);
+ if (ret)
+ return ret;
virtio_gpu_notify(vgdev);
copy_exit:
@@ -542,8 +445,7 @@ static int verify_blob(struct virtio_gpu_device *vgdev,
if (!vgdev->has_resource_blob)
return -EINVAL;
- if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
- !rc_blob->blob_flags)
+ if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
return -EINVAL;
if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
@@ -654,14 +556,143 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
drm_gem_object_release(obj);
return ret;
}
- drm_gem_object_put(obj);
rc_blob->res_handle = bo->hw_res_handle;
rc_blob->bo_handle = handle;
+ /*
+ * The handle owns the reference now. But we must drop our
+ * remaining reference *after* we no longer need to dereference
+ * the obj. Otherwise userspace could guess the handle and
+ * race closing it from another thread.
+ */
+ drm_gem_object_put(obj);
+
return 0;
}
+static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t num_params, i;
+ uint64_t valid_ring_mask, param, value;
+ size_t len;
+ struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_context_init *args = data;
+
+ num_params = args->num_params;
+ len = num_params * sizeof(struct drm_virtgpu_context_set_param);
+
+ if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Number of unique parameters supported at this time. */
+ if (num_params > 4)
+ return -EINVAL;
+
+ ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
+ len);
+
+ if (IS_ERR(ctx_set_params))
+ return PTR_ERR(ctx_set_params);
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < num_params; i++) {
+ param = ctx_set_params[i].param;
+ value = ctx_set_params[i].value;
+
+ switch (param) {
+ case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
+ if (value > MAX_CAPSET_ID) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Context capset ID already set */
+ if (vfpriv->context_init &
+ VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->context_init |= value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
+ if (vfpriv->base_fence_ctx) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (value > MAX_RINGS) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
+ vfpriv->num_rings = value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
+ if (vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->ring_idx_mask = value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
+ if (vfpriv->explicit_debug_name) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = strncpy_from_user(vfpriv->debug_name,
+ u64_to_user_ptr(value),
+ DEBUG_NAME_MAX_LEN - 1);
+ if (ret < 0)
+ goto out_unlock;
+
+ vfpriv->explicit_debug_name = true;
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ if (vfpriv->ring_idx_mask) {
+ valid_ring_mask = 0;
+ for (i = 0; i < vfpriv->num_rings; i++)
+ valid_ring_mask |= 1ULL << i;
+
+ if (~valid_ring_mask & vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
+ virtio_gpu_notify(vgdev);
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+ kfree(ctx_set_params);
+ return ret;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -698,4 +729,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
+ DRM_RENDER_ALLOW),
};
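
DRM_IOCTL_VIRTGPU_CONTEXT_INIT must be the first context-creating call on the file (it returns -EEXIST once a context exists). A hypothetical userspace sketch using the UAPI structs from include/uapi/drm/virtgpu_drm.h and libdrm's drmIoctl(); capset id 2 and the ring counts are only example values:

        struct drm_virtgpu_context_set_param params[3] = {
                { .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,       .value = 2 },
                { .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,       .value = 2 },
                { .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0x3 },
        };
        struct drm_virtgpu_context_init init = {
                .num_params = 3,
                .ctx_set_params = (uintptr_t)params,
        };

        if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init) < 0)
                handle_error(); /* hypothetical; EINVAL for bad params, EEXIST if too late */

POLL_RINGS_MASK bits must stay within the rings requested in the same call; that is the valid_ring_mask check in the handler above.
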
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index f3379059f324..f3594695bb82 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -28,6 +28,8 @@
#include <linux/virtio_ring.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
@@ -42,11 +44,13 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- drm_helper_hpd_irq_event(vgdev->ddev);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ drm_helper_hpd_irq_event(vgdev->ddev);
+ }
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
@@ -65,10 +69,12 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
int num_capsets)
{
int i, ret;
+ bool invalid_capset_id = false;
+ struct drm_device *drm = vgdev->ddev;
- vgdev->capsets = kcalloc(num_capsets,
- sizeof(struct virtio_gpu_drv_capset),
- GFP_KERNEL);
+ vgdev->capsets = drmm_kcalloc(drm, num_capsets,
+ sizeof(struct virtio_gpu_drv_capset),
+ GFP_KERNEL);
if (!vgdev->capsets) {
DRM_ERROR("failed to allocate cap sets\n");
return;
@@ -78,46 +84,59 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
- if (ret == 0) {
+ /*
+ * Capability ids are defined in the virtio-gpu spec and are
+ * between 1 and 63, inclusive.
+ */
+ if (!vgdev->capsets[i].id ||
+ vgdev->capsets[i].id > MAX_CAPSET_ID)
+ invalid_capset_id = true;
+
+ if (ret == 0)
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ else if (invalid_capset_id)
+ DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
+
+ if (ret == 0 || invalid_capset_id) {
spin_lock(&vgdev->display_info_lock);
- kfree(vgdev->capsets);
+ drmm_kfree(drm, vgdev->capsets);
vgdev->capsets = NULL;
spin_unlock(&vgdev->display_info_lock);
return;
}
+
+ vgdev->capset_id_mask |= 1ULL << vgdev->capsets[i].id;
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
i, vgdev->capsets[i].id,
vgdev->capsets[i].max_version,
vgdev->capsets[i].max_size);
}
+
vgdev->num_capsets = num_capsets;
}
-int virtio_gpu_init(struct drm_device *dev)
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
- static vq_callback_t *callbacks[] = {
- virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
+ struct virtqueue_info vqs_info[] = {
+ { "control", virtio_gpu_ctrl_ack },
+ { "cursor", virtio_gpu_cursor_ack },
};
- static const char * const names[] = { "control", "cursor" };
-
struct virtio_gpu_device *vgdev;
/* this will expand later */
struct virtqueue *vqs[2];
u32 num_scanouts, num_capsets;
int ret = 0;
- if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
- vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
+ vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
if (!vgdev)
return -ENOMEM;
vgdev->ddev = dev;
dev->dev_private = vgdev;
- vgdev->vdev = dev_to_virtio(dev->dev);
- vgdev->dev = dev->dev;
+ vgdev->vdev = vdev;
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
@@ -144,18 +163,18 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
#endif
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
vgdev->has_edid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
vgdev->has_indirect = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
vgdev->has_resource_assign_uuid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
vgdev->has_resource_blob = true;
- }
+
if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -176,13 +195,19 @@ int virtio_gpu_init(struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.len);
}
- DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
+ vgdev->has_context_init = true;
+
+ DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
vgdev->has_edid ? '+' : '-',
vgdev->has_resource_blob ? '+' : '-',
vgdev->has_host_visible ? '+' : '-');
- ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
+ DRM_INFO("features: %ccontext_init\n",
+ vgdev->has_context_init ? '+' : '-');
+
+ ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
goto err_vqs;
@@ -200,12 +225,15 @@ int virtio_gpu_init(struct drm_device *dev)
num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
- if (!vgdev->num_scanouts) {
- DRM_ERROR("num_scanouts is zero\n");
- ret = -EINVAL;
- goto err_scanouts;
+
+ if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
+ DRM_INFO("KMS disabled\n");
+ vgdev->num_scanouts = 0;
+ vgdev->has_edid = false;
+ dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ } else {
+ DRM_INFO("number of scanouts: %d\n", num_scanouts);
}
- DRM_INFO("number of scanouts: %d\n", num_scanouts);
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
num_capsets, &num_capsets);
@@ -221,12 +249,14 @@ int virtio_gpu_init(struct drm_device *dev)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
- 5 * HZ);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+ 5 * HZ);
+ }
return 0;
err_scanouts:
@@ -235,7 +265,6 @@ err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
dev->dev_private = NULL;
- kfree(vgdev);
return ret;
}
@@ -257,7 +286,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
- vgdev->vdev->config->reset(vgdev->vdev);
+ virtio_reset_device(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
}
@@ -274,9 +303,6 @@ void virtio_gpu_release(struct drm_device *dev)
if (vgdev->has_host_visible)
drm_mm_takedown(&vgdev->host_visible_mm);
-
- kfree(vgdev->capsets);
- kfree(vgdev);
}
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
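
capset_id_mask packs the reported capset ids into the u64 that VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs returns. A worked example with assumed ids, e.g. a host exposing capsets 1 (VIRGL) and 2 (VIRGL2):

        vgdev->capset_id_mask == (1ULL << 1) | (1ULL << 2); /* == 0x6 */

Userspace may then pass VIRTGPU_CONTEXT_PARAM_CAPSET_ID of 1 or 2; any other id fails the mask test in the CONTEXT_INIT ioctl.
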
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f648b0e24447..4270bfede7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -26,6 +26,8 @@
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
+#include <drm/drm_print.h>
+
#include "virtgpu_drv.h"
static int virtio_gpu_virglrenderer_workaround = 1;
@@ -47,6 +49,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
if (handle < 0)
return handle;
*resid = handle + 1;
@@ -56,9 +59,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
- if (!virtio_gpu_virglrenderer_workaround) {
+ if (!virtio_gpu_virglrenderer_workaround)
ida_free(&vgdev->resource_ida, id - 1);
- }
}
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
@@ -67,22 +69,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
if (virtio_gpu_is_shmem(bo)) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-
- if (shmem->pages) {
- if (shmem->mapped) {
- dma_unmap_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- shmem->mapped = 0;
- }
-
- sg_free_table(shmem->pages);
- kfree(shmem->pages);
- shmem->pages = NULL;
- drm_gem_shmem_unpin(&bo->base.base);
- }
-
- drm_gem_shmem_free_object(&bo->base.base);
+ drm_gem_shmem_free(&bo->base);
} else if (virtio_gpu_is_vram(bo)) {
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
@@ -95,6 +82,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
drm_gem_free_mmap_offset(&vram->base.base.base);
drm_gem_object_release(&vram->base.base.base);
kfree(vram);
+ } else {
+ drm_gem_object_release(&bo->base.base);
+ kfree(bo);
}
}
@@ -112,19 +102,40 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
virtio_gpu_cleanup_object(bo);
}
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_fence *fence;
+
+ if (!bo->attached)
+ return 0;
+
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+ if (!fence)
+ return -ENOMEM;
+
+ virtio_gpu_object_detach(vgdev, bo, fence);
+ virtio_gpu_notify(vgdev);
+
+ dma_fence_wait(&fence->f, false);
+ dma_fence_put(&fence->f);
+
+ return 0;
+}
+
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
-
- .print_info = drm_gem_shmem_print_info,
+ .print_info = drm_gem_shmem_object_print_info,
.export = virtgpu_gem_prime_export,
- .pin = drm_gem_shmem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
- .vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
@@ -140,7 +151,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dshmem = &shmem->base.base;
dshmem->base.funcs = &virtio_gpu_shmem_funcs;
@@ -153,35 +164,18 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
unsigned int *nents)
{
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
- int si, ret;
+ struct sg_table *pages;
+ int si;
- ret = drm_gem_shmem_pin(&bo->base.base);
- if (ret < 0)
- return -EINVAL;
-
- /*
- * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
- * drm_gem_shmem_get_pages_sgt because virtio has it's own set of
- * dma-ops. This is discouraged for other drivers, but should be fine
- * since virtio_gpu doesn't support dma-buf import from other devices.
- */
- shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
- if (!shmem->pages) {
- drm_gem_shmem_unpin(&bo->base.base);
- return -EINVAL;
- }
+ pages = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- if (use_dma_api) {
- ret = dma_map_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- if (ret)
- return ret;
- *nents = shmem->mapped = shmem->pages->nents;
- } else {
- *nents = shmem->pages->orig_nents;
- }
+ if (use_dma_api)
+ *nents = pages->nents;
+ else
+ *nents = pages->orig_nents;
*ents = kvmalloc_array(*nents,
sizeof(struct virtio_gpu_mem_entry),
@@ -192,13 +186,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
}
if (use_dma_api) {
- for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+ for_each_sgtable_dma_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
(*ents)[si].padding = 0;
}
} else {
- for_each_sgtable_sg(shmem->pages, sg, si) {
+ for_each_sgtable_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
(*ents)[si].length = cpu_to_le32(sg->length);
(*ents)[si].padding = 0;
@@ -216,7 +210,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- struct virtio_gpu_mem_entry *ents;
+ struct virtio_gpu_mem_entry *ents = NULL;
unsigned int nents;
int ret;
@@ -234,11 +228,15 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo->dumb = params->dumb;
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0)
+ goto err_put_id;
+
if (fence) {
ret = -ENOMEM;
objs = virtio_gpu_array_alloc(1);
if (!objs)
- goto err_put_id;
+ goto err_free_entry;
virtio_gpu_array_add_obj(objs, &bo->base.base);
ret = virtio_gpu_array_lock_resv(objs);
@@ -246,13 +244,6 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
goto err_put_objs;
}
- ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
- if (ret != 0) {
- virtio_gpu_array_put_free(objs);
- virtio_gpu_free_object(&shmem_obj->base);
- return ret;
- }
-
if (params->blob) {
if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
bo->guest_blob = true;
@@ -274,9 +265,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
err_put_objs:
virtio_gpu_array_put_free(objs);
+err_free_entry:
+ kvfree(ents);
err_put_id:
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
- drm_gem_shmem_free_object(&shmem_obj->base);
+ drm_gem_shmem_free(shmem_obj);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4e1b17548007..a7863f8ee4ee 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,7 +26,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
@@ -67,18 +71,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
-static void virtio_gpu_plane_destroy(struct drm_plane *plane)
+static struct drm_plane_state *
+virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
{
- drm_plane_cleanup(plane);
- kfree(plane);
+ struct virtio_gpu_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
}
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = virtio_gpu_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
@@ -87,6 +101,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
struct drm_crtc_state *crtc_state;
int ret;
@@ -94,18 +110,50 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
return 0;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ new_plane_state->ignore_damage_clips = true;
+
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
is_cursor, true);
return ret;
}
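
For context on the damage handling above: when ignore_damage_clips is left unset, the plane-update path walks the commit's damage clips through the stock damage helpers, and setting the flag collapses the iterator to a single full-plane rectangle. A minimal sketch of that iteration, not part of this patch (function name hypothetical):

#include <drm/drm_damage_helper.h>

/* Sketch: walk the damage clips carried by an atomic commit. With
 * ignore_damage_clips set, the iterator yields one full-plane rect.
 */
static void example_upload_damage(struct drm_plane_state *old_state,
				  struct drm_plane_state *new_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect rect;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &rect) {
		/* transfer each rect to the host, then flush it */
	}
}
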
+/* For drm_panic */
+static int virtio_gpu_panic_update_dumb_bo(struct virtio_gpu_device *vgdev,
+ struct drm_plane_state *state,
+ struct drm_rect *rect)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(state->fb->obj[0]);
+ struct virtio_gpu_object_array *objs;
+ uint32_t w = rect->x2 - rect->x1;
+ uint32_t h = rect->y2 - rect->y1;
+ uint32_t x = rect->x1;
+ uint32_t y = rect->y1;
+ uint32_t off = x * state->fb->format->cpp[0] +
+ y * state->fb->pitches[0];
+
+ objs = virtio_gpu_panic_array_alloc();
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ return virtio_gpu_panic_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ objs);
+}
+
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
struct drm_plane_state *state,
struct drm_rect *rect)
@@ -129,6 +177,58 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
objs, NULL);
}
+/* For drm_panic */
+static void virtio_gpu_panic_resource_flush(struct drm_plane *plane,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ virtio_gpu_panic_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height);
+ virtio_gpu_panic_notify(vgdev);
+}
+
+static void virtio_gpu_resource_flush(struct drm_plane *plane,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct virtio_gpu_object *bo;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ if (vgplane_st->fence) {
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ virtio_gpu_array_lock_resv(objs);
+ virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height, objs,
+ vgplane_st->fence);
+ virtio_gpu_notify(vgdev);
+ dma_fence_wait_timeout(&vgplane_st->fence->f, true,
+ msecs_to_jiffies(50));
+ } else {
+ virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ width, height, NULL, NULL);
+ virtio_gpu_notify(vgdev);
+ }
+}
+
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -198,49 +298,128 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
}
}
- virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
- rect.x1,
- rect.y1,
- rect.x2 - rect.x1,
- rect.y2 - rect.y1);
- virtio_gpu_notify(vgdev);
+ virtio_gpu_resource_flush(plane,
+ rect.x1,
+ rect.y1,
+ rect.x2 - rect.x1,
+ rect.y2 - rect.y1);
}
-static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+ struct drm_plane_state *new_state,
+ struct drm_gem_object *obj)
+{
+ struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret) {
+ dma_resv_unlock(resv);
+ return ret;
+ }
+
+ if (!bo->sgt) {
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+ bo, attach);
+ if (ret)
+ goto err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
+
+ dma_resv_unlock(resv);
+ return 0;
+
+err:
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+ return ret;
+}
+
+static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+ int ret;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev);
- if (!vgfb->fence)
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
+ return 0;
+
+ obj = new_state->fb->obj[0];
+ if (bo->dumb || drm_gem_is_imported(obj)) {
+ vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+ vgdev->fence_drv.context,
+ 0);
+ if (!vgplane_st->fence)
return -ENOMEM;
}
+ if (drm_gem_is_imported(obj)) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ goto err_fence;
+ }
+
return 0;
+
+err_fence:
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
+ }
+
+ return ret;
}
-static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
{
- struct virtio_gpu_framebuffer *vgfb;
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+}
- if (!plane->state->fb)
+static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct drm_gem_object *obj;
+
+ if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- if (vgfb->fence) {
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ vgplane_st = to_virtio_gpu_plane_state(state);
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
}
+
+ obj = state->fb->obj[0];
+ if (drm_gem_is_imported(obj))
+ virtio_gpu_cleanup_imported_obj(obj);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -252,6 +431,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
@@ -264,6 +444,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
@@ -283,27 +464,25 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, 0,
plane->state->crtc_w,
plane->state->crtc_h,
- 0, 0, objs, vgfb->fence);
+ 0, 0, objs, vgplane_st->fence);
virtio_gpu_notify(vgdev);
- dma_fence_wait(&vgfb->fence->f, true);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ dma_fence_wait(&vgplane_st->fence->f, true);
}
if (plane->state->fb != old_state->fb) {
DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
plane->state->crtc_x,
plane->state->crtc_y,
- plane->state->fb ? plane->state->fb->hot_x : 0,
- plane->state->fb ? plane->state->fb->hot_y : 0);
+ plane->state->hotspot_x,
+ plane->state->hotspot_y);
output->cursor.hdr.type =
cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
output->cursor.resource_id = cpu_to_le32(handle);
if (plane->state->fb) {
output->cursor.hot_x =
- cpu_to_le32(plane->state->fb->hot_x);
+ cpu_to_le32(plane->state->hotspot_x);
output->cursor.hot_y =
- cpu_to_le32(plane->state->fb->hot_y);
+ cpu_to_le32(plane->state->hotspot_y);
} else {
output->cursor.hot_x = cpu_to_le32(0);
output->cursor.hot_y = cpu_to_le32(0);
@@ -320,14 +499,76 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
virtio_gpu_cursor_ping(vgdev, output);
}
+static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct virtio_gpu_object *bo;
+
+ if (!plane->state || !plane->state->fb || !plane->state->visible)
+ return -ENODEV;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
+ return -ENODEV;
+
+ if (bo->base.vaddr) {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ } else {
+ struct drm_gem_shmem_object *shmem = &bo->base;
+
+ if (!shmem->pages)
+ return -ENODEV;
+ /* the panic handler will map the pages later */
+ sb->pages = shmem->pages;
+ }
+
+ sb->format = plane->state->fb->format;
+ sb->height = plane->state->fb->height;
+ sb->width = plane->state->fb->width;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ return 0;
+}
+
+static void virtio_panic_flush(struct drm_plane *plane)
+{
+ struct virtio_gpu_object *bo;
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct drm_rect rect;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
+
+ if (bo->dumb) {
+ if (virtio_gpu_panic_update_dumb_bo(vgdev, plane->state,
+ &rect))
+ return;
+ }
+
+ virtio_gpu_panic_resource_flush(plane,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+}
+
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
+ .prepare_fb = virtio_gpu_plane_prepare_fb,
+ .cleanup_fb = virtio_gpu_plane_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_primary_plane_update,
+ .get_scanout_buffer = virtio_drm_get_scanout_buffer,
+ .panic_flush = virtio_panic_flush,
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
- .prepare_fb = virtio_gpu_cursor_prepare_fb,
- .cleanup_fb = virtio_gpu_cursor_cleanup_fb,
+ .prepare_fb = virtio_gpu_plane_prepare_fb,
+ .cleanup_fb = virtio_gpu_plane_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_cursor_plane_update,
};
@@ -340,11 +581,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
const uint32_t *formats;
- int ret, nformats;
-
- plane = kzalloc(sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return ERR_PTR(-ENOMEM);
+ int nformats;
if (type == DRM_PLANE_TYPE_CURSOR) {
formats = virtio_gpu_cursor_formats;
@@ -355,17 +592,17 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
nformats = ARRAY_SIZE(virtio_gpu_formats);
funcs = &virtio_gpu_primary_helper_funcs;
}
- ret = drm_universal_plane_init(dev, plane, 1 << index,
- &virtio_gpu_plane_funcs,
- formats, nformats,
- NULL, type, NULL);
- if (ret)
- goto err_plane_init;
+
+ plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
+ 1 << index, &virtio_gpu_plane_funcs,
+ formats, nformats, NULL, type, NULL);
+ if (IS_ERR(plane))
+ return plane;
drm_plane_helper_add(plane, funcs);
- return plane;
-err_plane_init:
- kfree(plane);
- return ERR_PTR(ret);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 807a27a16365..ce49282198cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
#include "virtgpu_drv.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
uuid_t *uuid)
{
@@ -43,13 +45,40 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
return 0;
}
+static struct sg_table *
+virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo))
+ return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
+
+ return drm_gem_map_dma_buf(attach, dir);
+}
+
+static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo)) {
+ virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
+ return;
+ }
+
+ drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .map_dma_buf = virtgpu_gem_map_dma_buf,
+ .unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
@@ -62,7 +91,6 @@ static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo)
{
- int ret;
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -70,11 +98,8 @@ int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
return -ENOMEM;
virtio_gpu_array_add_obj(objs, &bo->base.base);
- ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
- if (ret)
- return ret;
- return 0;
+ return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
@@ -98,6 +123,8 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
} else {
bo->uuid_state = STATE_ERR;
}
+ } else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
+ bo->uuid_state = STATE_ERR;
}
exp_info.ops = &virtgpu_dmabuf_ops.ops;
@@ -116,10 +143,162 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
return buf;
}
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct scatterlist *sl;
+ struct sg_table *sgt;
+ long i, ret;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return ret < 0 ? ret : -ETIMEDOUT;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ *ents = kvmalloc_array(sgt->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+
+ *nents = sgt->nents;
+ for_each_sgtable_dma_sg(sgt, sl, i) {
+ (*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+ (*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+ (*ents)[i].padding = 0;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
+{
+ struct dma_buf_attachment *attach = bo->base.base.import_attach;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (bo->created) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
+
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ virtgpu_dma_buf_unmap(bo);
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ dma_buf_put(dmabuf);
+ }
+
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object_params params = { 0 };
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret) {
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+ }
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_pin;
+
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+ if (ret)
+ goto err_import;
+
+ params.blob = true;
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.size = attach->dmabuf->size;
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+ ents, nents);
+ bo->guest_blob = true;
+
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+
+ return 0;
+
+err_import:
+ dma_buf_unpin(attach);
+err_pin:
+ dma_resv_unlock(resv);
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+ .free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ virtgpu_dma_buf_unmap(bo);
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = virtgpu_dma_buf_move_notify
+};
+
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct dma_buf_attachment *attach;
+ struct virtio_gpu_object *bo;
struct drm_gem_object *obj;
+ int ret;
if (buf->ops == &virtgpu_dmabuf_ops.ops) {
obj = buf->priv;
@@ -133,7 +312,33 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, buf);
+ if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+ return drm_gem_prime_import(dev, buf);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &bo->base.base;
+ obj->resv = buf->resv;
+ obj->funcs = &virtgpu_gem_dma_buf_funcs;
+ drm_gem_private_object_init(dev, obj, buf->size);
+
+ attach = dma_buf_dynamic_attach(buf, dev->dev,
+ &virtgpu_dma_buf_attach_ops, obj);
+ if (IS_ERR(attach)) {
+ kfree(bo);
+ return ERR_CAST(attach);
+ }
+
+ obj->import_attach = attach;
+ get_dma_buf(buf);
+
+ ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return obj;
}
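
The import path above follows the dma-buf dynamic-importer contract: map and unmap run under the exporter's reservation lock with the attachment pinned, and move_notify() (wired up via virtgpu_dma_buf_attach_ops) tells the importer to drop any cached mapping. A condensed sketch of that pattern, assuming only the core dma-buf API (function name hypothetical):

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/* Sketch: map a dynamic attachment while pinned, under the resv lock. */
static struct sg_table *example_map_pinned(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;
	int ret;

	dma_resv_lock(attach->dmabuf->resv, NULL);

	ret = dma_buf_pin(attach);	/* keep the exporter from moving it */
	if (ret) {
		sgt = ERR_PTR(ret);
		goto unlock;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_unpin(attach);
unlock:
	dma_resv_unlock(attach->dmabuf->resv);
	return sgt;
}
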
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
new file mode 100644
index 000000000000..7d34cf83f5f2
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Dave Airlie
+ * Alon Levy
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#include <linux/file.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+#include <drm/virtgpu_drm.h>
+
+#include "virtgpu_drv.h"
+
+struct virtio_gpu_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
+struct virtio_gpu_submit {
+ struct virtio_gpu_submit_post_dep *post_deps;
+ unsigned int num_out_syncobjs;
+
+ struct drm_syncobj **in_syncobjs;
+ unsigned int num_in_syncobjs;
+
+ struct virtio_gpu_object_array *buflist;
+ struct drm_virtgpu_execbuffer *exbuf;
+ struct virtio_gpu_fence *out_fence;
+ struct virtio_gpu_fpriv *vfpriv;
+ struct virtio_gpu_device *vgdev;
+ struct sync_file *sync_file;
+ struct drm_file *file;
+ int out_fence_fd;
+ u64 fence_ctx;
+ u32 ring_idx;
+ void *buf;
+};
+
+static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
+ struct dma_fence *in_fence)
+{
+ u64 context = submit->fence_ctx + submit->ring_idx;
+
+ if (dma_fence_match_context(in_fence, context))
+ return 0;
+
+ return dma_fence_wait(in_fence, true);
+}
+
+static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
+ struct dma_fence *fence)
+{
+ struct dma_fence_unwrap itr;
+ struct dma_fence *f;
+ int err;
+
+ dma_fence_unwrap_for_each(f, &itr, fence) {
+ err = virtio_gpu_do_fence_wait(submit, f);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ if (syncobjs[i])
+ drm_syncobj_put(syncobjs[i]);
+ }
+
+ kvfree(syncobjs);
+}
+
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+ struct drm_syncobj **syncobjs;
+ int ret = 0, i;
+
+ if (!num_in_syncobjs)
+ return 0;
+
+ /*
+ * kvmalloc() first tries to allocate memory using kmalloc() and
+ * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
+ * internally for allocations larger than a page, preventing a
+ * storm of KMSG warnings.
+ */
+ syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+ if (!syncobjs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_in_syncobjs; i++) {
+ u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_syncobjs(syncobjs, i);
+ return ret;
+ }
+
+ submit->num_in_syncobjs = num_in_syncobjs;
+ submit->in_syncobjs = syncobjs;
+
+ return ret;
+}
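
On the userspace side, the descriptors parsed above travel as a packed array addressed by in_syncobjs; syncobj_stride lets the kernel accept older or newer descriptor sizes. A hedged sketch with UAPI names from virtgpu_drm.h (helper name hypothetical):

#include <stdint.h>
#include <string.h>
#include <drm/virtgpu_drm.h>

/* Sketch: make an execbuffer wait on one binary and one timeline
 * syncobj; the second is reset once its fence has been waited on. */
static void example_fill_in_syncobjs(struct drm_virtgpu_execbuffer *eb,
				     struct drm_virtgpu_execbuffer_syncobj descs[2],
				     uint32_t binary, uint32_t timeline,
				     uint64_t point)
{
	memset(descs, 0, 2 * sizeof(descs[0]));
	descs[0].handle = binary;
	descs[1].handle = timeline;
	descs[1].point = point;
	descs[1].flags = VIRTGPU_EXECBUF_SYNCOBJ_RESET;

	eb->num_in_syncobjs = 2;
	eb->in_syncobjs = (uintptr_t)descs;
	eb->syncobj_stride = sizeof(descs[0]);
}
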
+
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i;
+
+ for (i = 0; i < nr_syncobjs; i++) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+
+ kvfree(post_deps);
+}
+
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ struct virtio_gpu_submit_post_dep *post_deps;
+ u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ int ret = 0, i;
+
+ if (!num_out_syncobjs)
+ return 0;
+
+ post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+ if (!post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_out_syncobjs; i++) {
+ u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ kfree(post_deps[i].chain);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_post_deps(post_deps, i);
+ return ret;
+ }
+
+ submit->num_out_syncobjs = num_out_syncobjs;
+ submit->post_deps = post_deps;
+
+ return 0;
+}
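
For the post-deps parsed above, userspace typically hands in a timeline syncobj plus a point and waits for the host to reach it. A hedged sketch using libdrm's syncobj helpers (helper name hypothetical; "eb" is an otherwise prepared execbuffer):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

/* Sketch: attach point 1 of a fresh timeline syncobj as an out-dep
 * of a prepared execbuffer, then block until the host signals it. */
static int example_out_timeline(int drm_fd, struct drm_virtgpu_execbuffer *eb)
{
	struct drm_virtgpu_execbuffer_syncobj out = { .point = 1 };
	uint64_t point = 1;
	int ret;

	ret = drmSyncobjCreate(drm_fd, 0, &out.handle);
	if (ret)
		return ret;

	eb->num_out_syncobjs = 1;
	eb->out_syncobjs = (uintptr_t)&out;
	eb->syncobj_stride = sizeof(out);

	ret = drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, eb);
	if (ret)
		return ret;

	return drmSyncobjTimelineWait(drm_fd, &out.handle, &point, 1,
				      INT64_MAX, 0, NULL);
}
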
+
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+ if (post_deps) {
+ struct dma_fence *fence = &submit->out_fence->f;
+ u32 i;
+
+ for (i = 0; i < submit->num_out_syncobjs; i++) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+ }
+}
+
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct virtio_gpu_fence *fence,
+ u32 ring_idx)
+{
+ struct virtio_gpu_fence_event *e = NULL;
+ int ret;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
+ e->event.length = sizeof(e->event);
+
+ ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+ if (ret) {
+ kfree(e);
+ return ret;
+ }
+
+ fence->e = e;
+
+ return 0;
+}
+
+static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ u32 *bo_handles;
+
+ if (!exbuf->num_bo_handles)
+ return 0;
+
+ bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
+ GFP_KERNEL);
+ if (!bo_handles)
+ return -ENOMEM;
+
+ if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
+ exbuf->num_bo_handles * sizeof(*bo_handles))) {
+ kvfree(bo_handles);
+ return -EFAULT;
+ }
+
+ submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
+ exbuf->num_bo_handles);
+ if (!submit->buflist) {
+ kvfree(bo_handles);
+ return -ENOENT;
+ }
+
+ kvfree(bo_handles);
+
+ return 0;
+}
+
+static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
+{
+ virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
+ if (!IS_ERR(submit->buf))
+ kvfree(submit->buf);
+
+ if (submit->buflist)
+ virtio_gpu_array_put_free(submit->buflist);
+
+ if (submit->out_fence_fd >= 0)
+ put_unused_fd(submit->out_fence_fd);
+
+ if (submit->out_fence)
+ dma_fence_put(&submit->out_fence->f);
+
+ if (submit->sync_file)
+ fput(submit->sync_file->file);
+}
+
+static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
+{
+ virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
+ submit->vfpriv->ctx_id, submit->buflist,
+ submit->out_fence);
+ virtio_gpu_notify(submit->vgdev);
+}
+
+static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
+{
+ submit->buf = NULL;
+ submit->buflist = NULL;
+ submit->sync_file = NULL;
+ submit->out_fence_fd = -1;
+}
+
+static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
+ struct drm_virtgpu_execbuffer *exbuf,
+ struct drm_device *dev,
+ struct drm_file *file,
+ u64 fence_ctx, u32 ring_idx)
+{
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fence *out_fence;
+ bool drm_fence_event;
+ int err;
+
+ memset(submit, 0, sizeof(*submit));
+
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+ (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+ drm_fence_event = true;
+ else
+ drm_fence_event = false;
+
+ if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+ exbuf->num_out_syncobjs ||
+ exbuf->num_bo_handles ||
+ drm_fence_event) {
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+ if (!out_fence)
+ return -ENOMEM;
+ } else {
+ out_fence = NULL;
+ }
+
+ if (drm_fence_event) {
+ err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (err) {
+ dma_fence_put(&out_fence->f);
+ return err;
+ }
+ }
+
+ submit->out_fence = out_fence;
+ submit->fence_ctx = fence_ctx;
+ submit->ring_idx = ring_idx;
+ submit->out_fence_fd = -1;
+ submit->vfpriv = vfpriv;
+ submit->vgdev = vgdev;
+ submit->exbuf = exbuf;
+ submit->file = file;
+
+ err = virtio_gpu_init_submit_buflist(submit);
+ if (err)
+ return err;
+
+ submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
+ if (IS_ERR(submit->buf))
+ return PTR_ERR(submit->buf);
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
+ err = get_unused_fd_flags(O_CLOEXEC);
+ if (err < 0)
+ return err;
+
+ submit->out_fence_fd = err;
+
+ submit->sync_file = sync_file_create(&out_fence->f);
+ if (!submit->sync_file)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
+{
+ int ret = 0;
+
+ if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+ struct dma_fence *in_fence =
+ sync_file_get_fence(submit->exbuf->fence_fd);
+ if (!in_fence)
+ return -EINVAL;
+
+ /*
+ * Wait if the fence is from a foreign context, or if the
+ * fence array contains any fence from a foreign context.
+ */
+ ret = virtio_gpu_dma_fence_wait(submit, in_fence);
+
+ dma_fence_put(in_fence);
+ }
+
+ return ret;
+}
+
+static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
+{
+ if (submit->sync_file) {
+ submit->exbuf->fence_fd = submit->out_fence_fd;
+ fd_install(submit->out_fence_fd, submit->sync_file->file);
+ }
+}
+
+static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
+{
+ if (submit->buflist)
+ return virtio_gpu_array_lock_resv(submit->buflist);
+
+ return 0;
+}
+
+int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ u64 fence_ctx = vgdev->fence_drv.context;
+ struct drm_virtgpu_execbuffer *exbuf = data;
+ struct virtio_gpu_submit submit;
+ u32 ring_idx = 0;
+ int ret = -EINVAL;
+
+ if (!vgdev->has_virgl_3d)
+ return -ENOSYS;
+
+ if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
+ return ret;
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
+ if (exbuf->ring_idx >= vfpriv->num_rings)
+ return ret;
+
+ if (!vfpriv->base_fence_ctx)
+ return ret;
+
+ fence_ctx = vfpriv->base_fence_ctx;
+ ring_idx = exbuf->ring_idx;
+ }
+
+ virtio_gpu_create_context(dev, file);
+
+ ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
+ fence_ctx, ring_idx);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_parse_post_deps(&submit);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_parse_deps(&submit);
+ if (ret)
+ goto cleanup;
+
+ /*
+ * Await in-fences at the end of the job-submission path so that,
+ * once the waits complete, execution proceeds directly to the
+ * submission to virtio.
+ */
+ ret = virtio_gpu_wait_in_fence(&submit);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_lock_buflist(&submit);
+ if (ret)
+ goto cleanup;
+
+ virtio_gpu_submit(&submit);
+
+ /*
+ * Set up the user-visible out-data only after the job has been
+ * submitted, keeping the submission path itself short.
+ */
+ virtio_gpu_install_out_fence_fd(&submit);
+ virtio_gpu_process_post_deps(&submit);
+ virtio_gpu_complete_submit(&submit);
+cleanup:
+ virtio_gpu_cleanup_submit(&submit);
+
+ return ret;
+}
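
Tying the ioctl together from userspace: with VIRTGPU_EXECBUF_FENCE_FD_OUT set, the call returns a sync-file fd in fence_fd that becomes readable once the out-fence signals. A hedged sketch (helper name hypothetical; "cmds" is a prebuilt command stream for the context):

#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

/* Sketch: submit a command buffer and wait on the returned sync-file. */
static int example_submit_and_wait(int drm_fd, void *cmds, uint32_t size)
{
	struct drm_virtgpu_execbuffer eb = {
		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
		.size = size,
		.command = (uintptr_t)cmds,
	};
	struct pollfd pfd = { .events = POLLIN };
	int ret;

	ret = drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
	if (ret)
		return ret;

	pfd.fd = eb.fence_fd;		/* installed by the kernel */
	ret = poll(&pfd, 1, -1);	/* readable when the fence signals */
	close(eb.fence_fd);

	return ret < 0 ? -1 : 0;
}
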
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 711ecc2bd241..227bf0ae7ed5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -9,40 +9,44 @@
#define TRACE_INCLUDE_FILE virtgpu_trace
DECLARE_EVENT_CLASS(virtio_gpu_cmd,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr),
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno),
TP_STRUCT__entry(
__field(int, dev)
__field(unsigned int, vq)
- __field(const char *, name)
+ __string(name, vq->name)
__field(u32, type)
__field(u32, flags)
__field(u64, fence_id)
__field(u32, ctx_id)
+ __field(u32, num_free)
+ __field(u32, seqno)
),
TP_fast_assign(
__entry->dev = vq->vdev->index;
__entry->vq = vq->index;
- __entry->name = vq->name;
+ __assign_str(name);
__entry->type = le32_to_cpu(hdr->type);
__entry->flags = le32_to_cpu(hdr->flags);
__entry->fence_id = le64_to_cpu(hdr->fence_id);
__entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+ __entry->num_free = vq->num_free;
+ __entry->seqno = seqno;
),
- TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
- __entry->dev, __entry->vq, __entry->name,
+ TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u num_free=%u seqno=%u",
+ __entry->dev, __entry->vq, __get_str(name),
__entry->type, __entry->flags, __entry->fence_id,
- __entry->ctx_id)
+ __entry->ctx_id, __entry->num_free, __entry->seqno)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr)
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr)
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno)
);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index cf84d382dd41..0c194b4e9488 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -31,6 +31,9 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
@@ -84,6 +87,22 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
vgdev->vbufs = NULL;
}
+/* For drm_panic */
+static struct virtio_gpu_vbuffer*
+virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);
+
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+ vbuf->resp_cb = NULL;
+ vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
+ vbuf->resp_buf = (void *)vbuf->buf + size;
+ return vbuf;
+}
+
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
int size, int resp_size, void *resp_buf,
@@ -91,9 +110,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
BUG_ON(size > MAX_INLINE_CMD_SIZE ||
size < sizeof(struct virtio_gpu_ctrl_hdr));
@@ -137,6 +154,18 @@ virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
+/* For drm_panic */
+static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int cmd_size)
+{
+ struct virtio_gpu_vbuffer *vbuf;
+
+ vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
+ *vbuffer_p = vbuf;
+ return (struct virtio_gpu_command *)vbuf->buf;
+}
+
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
virtio_gpu_resp_cb cb,
struct virtio_gpu_vbuffer **vbuffer_p,
@@ -147,10 +176,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
}
@@ -205,7 +230,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
struct list_head reclaim_list;
struct virtio_gpu_vbuffer *entry, *tmp;
struct virtio_gpu_ctrl_hdr *resp;
- u64 fence_id = 0;
+ u64 fence_id;
INIT_LIST_HEAD(&reclaim_list);
spin_lock(&vgdev->ctrlq.qlock);
@@ -219,11 +244,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
- trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+ trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
+
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -232,23 +258,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
- u64 f = le64_to_cpu(resp->fence_id);
-
- if (fence_id > f) {
- DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
- __func__, fence_id, f);
- } else {
- fence_id = f;
- }
+ fence_id = le64_to_cpu(resp->fence_id);
+ virtio_gpu_fence_event_process(vgdev, fence_id);
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
- if (fence_id)
- virtio_gpu_fence_event_process(vgdev, fence_id);
-
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
if (entry->objs)
virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
@@ -274,6 +291,10 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
spin_unlock(&vgdev->cursorq.qlock);
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ struct virtio_gpu_ctrl_hdr *resp =
+ (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+ trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
list_del(&entry->list);
free_vbuf(vgdev, entry);
}
@@ -320,6 +341,34 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
+{
+ struct virtqueue *vq = vgdev->ctrlq.vq;
+ int ret;
+
+ if (vgdev->has_indirect)
+ elemcnt = 1;
+
+ if (vq->num_free < elemcnt)
+ return -ENOMEM;
+
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ WARN_ON(ret);
+
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
+
+ atomic_inc(&vgdev->pending_commands);
+
+ return 0;
+}
+
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence,
@@ -335,7 +384,7 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
free_vbuf(vgdev, vbuf);
- return -1;
+ return -ENODEV;
}
if (vgdev->has_indirect)
@@ -366,7 +415,8 @@ again:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
WARN_ON(ret);
- trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
atomic_inc(&vgdev->pending_commands);
@@ -376,6 +426,32 @@ again:
return 0;
}
+/* For drm_panic */
+static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct scatterlist *sgs[3], vcmd, vresp;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
+ return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
+ elemcnt, sgs,
+ outcnt, incnt);
+}
+
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_fence *fence)
@@ -394,12 +470,13 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
+
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
- return -1;
+ return -ENOMEM;
}
elemcnt += sg_ents;
@@ -430,6 +507,21 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
return ret;
}
+/* For drm_panic */
+void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
+{
+ bool notify;
+
+ if (!atomic_read(&vgdev->pending_commands))
+ return;
+
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+}
+
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
bool notify;
@@ -478,8 +570,10 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+ vbuf->seqno = ++vgdev->cursorq.seqno;
trace_virtio_gpu_cmd_queue(vq,
- virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ vbuf->seqno);
notify = virtqueue_kick_prepare(vq);
}
@@ -573,16 +667,42 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
+/* For drm_panic */
+void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t x, uint32_t y,
+ uint32_t width, uint32_t height)
+{
+ struct virtio_gpu_resource_flush *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = NULL;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
- uint32_t width, uint32_t height)
+ uint32_t width, uint32_t height,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_flush *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
cmd_p->resource_id = cpu_to_le32(resource_id);
@@ -591,7 +711,38 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
cmd_p->r.x = cpu_to_le32(x);
cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
+/* For drm_panic */
+int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_transfer_to_host_2d *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ bo->base.sgt, DMA_TO_DEVICE);
+
+ cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -605,11 +756,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -649,6 +799,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -731,7 +898,7 @@ static int virtio_get_edid_block(void *data, u8 *buf,
size_t start = block * EDID_LENGTH;
if (start + len > le32_to_cpu(resp->size))
- return -1;
+ return -EINVAL;
memcpy(buf, resp->edid + start, len);
return 0;
}
@@ -745,21 +912,21 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
uint32_t scanout = le32_to_cpu(cmd->scanout);
struct virtio_gpu_output *output;
- struct edid *new_edid, *old_edid;
+ const struct drm_edid *new_edid, *old_edid;
if (scanout >= vgdev->num_scanouts)
return;
output = vgdev->outputs + scanout;
- new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
- drm_connector_update_edid_property(&output->conn, new_edid);
+ new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
+ drm_edid_connector_update(&output->conn, new_edid);
spin_lock(&vgdev->display_info_lock);
- old_edid = output->edid;
- output->edid = new_edid;
+ old_edid = output->drm_edid;
+ output->drm_edid = new_edid;
spin_unlock(&vgdev->display_info_lock);
- kfree(old_edid);
+ drm_edid_free(old_edid);
wake_up(&vgdev->resp_wq);
}
@@ -914,7 +1081,8 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name)
+ uint32_t context_init, uint32_t nlen,
+ const char *name)
{
struct virtio_gpu_ctx_create *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -925,8 +1093,8 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
+ cmd_p->context_init = cpu_to_le32(context_init);
+ strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -1027,11 +1195,9 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- if (virtio_gpu_is_shmem(bo) && use_dma_api) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
- }
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1108,8 +1274,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry *ents,
unsigned int nents)
{
+ if (obj->attached)
+ return;
+
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
+
+ obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence)
+{
+ if (!obj->attached)
+ return;
+
+ virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+ fence);
+
+ obj->attached = false;
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1270,6 +1454,9 @@ virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
bo->created = true;
+
+ if (nents)
+ bo->attached = true;
}
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 5cc34e7330fa..5ad3b7c6f73c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
+#include <linux/dma-mapping.h>
+
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -35,6 +37,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
unsigned long vm_size = vma->vm_end - vma->vm_start;
+ unsigned long vm_end;
if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
return -EINVAL;
@@ -44,7 +47,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return -EINVAL;
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;
@@ -54,16 +57,77 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- /* Partial mappings of GEM buffers don't happen much in practice. */
- if (vm_size != vram->vram_node.size)
+ if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
+ return -EINVAL;
+
+ if (vm_end > vram->vram_node.size)
return -EINVAL;
ret = io_remap_pfn_range(vma, vma->vm_start,
- vram->vram_node.start >> PAGE_SHIFT,
+ (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
vm_size, vma->vm_page_prot);
return ret;
}
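
The reworked bounds check above admits any (offset, size) window that stays inside the vram node, so partial mappings at an offset now work; the user-supplied offset survives into vm_pgoff when mapping through the buffer's dma-buf fd. A hedged userspace sketch (helper name hypothetical):

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

/* Sketch: map a single page of a mappable blob, one page into it. */
static void *example_partial_map(int drm_fd, uint32_t bo_handle, long pagesz)
{
	void *ptr;
	int buf_fd;

	if (drmPrimeHandleToFD(drm_fd, bo_handle, DRM_CLOEXEC | DRM_RDWR,
			       &buf_fd))
		return MAP_FAILED;

	ptr = mmap(NULL, pagesz, PROT_READ | PROT_WRITE, MAP_SHARED,
		   buf_fd, pagesz);	/* offset = one page */
	close(buf_fd);
	return ptr;
}
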
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ struct sg_table *sgt;
+ dma_addr_t addr;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
+ /*
+ * Virtio devices can access the dma-buf via its UUID. Return a
+ * stub sg_table so the dma-buf API still works.
+ */
+ if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
+ ret = -EIO;
+ goto out;
+ }
+ return sgt;
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ addr = dma_map_resource(dev, vram->vram_node.start,
+ vram->vram_node.size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ goto out;
+
+ sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
+ sg_dma_address(sgt->sgl) = addr;
+ sg_dma_len(sgt->sgl) = vram->vram_node.size;
+
+ return sgt;
+out:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (sgt->nents) {
+ dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
+ sg_dma_len(sgt->sgl), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,