Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig                  |    5
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                 |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c             |    8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmw_surface_cache.h      |   10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.c         |    1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c            |  147
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c              |  164
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h              |   30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c             |    1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c         |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c    |  844
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h    |   81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c         |    1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c             |   77
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h             |  124
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c         |   53
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c           |   19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c             |  140
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c   |    4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c           |    1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c             | 1357
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h             |  131
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c             |   64
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c             |    3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c             |  175
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h       |  196
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h         |  185
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c         |    2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c      |   63
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c           |   41
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c        |   69
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c            |   75
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c          |    7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c            |  302
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c         |  350
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c      |   49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c        |  110
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c      |   26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h      |    7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c            |  635
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h            |   75

41 files changed, 3291 insertions(+), 2345 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index faddae3d6ac2..aab646b91ca9 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && MMU
- depends on X86 || ARM64
+ depends on DRM && PCI
+ depends on (X86 && HYPERVISOR_GUEST) || ARM64
+ select DRM_CLIENT_SELECTION
select DRM_TTM
select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index e94479d9cd5b..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 6806c05e57f6..36d46b79562a 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -54,7 +54,7 @@
#include <linux/module.h>
#include <linux/hashtable.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define VMW_TTM_OBJECT_REF_HT_ORDER 10
@@ -87,14 +87,11 @@ struct ttm_object_file {
*
* @object_lock: lock that protects idr.
*
- * @object_count: Per device object count.
- *
* This is the per-device data structure needed for ttm object management.
*/
struct ttm_object_device {
spinlock_t object_lock;
- atomic_t object_count;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
struct idr idr;
@@ -431,7 +428,6 @@ ttm_object_device_init(const struct dma_buf_ops *ops)
return NULL;
spin_lock_init(&tdev->object_lock);
- atomic_set(&tdev->object_count, 0);
/*
* Our base is at VMWGFX_NUM_MOB + 1 because we want to create
@@ -475,7 +471,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*/
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
- return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+ return file_ref_get(&dmabuf->file->f_ref);
}
/**
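
The get_dma_buf_unless_doomed() hunk above swaps the open-coded
atomic_long_inc_not_zero() on f_count for the newer file_ref API while
keeping the same "take a reference unless the object is already dying"
semantics. A minimal sketch of that generic pattern in C11 atomics
(illustrative refcount, not the kernel's struct file):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the count is still non-zero, i.e. only if
     * the object is not already in the middle of being torn down. */
    static bool ref_get_unless_zero(atomic_long *count)
    {
            long old = atomic_load(count);

            while (old != 0) {
                    /* A failed CAS reloads `old`, so simply retry. */
                    if (atomic_compare_exchange_weak(count, &old, old + 1))
                            return true;
            }
            return false;
    }
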
diff --git a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
index b0d87c5f58d8..1ac3cb151b11 100644
--- a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
+++ b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
+ *
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -31,6 +33,10 @@
#include <drm/vmwgfx_drm.h>
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ ((svga3d_flags) & ((uint64_t)U32_MAX))
+
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;
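
The new SVGA3D_FLAGS_UPPER_32()/SVGA3D_FLAGS_LOWER_32() helpers split a
64-bit surface-flags word into the two 32-bit halves that device commands
carry separately. A standalone illustration mirroring the macros (U32_MAX
is 0xffffffff):

    #include <stdint.h>
    #include <stdio.h>

    #define SVGA3D_FLAGS_UPPER_32(f) ((f) >> 32)
    #define SVGA3D_FLAGS_LOWER_32(f) ((f) & ((uint64_t)0xffffffffu))

    int main(void)
    {
            uint64_t flags = 0x0000000300000001ull; /* hypothetical flag word */

            printf("upper=0x%x lower=0x%x\n",
                   (uint32_t)SVGA3D_FLAGS_UPPER_32(flags),
                   (uint32_t)SVGA3D_FLAGS_LOWER_32(flags));
            return 0; /* prints upper=0x3 lower=0x1 */
    }
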
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index ae2de914eb89..2731f6ded1c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -54,6 +54,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"
+#include <linux/vmalloc.h>
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf1485b..fa5841fda659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,8 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+
+#include "vmwgfx_bo.h"
#include <linux/highmem.h>
/*
@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return 0;
}
+static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+ struct vmw_private *vmw =
+ container_of(bo->tbo.bdev, struct vmw_private, bdev);
+ void *ptr = NULL;
+ int ret;
+
+ if (drm_gem_is_imported(&bo->tbo.base)) {
+ ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
+ if (ret) {
+ drm_dbg_driver(&vmw->drm,
+ "Wasn't able to map external bo!\n");
+ goto out;
+ }
+ ptr = map->vaddr;
+ } else {
+ ptr = vmw_bo_map_and_cache(bo);
+ }
+
+out:
+ return ptr;
+}
+
+static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+ if (drm_gem_is_imported(&bo->tbo.base))
+ dma_buf_vunmap(bo->tbo.base.dma_buf, map);
+ else
+ vmw_bo_unmap(bo);
+}
+
+static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
+ u32 dst_stride, struct vmw_bo *src,
+ u32 src_offset, u32 src_stride,
+ u32 width_in_bytes, u32 height,
+ struct vmw_diff_cpy *diff)
+{
+ struct vmw_private *vmw =
+ container_of(dst->tbo.bdev, struct vmw_private, bdev);
+ size_t dst_size = dst->tbo.resource->size;
+ size_t src_size = src->tbo.resource->size;
+ struct iosys_map dst_map = {0};
+ struct iosys_map src_map = {0};
+ int ret, i;
+ int x_in_bytes;
+ u8 *vsrc;
+ u8 *vdst;
+
+ vsrc = map_external(src, &src_map);
+ if (!vsrc) {
+ drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vdst = map_external(dst, &dst_map);
+ if (!vdst) {
+ drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsrc += src_offset;
+ vdst += dst_offset;
+ if (src_stride == dst_stride) {
+ dst_size -= dst_offset;
+ src_size -= src_offset;
+ memcpy(vdst, vsrc,
+ min(dst_stride * height, min(dst_size, src_size)));
+ } else {
+ WARN_ON(dst_stride < width_in_bytes);
+ for (i = 0; i < height; ++i) {
+ memcpy(vdst, vsrc, width_in_bytes);
+ vsrc += src_stride;
+ vdst += dst_stride;
+ }
+ }
+
+ x_in_bytes = (dst_offset % dst_stride);
+ diff->rect.x1 = x_in_bytes / diff->cpp;
+ diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
+ diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
+ diff->rect.y2 = diff->rect.y1 + height;
+
+ ret = 0;
+out:
+ unmap_external(src, &src_map);
+ unmap_external(dst, &dst_map);
+
+ return ret;
+}
+
/**
* vmw_bo_cpu_blit - in-kernel cpu blit.
*
- * @dst: Destination buffer object.
+ * @vmw_dst: Destination buffer object.
* @dst_offset: Destination offset of blit start in bytes.
* @dst_stride: Destination stride in bytes.
- * @src: Source buffer object.
+ * @vmw_src: Source buffer object.
* @src_offset: Source offset of blit start in bytes.
* @src_stride: Source stride in bytes.
* @w: Width of blit.
@@ -444,20 +538,29 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
* Neither of the buffer objects may be placed in PCI memory
* (Fixed memory in TTM terminology) when using this function.
*/
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
u32 dst_offset, u32 dst_stride,
- struct ttm_buffer_object *src,
+ struct vmw_bo *vmw_src,
u32 src_offset, u32 src_stride,
u32 w, u32 h,
struct vmw_diff_cpy *diff)
{
+ struct ttm_buffer_object *src = &vmw_src->tbo;
+ struct ttm_buffer_object *dst = &vmw_dst->tbo;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
+ bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+ bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (WARN_ON(dst == src))
+ return -EINVAL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +580,40 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (src_external || dst_external)
+ return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
+ vmw_src, src_offset, src_stride,
+ w, h, diff);
+
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +635,8 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ kvfree(src_pages);
+ kvfree(dst_pages);
return ret;
}
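
The heart of the new vmw_external_bo_copy() is a stride-aware row copy
that collapses to a single memcpy when source and destination strides
match (the driver additionally clamps that memcpy to both buffer sizes).
The same logic as a self-contained sketch over plain buffers, not driver
objects:

    #include <stdint.h>
    #include <string.h>

    /* Copy `height` rows of `width_in_bytes` between buffers whose rows
     * are `src_stride` and `dst_stride` bytes apart. */
    static void copy_rows(uint8_t *dst, uint32_t dst_stride,
                          const uint8_t *src, uint32_t src_stride,
                          uint32_t width_in_bytes, uint32_t height)
    {
            uint32_t i;

            if (dst_stride == src_stride) {
                    /* Rows are contiguous and equally spaced: one copy. */
                    memcpy(dst, src, (size_t)dst_stride * height);
                    return;
            }
            for (i = 0; i < height; ++i) {
                    memcpy(dst, src, width_in_bytes);
                    src += src_stride;
                    dst += dst_stride;
            }
    }
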
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index bfd41ce3c8f4..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,15 +28,40 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-
+#include "vmwgfx_resource_priv.h"
#include <drm/ttm/ttm_placement.h>
static void vmw_bo_release(struct vmw_bo *vbo)
{
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ struct vmw_resource *res;
+
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
+
+ xa_destroy(&vbo->detached_resources);
+ WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
+ if (vbo->is_dumb && vbo->dumb_surface) {
+ res = &vbo->dumb_surface->res;
+ WARN_ON(vbo != res->guest_memory_bo);
+ WARN_ON(!res->guest_memory_bo);
+ if (res->guest_memory_bo) {
+ /* Reserve and switch the backing mob. */
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ (void)vmw_resource_reserve(res, false, true);
+ vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->guest_memory_bo);
+ res->guest_memory_bo = NULL;
+ res->guest_memory_offset = 0;
+ vmw_resource_unreserve(res, true, false, false, NULL,
+ 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
+ vmw_surface_unreference(&vbo->dumb_surface);
+ }
drm_gem_object_release(&vbo->tbo.base);
}
@@ -49,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -325,18 +350,26 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
*/
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
+ return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
+}
+
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
+{
struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
int ret;
+ atomic_inc(&vbo->map_count);
+
virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
if (ret)
- DRM_ERROR("Buffer object map failed: %d.\n", ret);
+ DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
+ ret, bo->base.size, size);
return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
@@ -352,11 +385,17 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
*/
void vmw_bo_unmap(struct vmw_bo *vbo)
{
+ int map_count;
+
if (vbo->map.bo == NULL)
return;
- ttm_bo_kunmap(&vbo->map);
- vbo->map.bo = NULL;
+ map_count = atomic_dec_return(&vbo->map_count);
+
+ if (!map_count) {
+ ttm_bo_kunmap(&vbo->map);
+ vbo->map.bo = NULL;
+ }
}
@@ -377,7 +416,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -388,20 +428,23 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
+ xa_init(&vmw_bo->detached_resources);
+ atomic_set(&vmw_bo->map_count, 0);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
@@ -425,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -652,52 +696,6 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
dma_fence_put(&fence->base);
}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_bo_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_bo *vbo;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- int ret;
-
- switch (cpp) {
- case 1: /* DRM_FORMAT_C8 */
- case 2: /* DRM_FORMAT_RGB565 */
- case 4: /* DRM_FORMAT_XRGB8888 */
- break;
- default:
- /*
- * Dumb buffers don't allow anything else.
- * This is tested via IGT's dumb_buffers
- */
- return -EINVAL;
- }
-
- args->pitch = args->width * cpp;
- args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
-
- ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
- args->size, &args->handle,
- &vbo);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->tbo.base);
- return ret;
-}
-
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@@ -851,3 +849,49 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
+
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
+}
+
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+ xa_erase(&vbo->detached_resources, (unsigned long)res);
+}
+
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
+{
+ unsigned long index;
+ struct vmw_resource *res = NULL;
+ struct vmw_surface *surf = NULL;
+ struct rb_node *rb_itr = vbo->res_tree.rb_node;
+
+ if (vbo->is_dumb && vbo->dumb_surface) {
+ res = &vbo->dumb_surface->res;
+ goto out;
+ }
+
+ xa_for_each(&vbo->detached_resources, index, res) {
+ if (res->func->res_type == vmw_res_surface)
+ goto out;
+ }
+
+ for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
+ rb_itr = rb_next(rb_itr)) {
+ res = rb_entry(rb_itr, struct vmw_resource, mob_node);
+ if (res->func->res_type == vmw_res_surface)
+ goto out;
+ }
+
+out:
+ if (res)
+ surf = vmw_res_to_srf(res);
+ return surf;
+}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
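
vmw_bo_map_and_cache() and vmw_bo_unmap() now pair through the new atomic
map_count, so nested users can map the same buffer and only the final
unmap tears down the cached kernel mapping. A simplified sketch of the
pattern, with illustrative callbacks standing in for ttm_bo_kmap() and
ttm_bo_kunmap(); as in the driver, callers are assumed to serialize via
the buffer reservation:

    #include <stdatomic.h>
    #include <stddef.h>

    struct mapping {
            atomic_int map_count;
            void *vaddr; /* cached kernel mapping */
    };

    static void *map_get(struct mapping *m, void *(*do_map)(void))
    {
            atomic_fetch_add(&m->map_count, 1);
            if (!m->vaddr)
                    m->vaddr = do_map(); /* ttm_bo_kmap in the driver */
            return m->vaddr;
    }

    static void map_put(struct mapping *m, void (*do_unmap)(void *))
    {
            /* fetch_sub returns the old value: 1 means we were last. */
            if (atomic_fetch_sub(&m->map_count, 1) == 1) {
                    do_unmap(m->vaddr);
                    m->vaddr = NULL;
            }
    }
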
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c6af..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -35,11 +36,13 @@
#include <linux/rbtree_types.h>
#include <linux/types.h>
+#include <linux/xarray.h>
struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;
+struct vmw_surface;
enum vmw_bo_domain {
VMW_BO_DOMAIN_SYS = BIT(0),
@@ -53,8 +56,11 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
@@ -66,6 +72,8 @@ struct vmw_bo_params {
* @map: Kmap object for semi-persistent mappings
* @res_tree: RB tree of resources using this buffer object as a backing MOB
* @res_prios: Eviction priority counts for attached resources
+ * @map_count: The number of currently active maps. Will differ from the
+ * cpu_writers because it includes kernel maps.
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -76,18 +84,22 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
struct rb_root res_tree;
u32 res_prios[TTM_MAX_BO_PRIORITY];
+ struct xarray detached_resources;
+ atomic_t map_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
struct vmw_bo_dirty *dirty;
+
+ bool is_dumb;
+ struct vmw_surface *dumb_surface;
};
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
@@ -122,15 +134,21 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
void vmw_bo_unmap(struct vmw_bo *vbo);
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
+
int vmw_user_bo_lookup(struct drm_file *filp,
u32 handle,
struct vmw_bo **out);
+
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
* according to attached resources
@@ -186,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -215,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 195ff8792e5a..dd4ca6a9c690 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_placement.h>
#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of mouse hotspot
+ * @hotspotY: the vertical position of mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+ * under the risk of clogging other fifocmd users, so
+	 * we treat reservations separately from the way we treat
+ * other fallible KMS-atomic resources at prepare_fb
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor mobs.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle dst & src coordinates != 0 */
+		/* TODO handle more than one copy box */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mobs.
+ *
+ * @vps: plane_state
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface.
+ *
+ * Returns nothing; unmap failures are ignored.
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+			 * Currently unused because the check at the top exits
+			 * right away. In most cases a different buffer means
+			 * different contents. For the few percent of cases
+			 * where that's not true, the cost of doing the memcmp
+			 * on all the others seems to outweigh the benefits.
+			 * Keep the conditional so it can be trivially
+			 * validated by removing the initial
+			 * if (new_bo != old_bo) at the start.
+ */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+			if (old_image && new_image && old_image != new_image)
+				changed = memcmp(old_image, new_image, size) != 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
+ return -ENOMEM;
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * For all update types update the cursor position
+ */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
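
As vmw_cursor_mob_size() and vmw_cursor_update_mob() above show, a
MOB-backed cursor is an SVGAGBCursorHeader immediately followed by one
32-bit ARGB pixel per texel. A small sketch of the size computation (the
32-byte header size below is a hypothetical stand-in for
sizeof(SVGAGBCursorHeader)):

    #include <stdint.h>
    #include <stdio.h>

    #define CURSOR_HEADER_BYTES 32u /* hypothetical header size */

    static uint32_t cursor_mob_size(uint32_t w, uint32_t h)
    {
            /* one u32 (ARGB8888) per pixel plus the device header */
            return w * h * (uint32_t)sizeof(uint32_t) + CURSOR_HEADER_BYTES;
    }

    int main(void)
    {
            printf("64x64 cursor MOB: %u bytes\n", cursor_mob_size(64, 64));
            return 0;
    }
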
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
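
vmw_plane_to_vcp() above is the usual container_of() downcast from the
embedded drm_plane back to its wrapping vmw_cursor_plane. The generic
shape of the idiom, with illustrative structs in place of the DRM types:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct plane { int id; };

    struct cursor_plane {
            struct plane base; /* embedded base object */
            int n_mobs;
    };

    /* Recover the wrapper from a pointer to the embedded member. */
    static inline struct cursor_plane *to_cursor_plane(struct plane *p)
    {
            return container_of(p, struct cursor_plane, base);
    }
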
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
index 829df395c2ed..6e6beff9e262 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include <linux/vmalloc.h>
#include "vmwgfx_devcaps.h"
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0a304706e013..5205552b1970 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,42 +1,23 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
+#include "vmwgfx_vkms.h"
#include "ttm_object.h"
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
@@ -48,11 +29,14 @@
#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif
+
+#include <linux/aperture.h>
#include <linux/cc_platform.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
+#include <linux/vmalloc.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -399,7 +383,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -411,10 +396,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
@@ -744,7 +725,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
dev->vram_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "Register MMIO at 0x%pa size is %llu kiB\n",
+ "Register MMIO at 0x%pa size is %llu KiB\n",
&rmmio_start, (uint64_t)rmmio_size / 1024);
dev->rmmio = devm_ioremap(dev->drm.dev,
rmmio_start,
@@ -763,12 +744,12 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
fifo_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "FIFO at %pa size is %llu kiB\n",
+ "FIFO at %pa size is %llu KiB\n",
&fifo_start, (uint64_t)fifo_size / 1024);
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
fifo_size,
- MEMREMAP_WB);
+ MEMREMAP_WB | MEMREMAP_DEC);
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
@@ -788,7 +769,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
* SVGA_REG_VRAM_SIZE.
*/
drm_info(&dev->drm,
- "VRAM at %pa size is %llu kiB\n",
+ "VRAM at %pa size is %llu KiB\n",
&dev->vram_start, (uint64_t)dev->vram_size / 1024);
return 0;
@@ -857,8 +838,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
bool refuse_dma = false;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- dev_priv->drm.dev_private = dev_priv;
-
vmw_sw_context_init(dev_priv);
mutex_init(&dev_priv->cmdbuf_mutex);
@@ -911,6 +890,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
"Please switch to a supported graphics device to avoid problems.");
}
+ vmw_vkms_init(dev_priv);
+
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
drm_info(&dev_priv->drm,
@@ -956,13 +937,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
- /*
- * Workaround for low memory 2D VMs to compensate for the
- * allocation taken by fbdev
- */
- if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 3;
-
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
@@ -987,13 +961,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->max_primary_mem = dev_priv->vram_size;
}
drm_info(&dev_priv->drm,
- "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
+ "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n",
(u64)dev_priv->vram_size / 1024,
(u64)dev_priv->fifo_mem_size / 1024,
dev_priv->memory_size / 1024);
drm_info(&dev_priv->drm,
- "MOB limits: max mob size = %u kB, max mob pages = %u\n",
+ "MOB limits: max mob size = %u KiB, max mob pages = %u\n",
dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
ret = vmw_dma_masks(dev_priv);
@@ -1011,7 +985,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
(unsigned)dev_priv->max_gmr_pages);
}
drm_info(&dev_priv->drm,
- "Maximum display memory size is %llu kiB\n",
+ "Maximum display memory size is %llu KiB\n",
(uint64_t)dev_priv->max_primary_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
@@ -1196,6 +1170,7 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_svga_disable(dev_priv);
+ vmw_vkms_cleanup(dev_priv);
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
@@ -1329,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
@@ -1611,6 +1583,7 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
static const struct drm_driver driver = {
@@ -1628,11 +1601,13 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
+
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
- .date = VMWGFX_DRIVER_DATE,
.major = VMWGFX_DRIVER_MAJOR,
.minor = VMWGFX_DRIVER_MINOR,
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
@@ -1653,7 +1628,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct vmw_private *vmw;
int ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (ret)
goto out_error;
@@ -1680,7 +1655,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vmw_fifo_resource_inc(vmw);
vmw_svga_enable(vmw);
- drm_fbdev_generic_setup(&vmw->drm, 0);
+ drm_client_setup(&vmw->drm, NULL);
vmw_debugfs_gem_init(vmw);
vmw_debugfs_resource_managers_init(vmw);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 12efecc17df6..594af8eb04c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,27 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
@@ -56,12 +37,11 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_NUM_DISPLAY_UNITS 8
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_MIN_INITIAL_WIDTH 1280
@@ -81,7 +61,7 @@
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -117,25 +93,8 @@ struct vmwgfx_hash_item {
unsigned long key;
};
-
-/**
- * struct vmw_validate_buffer - Carries validation info about buffers.
- *
- * @base: Validation info for TTM.
- * @hash: Hash entry for quick lookup of the TTM buffer object.
- *
- * This structure contains also driver private validation info
- * on top of the info needed by TTM.
- */
-struct vmw_validate_buffer {
- struct ttm_validate_buffer base;
- struct vmwgfx_hash_item hash;
- bool validate_as_mob;
-};
-
struct vmw_res_func;
-
/**
* struct vmw-resource - base class for hardware resources
*
@@ -218,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -445,15 +404,6 @@ struct vmw_sw_context{
struct vmw_legacy_display;
struct vmw_overlay;
-struct vmw_vga_topology_state {
- uint32_t width;
- uint32_t height;
- uint32_t primary;
- uint32_t pos_x;
- uint32_t pos_y;
-};
-
-
/*
* struct vmw_otable - Guest Memory OBject table metadata
*
@@ -501,7 +451,6 @@ struct vmw_private {
struct drm_device drm;
struct ttm_device bdev;
- struct drm_vma_offset_manager vma_manager;
u32 pci_id;
resource_size_t io_start;
resource_size_t vram_start;
@@ -642,6 +591,9 @@ struct vmw_private {
uint32 *devcaps;
+ bool vkms_enabled;
+ struct workqueue_struct *crc_workq;
+
/*
* mksGuestStat instance-descriptor and pid arrays
*/
@@ -662,7 +614,7 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
- return (struct vmw_private *)dev->dev_private;
+ return container_of(dev, struct vmw_private, drm);
}
static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
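
The vmw_priv() change above is the standard embedded-device pattern: once struct drm_device is embedded in struct vmw_private, the private struct is recovered with container_of() and the deprecated dev_private back-pointer can be dropped entirely. A minimal sketch of the pattern, with hypothetical names:

	struct example_private {
		struct drm_device drm;	/* embedded, e.g. via devm_drm_dev_alloc() */
		u32 flags;
	};

	static inline struct example_private *example_priv(struct drm_device *dev)
	{
		return container_of(dev, struct example_private, drm);
	}
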
@@ -787,6 +739,26 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
+ * User handles
+ */
+struct vmw_user_object {
+ struct vmw_surface *surface;
+ struct vmw_bo *buffer;
+};
+
+int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
+ u32 handle, struct vmw_user_object *uo);
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
+void vmw_user_object_unref(struct vmw_user_object *uo);
+bool vmw_user_object_is_null(struct vmw_user_object *uo);
+struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
+struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
+void *vmw_user_object_map(struct vmw_user_object *uo);
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
+void vmw_user_object_unmap(struct vmw_user_object *uo);
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
+
+/**
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
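
Judging from the declarations above, a user-space handle now resolves to a single vmw_user_object that may carry a surface, a buffer, or both, replacing the old pair of out-parameters. A hedged usage sketch, with the exact semantics inferred from the prototypes:

	struct vmw_user_object uo = {0};
	int ret = vmw_user_object_lookup(dev_priv, file_priv, handle, &uo);

	if (ret)
		return ret;
	if (vmw_user_object_surface(&uo)) {
		/* the handle named a surface (possibly buffer-backed) */
	} else if (vmw_user_object_buffer(&uo)) {
		/* the handle named a plain buffer object */
	}
	vmw_user_object_unref(&uo);	/* drop the lookup reference */
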
@@ -800,11 +772,6 @@ extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct drm_file *filp,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -836,6 +803,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end);
+int vmw_resource_clean(struct vmw_resource *res);
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault);
@@ -854,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1058,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1066,9 +1031,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1078,14 +1040,10 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1130,6 +1088,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
@@ -1199,6 +1160,15 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
int vmw_gb_surface_define(struct vmw_private *dev_priv,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out);
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle);
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle);
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/*
* Shader management - vmwgfx_shader.c
@@ -1354,9 +1324,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *dst,
u32 dst_offset, u32 dst_stride,
- struct ttm_buffer_object *src,
+ struct vmw_bo *src,
u32 src_offset, u32 src_stride,
u32 w, u32 h,
struct vmw_diff_cpy *diff);
@@ -1395,8 +1365,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index cc3086e649eb..e831e324e737 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -35,6 +17,7 @@
#include <linux/sync_file.h>
#include <linux/hashtable.h>
+#include <linux/vmalloc.h>
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
@@ -4085,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
+/*
+ * DMA fence callback to remove a seqno_waiter
+ */
+struct seqno_waiter_rm_context {
+ struct dma_fence_cb base;
+ struct vmw_private *dev_priv;
+};
+
+static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct seqno_waiter_rm_context *ctx =
+ container_of(cb, struct seqno_waiter_rm_context, base);
+
+ vmw_seqno_waiter_remove(ctx->dev_priv);
+ kfree(ctx);
+}
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4265,6 +4265,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
+			struct seqno_waiter_rm_context *ctx =
+				kmalloc(sizeof(*ctx), GFP_KERNEL);
+			/* kmalloc() can fail; skip the waiter bookkeeping then */
+			if (ctx) {
+				ctx->dev_priv = dev_priv;
+				vmw_seqno_waiter_add(dev_priv);
+				if (dma_fence_add_callback(&fence->base,
+							   &ctx->base,
+							   seqno_waiter_rm_cb) < 0) {
+					vmw_seqno_waiter_remove(dev_priv);
+					kfree(ctx);
+				}
+			}
}
}
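
The hunk above follows a common dma_fence idiom: take a side effect up front (here, adding a seqno waiter), then let a fence callback undo it when the fence signals. dma_fence_add_callback() returning a negative value means the fence has already signaled and the callback will never run, so the undo must happen inline. A generic sketch, where my_ctx, my_get() and my_put() are hypothetical stand-ins:

	static void my_signaled_cb(struct dma_fence *f, struct dma_fence_cb *cb)
	{
		struct my_ctx *ctx = container_of(cb, struct my_ctx, base);

		my_put(ctx->dev);	/* undo the side effect */
		kfree(ctx);
	}

	/* ... at submission time ... */
	my_get(dev);
	if (dma_fence_add_callback(fence, &ctx->base, my_signaled_cb) < 0) {
		/* already signaled: the callback will not fire, undo inline */
		my_put(dev);
		kfree(ctx);
	}
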
@@ -4511,8 +4520,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 2a0cda324703..588d50ababf6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -32,7 +32,6 @@
#define VMW_FENCE_WRAP (1 << 31)
struct vmw_fence_manager {
- int num_fence_objects;
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
-
struct vmw_fence_manager *fman = fman_from_fence(fence);
- spin_lock(&fman->lock);
- list_del_init(&fence->head);
- --fman->num_fence_objects;
- spin_unlock(&fman->lock);
+ if (!list_empty(&fence->head)) {
+ spin_lock(&fman->lock);
+ list_del_init(&fence->head);
+ spin_unlock(&fman->lock);
+ }
fence->destroy(fence);
}
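
The rewritten destructor avoids taking fman->lock for fences that were never linked into fence_list or were already unlinked. Because removal uses list_del_init(), which re-initializes the node, the unlocked list_empty() test is a safe fast path here (nothing else can touch a fence whose refcount has hit zero), and the locked removal stays idempotent. The idiom in isolation:

	if (!list_empty(&obj->head)) {		/* cheap unlocked fast path */
		spin_lock(&mgr->lock);
		list_del_init(&obj->head);	/* re-inits, safe to repeat */
		spin_unlock(&mgr->lock);
	}
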
@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
.release = vmw_fence_obj_destroy,
};
-
/*
* Execute signal actions on fences recently signaled.
* This is done from a workqueue so we don't have to execute
@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
goto out_unlock;
}
list_add_tail(&fence->head, &fman->fence_list);
- ++fman->num_fence_objects;
out_unlock:
spin_unlock(&fman->lock);
@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- struct vmw_fence_obj *fence;
+ struct vmw_fence_obj *fence, *next_fence;
if (likely(!fman->seqno_valid))
return false;
@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
return false;
fman->seqno_valid = false;
- list_for_each_entry(fence, &fman->fence_list, head) {
+ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
vmw_fence_goal_write(fman->dev_priv,
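
Switching to list_for_each_entry_safe() guards against entries being unlinked from fence_list while the list is walked; the _safe variant caches the next node before the loop body runs. Minimal form:

	struct vmw_fence_obj *fence, *next;

	list_for_each_entry_safe(fence, next, &fman->fence_list, head) {
		/* body may list_del(&fence->head) without breaking the walk */
	}
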
@@ -991,7 +988,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
}
event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
- event->event.base.length = sizeof(*event);
+ event->event.base.length = sizeof(event->event);
event->event.user_data = user_data;
ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
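
The sizeof fix above is subtle: the pending-event wrapper embeds kernel-only bookkeeping in front of the uAPI payload, and base.length must describe only the bytes user space will read. Assuming the wrapper looks roughly like this:

	struct vmw_event_fence_pending {
		struct drm_pending_event base;		/* kernel-side only */
		struct drm_vmw_event_fence event;	/* copied to user space */
	};

	/* wrong: counts the kernel bookkeeping too */
	event->event.base.length = sizeof(*event);
	/* right: covers only the uAPI struct */
	event->event.base.length = sizeof(event->event);
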
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c111..e417921af584 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
- * Copyright 2021-2023 VMware, Inc.
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -30,6 +31,8 @@
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
+#include <linux/debugfs.h>
+
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
@@ -48,33 +51,20 @@ static void vmw_gem_object_close(struct drm_gem_object *obj,
{
}
-static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
+static int vmw_gem_object_pin(struct drm_gem_object *obj)
{
- struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
struct vmw_bo *vbo = to_vmw_bo(obj);
- int ret;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- goto err;
- vmw_bo_pin_reserved(vbo, do_pin);
-
- ttm_bo_unreserve(bo);
-
-err:
- return ret;
-}
+ vmw_bo_pin_reserved(vbo, true);
-
-static int vmw_gem_object_pin(struct drm_gem_object *obj)
-{
- return vmw_gem_pin_private(obj, true);
+ return 0;
}
static void vmw_gem_object_unpin(struct drm_gem_object *obj)
{
- vmw_gem_pin_private(obj, false);
+ struct vmw_bo *vbo = to_vmw_bo(obj);
+
+ vmw_bo_pin_reserved(vbo, false);
}
static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
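
The simplified pin/unpin above drops the internal ttm_bo_reserve() dance, presumably because the GEM core invokes the .pin/.unpin callbacks with the object's reservation lock already held. A defensive version of such a callback could assert that assumption:

	static int example_gem_pin(struct drm_gem_object *obj)
	{
		dma_resv_assert_held(obj->resv);	/* caller holds the resv lock */
		vmw_bo_pin_reserved(to_vmw_bo(obj), true);
		return 0;
	}
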
@@ -89,6 +79,59 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}
+static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ int ret;
+
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (!ret) {
+ if (drm_WARN_ON(obj->dev, map->is_iomem)) {
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ return -EIO;
+ }
+ }
+ } else {
+ ret = ttm_bo_vmap(bo, map);
+ }
+
+ return ret;
+}
+
+static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ if (drm_gem_is_imported(obj))
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ else
+ drm_gem_ttm_vunmap(obj, map);
+}
+
+static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ if (drm_gem_is_imported(obj)) {
+ /*
+ * Reset both vm_ops and vm_private_data, so we don't end up with
+ * vm_ops pointing to our implementation if the dma-buf backend
+ * doesn't set those fields.
+ */
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+		/* Drop the reference drm_gem_mmap_obj() acquired. */
+ if (!ret)
+ drm_gem_object_put(obj);
+
+ return ret;
+ }
+
+ return drm_gem_ttm_mmap(obj, vma);
+}
+
static const struct vm_operations_struct vmw_vm_ops = {
.pfn_mkwrite = vmw_bo_vm_mkwrite,
.page_mkwrite = vmw_bo_vm_mkwrite,
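
The vmap/vunmap/mmap helpers added above share one dispatch rule: imported objects are serviced by the dma-buf exporter, native ones by the TTM helpers. Reduced to a skeleton (vmap shown; the other two mirror it):

	static int example_vmap(struct drm_gem_object *obj, struct iosys_map *map)
	{
		if (drm_gem_is_imported(obj))	/* exporter owns the backing pages */
			return dma_buf_vmap(obj->import_attach->dmabuf, map);
		return drm_gem_ttm_vmap(obj, map);	/* native, TTM-backed */
	}
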
@@ -97,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -105,26 +148,12 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.pin = vmw_gem_object_pin,
.unpin = vmw_gem_object_unpin,
.get_sg_table = vmw_gem_object_get_sg_table,
- .vmap = drm_gem_ttm_vmap,
- .vunmap = drm_gem_ttm_vunmap,
- .mmap = drm_gem_ttm_mmap,
+ .vmap = vmw_gem_vmap,
+ .vunmap = vmw_gem_vunmap,
+ .mmap = vmw_gem_mmap,
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -140,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
@@ -149,6 +178,39 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .keep_resv = true,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
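
One detail worth noting in vmw_prime_import_sg_table() above: params.resv points at the exporter's reservation object and the BO is created while that lock is held, so importer and exporter serialize on the same dma_resv from the very first access. In outline:

	dma_resv_lock(attach->dmabuf->resv, NULL);	/* exporter's lock */
	ret = vmw_bo_create(dev_priv, &params, &vbo);	/* BO adopts that resv */
	dma_resv_unlock(attach->dmabuf->resv);
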
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index a0b47c9b33f5..5bd967fbcf55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
} else
new_max_pages = gman->max_gmr_pages * 2;
if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
- DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
+ DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n",
((new_max_pages) << (PAGE_SHIFT - 10)));
gman->max_gmr_pages = new_max_pages;
} else {
char buf[256];
snprintf(buf, sizeof(buf),
- "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
+ "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n",
((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
vmw_host_printf(buf);
DRM_WARN("%s", buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a1da5678c731..835d1eed8dd9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -31,6 +31,7 @@
#include <drm/vmwgfx_drm.h>
#include <linux/pci.h>
+#include <linux/vmalloc.h>
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cd4925346ed4..05b1c54a070c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,32 +1,16 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
@@ -37,9 +21,16 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>
+void vmw_du_init(struct vmw_display_unit *du)
+{
+ vmw_vkms_crtc_init(&du->crtc);
+}
+
void vmw_du_cleanup(struct vmw_display_unit *du)
{
struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
+
+ vmw_vkms_crtc_cleanup(&du->crtc);
drm_plane_cleanup(&du->primary);
if (vmw_cmd_supported(dev_priv))
drm_plane_cleanup(&du->cursor.base);
@@ -50,471 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
-	   we treat reservations separately from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor via CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- if (vps->surf) {
- if (vps->surf_mapped)
- return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
- return vps->surf->snooper.image;
- } else if (vps->bo)
- return vmw_bo_map_and_cache(vps->bo);
- return NULL;
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vcp->base.dev->dev_private;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
-	/* Fence the mob creation so we are guaranteed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
-		/* TODO handle non-page-aligned offsets */
-		/* TODO handle more dst & src != 0 */
-		/* TODO handle more than one copy */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -528,22 +54,16 @@ void vmw_du_primary_plane_destroy(struct drm_plane *plane)
* vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
*
* @vps: plane state associated with the display surface
- * @unreference: true if we also want to unreference the display.
*/
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
- bool unreference)
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
{
- if (vps->surf) {
+ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
+
+ if (surf) {
if (vps->pinned) {
- vmw_resource_unpin(&vps->surf->res);
+ vmw_resource_unpin(&surf->res);
vps->pinned--;
}
-
- if (unreference) {
- if (vps->pinned)
- DRM_ERROR("Surface still pinned\n");
- vmw_surface_unreference(&vps->surf);
- }
}
}
@@ -564,257 +84,7 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- vmw_du_plane_unpin_surf(vps, false);
-}
-
-
-/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface
- *
- * Returns 0 on success
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
- vps->surf_mapped = false;
- }
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps, false);
-
- if (vps->surf) {
- vmw_surface_unreference(&vps->surf);
- vps->surf = NULL;
- }
-
- if (vps->bo) {
- vmw_bo_unreference(&vps->bo);
- vps->bo = NULL;
- }
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- int ret = 0;
-
- if (vps->surf) {
- if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
- vps->surf_mapped = false;
- }
- vmw_surface_unreference(&vps->surf);
- vps->surf = NULL;
- }
-
- if (vps->bo) {
- vmw_bo_unreference(&vps->bo);
- vps->bo = NULL;
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
- vmw_bo_reference(vps->bo);
- } else {
- vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
- vmw_surface_reference(vps->surf);
- }
- }
-
- if (!vps->surf && vps->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- /*
- * Not using vmw_bo_map_and_cache() helper here as we need to
- * reserve the ttm_buffer_object first which
- * vmw_bo_map_and_cache() omits.
- */
- ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
-
- ttm_bo_unreserve(&vps->bo->tbo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
- } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
-
- WARN_ON(vps->surf->snooper.image);
- ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
- NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
- vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
- ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
- vps->surf_mapped = true;
- }
-
- if (vps->surf || vps->bo) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- s32 hotspot_x, hotspot_y;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vps->surf;
- du->cursor_bo = vps->bo;
-
- if (!vps->surf && !vps->bo) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (vps->surf) {
- du->cursor_age = du->cursor_surface->snooper.age;
- }
-
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
+ vmw_du_plane_unpin_surf(vps);
}
@@ -858,81 +128,13 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
-
- if (!ret && new_fb) {
- struct drm_crtc *crtc = new_state->crtc;
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-
- vmw_connector_state_to_vcs(du->connector.state);
- }
-
-
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +142,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+	 * This is fine in general, but broken userspace might expect
+	 * some actual rendering, so give a clue as to why it's blank.
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
@@ -965,15 +171,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ vmw_vkms_crtc_atomic_begin(crtc, state);
}
-
-void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-
/**
* vmw_du_crtc_duplicate_state - duplicate crtc state
* @crtc: DRM crtc
@@ -1074,15 +274,10 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
- if (vps->surf)
- (void) vmw_surface_reference(vps->surf);
-
- if (vps->bo)
- (void) vmw_bo_reference(vps->bo);
-
+ vmw_user_object_ref(&vps->uo);
state = &vps->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
@@ -1131,11 +326,7 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
/* Should have been freed by cleanup_fb */
- if (vps->surf)
- vmw_surface_unreference(&vps->surf);
-
- if (vps->bo)
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -1228,9 +419,22 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+ * bo->dirty is reference counted so it being NULL
+		 * bo->dirty is reference counted, so it being NULL
+		 * means the surface wasn't coherent to begin with,
+		 * and the dirty tracker therefore has to be freed
+		 * from the vmw_resource instead
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
- vmw_surface_unreference(&vfbs->surface);
+ vmw_user_object_unref(&vfbs->uo);
kfree(vfbs);
}
@@ -1275,29 +479,42 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
return -ENOSYS;
}
+static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+
+ if (WARN_ON(!bo))
+ return -EINVAL;
+ return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
+}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+ .create_handle = vmw_framebuffer_surface_create_handle,
.destroy = vmw_framebuffer_surface_destroy,
.dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct vmw_surface *surface,
+ struct vmw_user_object *uo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
- *mode_cmd,
- bool is_bo_proxy)
+ *mode_cmd)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
- enum SVGA3dSurfaceFormat format;
+ struct vmw_surface *surface;
int ret;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
+ surface = vmw_user_object_surface(uo);
+
/*
* Sanity checks.
*/
@@ -1325,34 +542,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return -EINVAL;
}
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_ARGB8888:
- format = SVGA3D_A8R8G8B8;
- break;
- case DRM_FORMAT_XRGB8888:
- format = SVGA3D_X8R8G8B8;
- break;
- case DRM_FORMAT_RGB565:
- format = SVGA3D_R5G6B5;
- break;
- case DRM_FORMAT_XRGB1555:
- format = SVGA3D_A1R5G5B5;
- break;
- default:
- DRM_ERROR("Invalid pixel format: %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
-
- /*
- * For DX, surface format validation is done when surface->scanout
- * is set.
- */
- if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
- DRM_ERROR("Invalid surface format for requested mode.\n");
- return -EINVAL;
- }
-
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
@@ -1360,9 +549,8 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
}
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
- vfbs->surface = vmw_surface_reference(surface);
- vfbs->base.user_handle = mode_cmd->handles[0];
- vfbs->is_bo_proxy = is_bo_proxy;
+ memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
+ vmw_user_object_ref(&vfbs->uo);
*out = &vfbs->base;
@@ -1374,7 +562,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return 0;
out_err2:
- vmw_surface_unreference(&surface);
+ vmw_user_object_unref(&vfbs->uo);
kfree(vfbs);
out_err1:
return ret;
@@ -1390,7 +578,6 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
{
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
-
return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
@@ -1399,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1411,86 +599,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};
-/**
- * vmw_create_bo_proxy - create a proxy surface for the buffer object
- *
- * @dev: DRM device
- * @mode_cmd: parameters for the new surface
- * @bo_mob: MOB backing the buffer object
- * @srf_out: newly created surface
- *
- * When the content FB is a buffer object, we create a surface as a proxy to the
- * same buffer. This way we can do a surface copy rather than a surface DMA.
- * This is a more efficient approach.
- *
- * RETURNS:
- * 0 on success, error code otherwise
- */
-static int vmw_create_bo_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_bo *bo_mob,
- struct vmw_surface **srf_out)
-{
- struct vmw_surface_metadata metadata = {0};
- uint32_t format;
- struct vmw_resource *res;
- unsigned int bytes_pp;
- int ret;
-
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- format = SVGA3D_X8R8G8B8;
- bytes_pp = 4;
- break;
-
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- format = SVGA3D_R5G6B5;
- bytes_pp = 2;
- break;
-
- case 8:
- format = SVGA3D_P8;
- bytes_pp = 1;
- break;
-
- default:
- DRM_ERROR("Invalid framebuffer format %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
-
- metadata.format = format;
- metadata.mip_levels[0] = 1;
- metadata.num_sizes = 1;
- metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
- metadata.base_size.height = mode_cmd->height;
- metadata.base_size.depth = 1;
- metadata.scanout = true;
-
- ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
- if (ret) {
- DRM_ERROR("Failed to allocate proxy content buffer\n");
- return ret;
- }
-
- res = &(*srf_out)->res;
-
- /* Reserve and switch the backing mob. */
- mutex_lock(&res->dev_priv->cmdbuf_mutex);
- (void) vmw_resource_reserve(res, false, true);
- vmw_user_bo_unref(&res->guest_memory_bo);
- res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
- res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-
- return 0;
-}
-
-
-
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
@@ -1529,7 +637,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
- vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
@@ -1570,55 +677,24 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
- * @bo: Pointer to buffer object to wrap the kms framebuffer around.
- * Either @bo or @surface must be NULL.
- * @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @bo or @surface must be NULL.
- * @only_2d: No presents will occur to this buffer object based framebuffer.
- * This helps the code to do some important optimizations.
+ * @uo: Pointer to user object to wrap the kms framebuffer around.
+ * Either the buffer or surface inside the user object must be NULL.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_bo *bo,
- struct vmw_surface *surface,
- bool only_2d,
+ struct vmw_user_object *uo,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
- bool is_bo_proxy = false;
int ret;
- /*
- * We cannot use the SurfaceDMA command in an non-accelerated VM,
- * therefore, wrap the buffer object in a surface so we can use the
- * SurfaceCopy command.
- */
- if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
- bo && only_2d &&
- mode_cmd->width > 64 && /* Don't create a proxy for cursor */
- dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
- bo, &surface);
- if (ret)
- return ERR_PTR(ret);
-
- is_bo_proxy = true;
- }
-
	/* Create the new framebuffer depending on what we have */
- if (surface) {
- ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
- mode_cmd,
- is_bo_proxy);
- /*
- * vmw_create_bo_proxy() adds a reference that is no longer
- * needed
- */
- if (is_bo_proxy)
- vmw_surface_unreference(&surface);
- } else if (bo) {
- ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+ if (vmw_user_object_surface(uo)) {
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
+ mode_cmd);
+ } else if (uo->buffer) {
+ ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
mode_cmd);
} else {
BUG();
@@ -1640,14 +716,14 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
- struct vmw_surface *surface = NULL;
- struct vmw_bo *bo = NULL;
+ struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
- ret = vmw_user_lookup_handle(dev_priv, file_priv,
- mode_cmd->handles[0],
- &surface, &bo);
+ ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
+ &uo);
if (ret) {
DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
mode_cmd->handles[0], mode_cmd->handles[0]);
@@ -1655,35 +731,41 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- if (!bo &&
+ if (vmw_user_object_surface(&uo) &&
!vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
+ ret = -EINVAL;
goto err_out;
}
- vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
- !(dev_priv->capabilities & SVGA_CAP_3D),
- mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
err_out:
- /* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
- vmw_user_bo_unref(&bo);
- if (surface)
- vmw_surface_unreference(&surface);
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
+	/* vmw_user_object_lookup takes one ref; so does new_fb */
+ vmw_user_object_unref(&uo);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
return ERR_PTR(ret);
}
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+
return &vfb->base;
}
@@ -2040,6 +1122,29 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
"hotplug_mode_update", 0, 1);
}
+static void
+vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct vmw_private *vmw = vmw_priv(old_state->dev);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_tail(old_state);
+
+ if (vmw->vkms_enabled) {
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ (void)old_crtc_state;
+ flush_work(&du->vkms.crc_generator_work);
+ }
+ }
+}
+
+static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
+ .atomic_commit_tail = vmw_atomic_commit_tail,
+};
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -2059,6 +1164,7 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
+ dev->mode_config.helper_private = &vmw_mode_config_helpers;
drm_mode_create_suggested_offset_properties(dev);
vmw_kms_create_hotplug_mode_update_property(dev_priv);
@@ -2092,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
@@ -2152,13 +1220,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
+static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
+ u64 pitch,
+ u64 height)
{
- return ((u64) pitch * (u64) height) < (u64)
- ((dev_priv->active_display_unit == vmw_du_screen_target) ?
- dev_priv->max_primary_mem : dev_priv->vram_size);
+ return (pitch * height) < (u64)dev_priv->vram_size;
}
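
For a quick sense of the simplified bound (illustrative numbers, not part of this series): the callers below pass mode->hdisplay * assumed_cpp as the pitch, so a 1920x1080 mode at the assumed 4 bytes per pixel needs

	pitch = 1920 * 4    = 7680 bytes
	size  = 7680 * 1080 = 8294400 bytes (~7.9 MiB)

and the mode is accepted whenever vram_size exceeds that; the old special case validating screen targets against max_primary_mem is gone.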
/**
@@ -2328,7 +1395,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
- void __user *user_rects;
+ const void __user *user_rects;
struct drm_vmw_rect *rects;
struct drm_rect *drm_rects;
unsigned rects_size;
@@ -2340,6 +1407,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
VMWGFX_MIN_INITIAL_HEIGHT};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
+ } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
+ return -E2BIG;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
@@ -2567,72 +1636,6 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_update_proxy - Helper function to update a proxy surface from
- * its backing MOB.
- *
- * @res: Pointer to the surface resource
- * @clips: Clip rects in framebuffer (surface) space.
- * @num_clips: Number of clips in @clips.
- * @increment: Integer with which to increment the clip counter when looping.
- * Used to skip a predetermined number of clip rects.
- *
- * This function makes sure the proxy surface is updated from its backing MOB
- * using the region given by @clips. The surface resource @res and its backing
- * MOB needs to be reserved and validated on call.
- */
-int vmw_kms_update_proxy(struct vmw_resource *res,
- const struct drm_clip_rect *clips,
- unsigned num_clips,
- int increment)
-{
- struct vmw_private *dev_priv = res->dev_priv;
- struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
- SVGA3dBox *box;
- size_t copy_size = 0;
- int i;
-
- if (!clips)
- return 0;
-
- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
- if (!cmd)
- return -ENOMEM;
-
- for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
- box = &cmd->body.box;
-
- cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.image.sid = res->id;
- cmd->body.image.face = 0;
- cmd->body.image.mipmap = 0;
-
- if (clips->x1 > size->width || clips->x2 > size->width ||
- clips->y1 > size->height || clips->y2 > size->height) {
- DRM_ERROR("Invalid clips outsize of framebuffer.\n");
- return -EINVAL;
- }
-
- box->x = clips->x1;
- box->y = clips->y1;
- box->z = 0;
- box->w = clips->x2 - clips->x1;
- box->h = clips->y2 - clips->y1;
- box->d = 1;
-
- copy_size += sizeof(*cmd);
- }
-
- vmw_cmd_commit(dev_priv, copy_size);
-
- return 0;
-}
-
-/**
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
@@ -2766,8 +1769,9 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
- ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ ret = vmw_validation_add_resource(&val_ctx, &surf->res,
0, VMW_RES_DIRTY_NONE, NULL,
NULL);
}
@@ -2852,27 +1856,20 @@ out_unref:
* Returns MODE_OK on success, or a drm_mode_status error code.
*/
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
- u32 max_width = dev_priv->texture_max_width;
- u32 max_height = dev_priv->texture_max_height;
u32 assumed_cpp = 4;
if (dev_priv->assume_16bpp)
assumed_cpp = 2;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(dev_priv->stdu_max_width, max_width);
- max_height = min(dev_priv->stdu_max_height, max_height);
- }
-
- if (max_width < mode->hdisplay)
- return MODE_BAD_HVALUE;
-
- if (max_height < mode->vdisplay)
- return MODE_BAD_VVALUE;
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
if (!vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_cpp,
@@ -2930,3 +1927,93 @@ int vmw_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
+
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ vmw_user_bo_ref(uo->buffer);
+ else if (uo->surface)
+ vmw_surface_reference(uo->surface);
+ return uo;
+}
+
+void vmw_user_object_unref(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ vmw_user_bo_unref(&uo->buffer);
+ else if (uo->surface)
+ vmw_surface_unreference(&uo->surface);
+}
+
+struct vmw_bo *
+vmw_user_object_buffer(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ return uo->buffer;
+ else if (uo->surface)
+ return uo->surface->res.guest_memory_bo;
+ return NULL;
+}
+
+struct vmw_surface *
+vmw_user_object_surface(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ return uo->buffer->dumb_surface;
+ return uo->surface;
+}
+
+void *vmw_user_object_map(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+
+ WARN_ON(!bo);
+ return vmw_bo_map_and_cache(bo);
+}
+
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+
+ WARN_ON(!bo);
+ return vmw_bo_map_and_cache_size(bo, size);
+}
+
+void vmw_user_object_unmap(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+ int ret;
+
+ WARN_ON(!bo);
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
+
+ vmw_bo_unmap(bo);
+ vmw_bo_pin_reserved(bo, false);
+
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo;
+
+ if (!uo || vmw_user_object_is_null(uo))
+ return false;
+
+ bo = vmw_user_object_buffer(uo);
+
+ if (WARN_ON(!bo))
+ return false;
+
+ WARN_ON(bo->map.bo && !bo->map.virtual);
+ return bo->map.virtual;
+}
+
+bool vmw_user_object_is_null(struct vmw_user_object *uo)
+{
+ return !uo->buffer && !uo->surface;
+}
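
A minimal usage sketch for the new vmw_user_object helpers, following the lookup/unref pairing used by vmw_kms_fb_create() above. The function name is hypothetical and error handling is trimmed:

/* Hypothetical caller, for illustration only. */
static int example_lookup(struct vmw_private *dev_priv,
			  struct drm_file *file_priv, u32 handle)
{
	struct vmw_user_object uo = {0};
	struct vmw_bo *bo;
	int ret;

	ret = vmw_user_object_lookup(dev_priv, file_priv, handle, &uo);
	if (ret)
		return ret;

	/* At most one of uo.buffer/uo.surface is set after a lookup. */
	bo = vmw_user_object_buffer(&uo);
	if (bo) {
		/* ... use the backing buffer, surface-backed or not ... */
	}

	vmw_user_object_unref(&uo);	/* drops the lookup reference */
	return 0;
}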
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a94947b588e8..511e29cdb987 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,39 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -198,9 +180,6 @@ struct vmw_kms_dirty {
s32 unit_y2;
};
-#define VMWGFX_NUM_DISPLAY_UNITS 8
-
-
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
#define vmw_framebuffer_to_vfbs(x) \
@@ -217,25 +196,13 @@ struct vmw_kms_dirty {
struct vmw_framebuffer {
struct drm_framebuffer base;
bool bo;
- uint32_t user_handle;
-};
-
-/*
- * Clip rectangle
- */
-struct vmw_clip_rect {
- int x1, x2, y1, y2;
};
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
- struct vmw_surface *surface;
- struct vmw_bo *buffer;
- struct list_head head;
- bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
+ struct vmw_user_object uo;
};
-
struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
struct vmw_bo *buffer;
@@ -243,14 +210,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
-};
-
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
@@ -258,7 +221,6 @@ static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -269,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -287,8 +244,7 @@ struct vmw_cursor_plane_state {
*/
struct vmw_plane_state {
struct drm_plane_state base;
- struct vmw_surface *surf;
- struct vmw_bo *bo;
+ struct vmw_user_object uo;
int content_fb_type;
unsigned long bo_size;
@@ -298,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -332,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -358,18 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- struct vmw_bo *cursor_bo;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -387,11 +319,25 @@ struct vmw_display_unit {
bool is_implicit;
int set_gui_x;
int set_gui_y;
-};
-struct vmw_validation_ctx {
- struct vmw_resource *res;
- struct vmw_bo *buf;
+ struct {
+ struct work_struct crc_generator_work;
+ struct hrtimer timer;
+ ktime_t period_ns;
+
+ /* protects concurrent access to the vblank handler */
+ atomic_t atomic_lock;
+ /* protected by @atomic_lock */
+ bool crc_enabled;
+ struct vmw_surface *surface;
+
+ /* protects concurrent access to the crc worker */
+ spinlock_t crc_state_lock;
+ /* protected by @crc_state_lock */
+ bool crc_pending;
+ u64 frame_start;
+ u64 frame_end;
+ } vkms;
};
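
The field comments above split the new vkms state between two locks. Below is a hypothetical vblank handler, sketched only to show the intended handoff to the CRC worker; the real handlers live in vmwgfx_vkms.c, which this series adds but this hunk does not show:

static enum hrtimer_restart example_vblank_handler(struct hrtimer *timer)
{
	struct vmw_display_unit *du =
		container_of(timer, struct vmw_display_unit, vkms.timer);
	unsigned long flags;
	bool queue_crc;

	/* irqsave so the sketch stays safe regardless of calling context */
	spin_lock_irqsave(&du->vkms.crc_state_lock, flags);
	queue_crc = du->vkms.crc_pending;
	spin_unlock_irqrestore(&du->vkms.crc_state_lock, flags);

	if (queue_crc)
		schedule_work(&du->vkms.crc_generator_work);

	hrtimer_forward_now(timer, du->vkms.period_ns);
	return HRTIMER_RESTART;
}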
#define vmw_crtc_to_du(x) \
@@ -403,9 +349,8 @@ struct vmw_validation_ctx {
/*
* Shared display unit functions - vmwgfx_kms.c
*/
+void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -436,7 +381,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int increment,
struct vmw_kms_dirty *dirty);
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
int vmw_connector_get_modes(struct drm_connector *connector);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
@@ -453,9 +398,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_bo *bo,
- struct vmw_surface *surface,
- bool only_2d,
+ struct vmw_user_object *uo,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
@@ -463,34 +406,22 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
void vmw_du_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
- bool unreference);
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps);
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state);
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state);
-void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state);
void vmw_du_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index c4db4aecca6c..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,6 +28,7 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -146,8 +148,9 @@ static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
struct vmw_bo *buf;
int ret;
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+ buf = vfb->bo ?
+ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
if (!buf)
return 0;
@@ -168,8 +171,10 @@ static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+ buf = vfb->bo ?
+ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
+
if (WARN_ON(!buf))
return 0;
@@ -241,33 +246,6 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
}
-/**
- * vmw_ldu_crtc_atomic_enable - Noop
- *
- * @crtc: CRTC associated with the new screen
- * @state: Unused
- *
- * This is called after a mode set has been completed. Here's
- * usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active
- * but since for LDU the display plane is closely tied to the
- * CRTC, it makes more sense to do those at plane update time.
- */
-static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-/**
- * vmw_ldu_crtc_atomic_disable - Turns off CRTC
- *
- * @crtc: CRTC to be turned off
- * @state: Unused
- */
-static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
@@ -276,6 +254,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
};
@@ -391,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -402,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -418,9 +399,9 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_ldu_crtc_atomic_enable,
- .atomic_disable = vmw_ldu_crtc_atomic_disable,
+ .atomic_flush = vmw_vkms_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
+ .atomic_disable = vmw_vkms_crtc_atomic_disable,
};
@@ -498,7 +479,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -541,6 +521,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
dev_priv->implicit_placement_property,
1);
+ vmw_du_init(&ldu->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 2651fe0ef518..1d9a42cbc88f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -48,8 +48,6 @@
#define RETRIES 3
-#define VMW_HYPERVISOR_MAGIC 0x564D5868
-
#define VMW_PORT_CMD_MSG 30
#define VMW_PORT_CMD_HB_MSG 0
#define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
@@ -104,20 +102,18 @@ static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
*/
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
+ u32 ecx, edx, esi, edi;
- VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
- (protocol | GUESTMSG_FLAG_COOKIE), si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall6(VMW_PORT_CMD_OPEN_CHANNEL,
+ (protocol | GUESTMSG_FLAG_COOKIE), 0,
+ &ecx, &edx, &esi, &edi);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
return -EINVAL;
channel->channel_id = HIGH_WORD(edx);
- channel->cookie_high = si;
- channel->cookie_low = di;
+ channel->cookie_high = esi;
+ channel->cookie_low = edi;
return 0;
}
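
The same conversion repeats through the rest of this file. Reading the hunks together with the arm64 implementation further down, the argument mapping for the five-argument wrapper works out as follows (an editorial sketch, not a quoted header):

/*
 * old: VMW_PORT(cmd, in_ebx, si, di, flags, VMW_HYPERVISOR_MAGIC,
 *               eax, ebx, ecx, edx, si, di);
 * new: eax = vmware_hypercall5(cmd, in_ebx, flags, si, di, &ecx);
 *
 * in1 rides in ebx, in3 carries the flags/channel id (edx), in4/in5
 * carry the cookies (esi/edi), out2 returns ecx, and the magic number
 * moves inside the wrapper.
 */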
@@ -133,17 +129,13 @@ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
*/
static int vmw_close_channel(struct rpc_channel *channel)
{
- unsigned long eax, ebx, ecx, edx, si, di;
-
- /* Set up additional parameters */
- si = channel->cookie_high;
- di = channel->cookie_low;
+ u32 ecx;
- VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
- 0, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall5(VMW_PORT_CMD_CLOSE_CHANNEL,
+ 0, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
return -EINVAL;
@@ -163,24 +155,18 @@ static int vmw_close_channel(struct rpc_channel *channel)
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
const char *msg, bool hb)
{
- unsigned long si, di, eax, ebx, ecx, edx;
+ u32 ebx, ecx;
unsigned long msg_len = strlen(msg);
/* HB port can't access encrypted memory. */
if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
- unsigned long bp = channel->cookie_high;
- u32 channel_id = (channel->channel_id << 16);
-
- si = (uintptr_t) msg;
- di = channel->cookie_low;
-
- VMW_PORT_HB_OUT(
+ vmware_hypercall_hb_out(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- msg_len, si, di,
- VMWARE_HYPERVISOR_HB | channel_id |
- VMWARE_HYPERVISOR_OUT,
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
+ msg_len,
+ channel->channel_id << 16,
+ (uintptr_t) msg, channel->cookie_low,
+ channel->cookie_high,
+ &ebx);
return ebx;
}
@@ -194,14 +180,13 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
memcpy(&word, msg, bytes);
msg_len -= bytes;
msg += bytes;
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
- word, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+
+ vmware_hypercall5(VMW_PORT_CMD_MSG |
+ (MSG_TYPE_SENDPAYLOAD << 16),
+ word, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
}
return ecx;
@@ -220,22 +205,17 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
unsigned long reply_len, bool hb)
{
- unsigned long si, di, eax, ebx, ecx, edx;
+ u32 ebx, ecx, edx;
/* HB port can't access encrypted memory */
if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
- unsigned long bp = channel->cookie_low;
- u32 channel_id = (channel->channel_id << 16);
-
- si = channel->cookie_high;
- di = (uintptr_t) reply;
-
- VMW_PORT_HB_IN(
+ vmware_hypercall_hb_in(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
- reply_len, si, di,
- VMWARE_HYPERVISOR_HB | channel_id,
- VMW_HYPERVISOR_MAGIC, bp,
- eax, ebx, ecx, edx, si, di);
+ reply_len,
+ channel->channel_id << 16,
+ channel->cookie_high,
+ (uintptr_t) reply, channel->cookie_low,
+ &ebx);
return ebx;
}
@@ -245,14 +225,13 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
while (reply_len) {
unsigned int bytes = min_t(unsigned long, reply_len, 4);
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
- MESSAGE_STATUS_SUCCESS, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall7(VMW_PORT_CMD_MSG |
+ (MSG_TYPE_RECVPAYLOAD << 16),
+ MESSAGE_STATUS_SUCCESS,
+ channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ebx, &ecx, &edx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
break;
@@ -276,22 +255,18 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
*/
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
- unsigned long eax, ebx, ecx, edx, si, di;
+ u32 ebx, ecx;
size_t msg_len = strlen(msg);
int retries = 0;
while (retries < RETRIES) {
retries++;
- /* Set up additional parameters */
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_SENDSIZE,
- msg_len, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall5(VMW_PORT_CMD_SENDSIZE,
+ msg_len, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
/* Expected success. Give up. */
@@ -314,7 +289,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
return -EINVAL;
}
-STACK_FRAME_NON_STANDARD(vmw_send_msg);
+STACK_FRAME_NON_STANDARD_FP(vmw_send_msg);
/**
@@ -329,7 +304,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
size_t *msg_len)
{
- unsigned long eax, ebx, ecx, edx, si, di;
+ u32 ebx, ecx, edx;
char *reply;
size_t reply_len;
int retries = 0;
@@ -341,15 +316,11 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
while (retries < RETRIES) {
retries++;
- /* Set up additional parameters */
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_RECVSIZE,
- 0, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall7(VMW_PORT_CMD_RECVSIZE,
+ 0, channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ebx, &ecx, &edx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
DRM_ERROR("Failed to get reply size for host message.\n");
@@ -384,16 +355,12 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply[reply_len] = '\0';
-
- /* Ack buffer */
- si = channel->cookie_high;
- di = channel->cookie_low;
-
- VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
- MESSAGE_STATUS_SUCCESS, si, di,
- channel->channel_id << 16,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall5(VMW_PORT_CMD_RECVSTATUS,
+ MESSAGE_STATUS_SUCCESS,
+ channel->channel_id << 16,
+ channel->cookie_high,
+ channel->cookie_low,
+ &ecx);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
@@ -652,13 +619,7 @@ static inline void reset_ppn_array(PPN64 *arr, size_t size)
*/
static inline void hypervisor_ppn_reset_all(void)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
-
- VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
- 0, si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0);
}
/**
@@ -669,13 +630,7 @@ static inline void hypervisor_ppn_reset_all(void)
*/
static inline void hypervisor_ppn_add(PPN64 pfn)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
-
- VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
- (unsigned long)pfn, si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
}
/**
@@ -686,13 +641,7 @@ static inline void hypervisor_ppn_add(PPN64 pfn)
*/
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
- unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
-
- VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
- (unsigned long)pfn, si, di,
- 0,
- VMW_HYPERVISOR_MAGIC,
- eax, ebx, ecx, edx, si, di);
+ vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
index 4f40167ad61f..3c78e9338b54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
@@ -34,6 +34,8 @@
#define VMWARE_HYPERVISOR_HB BIT(0)
#define VMWARE_HYPERVISOR_OUT BIT(1)
+#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
+
#define X86_IO_MAGIC 0x86
#define X86_IO_W7_SIZE_SHIFT 0
@@ -45,86 +47,158 @@
#define X86_IO_W7_IMM_SHIFT 5
#define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT)
-static inline void vmw_port(unsigned long cmd, unsigned long in_ebx,
- unsigned long in_si, unsigned long in_di,
- unsigned long flags, unsigned long magic,
- unsigned long *eax, unsigned long *ebx,
- unsigned long *ecx, unsigned long *edx,
- unsigned long *si, unsigned long *di)
+static inline
+unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
{
- register u64 x0 asm("x0") = magic;
- register u64 x1 asm("x1") = in_ebx;
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
register u64 x2 asm("x2") = cmd;
- register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT;
- register u64 x4 asm("x4") = in_si;
- register u64 x5 asm("x5") = in_di;
+ register u64 x3 asm("x3") = VMWARE_HYPERVISOR_PORT;
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0)
+ : "r" (x1), "r" (x2), "r" (x3), "r" (x7)
+ : "memory");
+
+ return x0;
+}
+
+static inline
+unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, u32 *out2)
+{
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4") = in4;
+ register u64 x5 asm("x5") = in5;
register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
X86_IO_W7_WITH |
X86_IO_W7_DIR |
(2 << X86_IO_W7_SIZE_SHIFT);
- asm volatile("mrs xzr, mdccsr_el0 \n\t"
- : "+r"(x0), "+r"(x1), "+r"(x2),
- "+r"(x3), "+r"(x4), "+r"(x5)
- : "r"(x7)
- :);
- *eax = x0;
- *ebx = x1;
- *ecx = x2;
- *edx = x3;
- *si = x4;
- *di = x5;
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x2)
+ : "r" (x1), "r" (x3), "r" (x4), "r" (x5), "r" (x7)
+ : "memory");
+
+ *out2 = x2;
+ return x0;
}
-static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx,
- unsigned long in_si, unsigned long in_di,
- unsigned long flags, unsigned long magic,
- unsigned long bp, u32 w7dir,
- unsigned long *eax, unsigned long *ebx,
- unsigned long *ecx, unsigned long *edx,
- unsigned long *si, unsigned long *di)
+static inline
+unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
+ unsigned long in3, u32 *out2,
+ u32 *out3, u32 *out4, u32 *out5)
{
- register u64 x0 asm("x0") = magic;
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4");
+ register u64 x5 asm("x5");
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x2), "+r" (x3), "=r" (x4), "=r" (x5)
+ : "r" (x1), "r" (x7)
+ : "memory");
+
+ *out2 = x2;
+ *out3 = x3;
+ *out4 = x4;
+ *out5 = x5;
+ return x0;
+}
+
+static inline
+unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, u32 *out1,
+ u32 *out2, u32 *out3)
+{
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
+ register u64 x1 asm("x1") = in1;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4") = in4;
+ register u64 x5 asm("x5") = in5;
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x1), "+r" (x2), "+r" (x3)
+ : "r" (x4), "r" (x5), "r" (x7)
+ : "memory");
+
+ *out1 = x1;
+ *out2 = x2;
+ *out3 = x3;
+ return x0;
+}
+
+static inline
+unsigned long vmware_hypercall_hb(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1, int dir)
+{
+ register u64 x0 asm("x0") = VMWARE_HYPERVISOR_MAGIC;
register u64 x1 asm("x1") = cmd;
- register u64 x2 asm("x2") = in_ecx;
- register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT_HB;
- register u64 x4 asm("x4") = in_si;
- register u64 x5 asm("x5") = in_di;
- register u64 x6 asm("x6") = bp;
+ register u64 x2 asm("x2") = in2;
+ register u64 x3 asm("x3") = in3 | VMWARE_HYPERVISOR_PORT_HB;
+ register u64 x4 asm("x4") = in4;
+ register u64 x5 asm("x5") = in5;
+ register u64 x6 asm("x6") = in6;
register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
X86_IO_W7_STR |
X86_IO_W7_WITH |
- w7dir;
-
- asm volatile("mrs xzr, mdccsr_el0 \n\t"
- : "+r"(x0), "+r"(x1), "+r"(x2),
- "+r"(x3), "+r"(x4), "+r"(x5)
- : "r"(x6), "r"(x7)
- :);
- *eax = x0;
- *ebx = x1;
- *ecx = x2;
- *edx = x3;
- *si = x4;
- *di = x5;
-}
+ dir;
-#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \
- si, di) \
- vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \
- &edx, &si, &di)
+ asm_inline volatile (
+ "mrs xzr, mdccsr_el0; "
+ : "+r" (x0), "+r" (x1)
+ : "r" (x2), "r" (x3), "r" (x4), "r" (x5),
+ "r" (x6), "r" (x7)
+ : "memory");
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
- ecx, edx, si, di) \
- vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
- 0, &eax, &ebx, &ecx, &edx, &si, &di)
+ *out1 = x1;
+ return x0;
+}
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
- ecx, edx, si, di) \
- vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
- X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di)
+static inline
+unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1)
+{
+ return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1, 0);
+}
+static inline
+unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2,
+ unsigned long in3, unsigned long in4,
+ unsigned long in5, unsigned long in6,
+ u32 *out1)
+{
+ return vmware_hypercall_hb(cmd, in2, in3, in4, in5, in6, out1,
+ X86_IO_W7_DIR);
+}
#endif
#endif /* _VMWGFX_MSG_ARM64_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
index 23899d743a90..13304d34cc6e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
@@ -37,191 +37,6 @@
#include <asm/vmware.h>
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last two parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ebx: [IN] Message Len, through EBX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set ot 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
- flags, magic, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile (VMWARE_HYPERCALL : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(in_ebx), \
- "c"(cmd), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di) : \
- "memory"); \
-})
-
-
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last 3 parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ecx: [IN] Message Len, through ECX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set to 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @bp: [IN]
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#ifdef __x86_64__
-
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ( \
- UNWIND_HINT_SAVE \
- "push %%rbp;" \
- UNWIND_HINT_UNDEFINED \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%rbp;" \
- UNWIND_HINT_RESTORE : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ( \
- UNWIND_HINT_SAVE \
- "push %%rbp;" \
- UNWIND_HINT_UNDEFINED \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%rbp;" \
- UNWIND_HINT_RESTORE : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-#elif defined(__i386__)
-
-/*
- * In the 32-bit version of this macro, we store bp in a memory location
- * because we've ran out of registers.
- * Now we can't reference that memory location while we've modified
- * %esp or %ebp, so we first push it on the stack, just before we push
- * %ebp, and then when we need it we read it from the stack where we
- * just pushed it.
- */
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-#endif /* defined(__i386__) */
-
#endif /* defined(__i386__) || defined(__x86_64__) */
#endif /* _VMWGFX_MSG_X86_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index c45b4724e414..e20f64b67b26 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
- bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
+ bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
int i, num_items;
SVGAGuestPtr ptr;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
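
vmw_bo_dirty_clear() above clears the tracked range one dirty run at a time. The following standalone userspace sketch demonstrates the run walk with toy stand-ins for the kernel's find_next_bit()/find_next_zero_bit()/bitmap_clear(); it illustrates the loop shape, it is not the kernel code:

#include <stdio.h>

#define NPAGES 16

/* Toy byte-per-bit stand-ins for the kernel bitmap helpers. */
static unsigned find_next_bit(const unsigned char *map, unsigned size,
			      unsigned off)
{
	while (off < size && !map[off])
		off++;
	return off;
}

static unsigned find_next_zero_bit(const unsigned char *map, unsigned size,
				   unsigned off)
{
	while (off < size && map[off])
		off++;
	return off;
}

int main(void)
{
	/* One byte per "page": pages 3-5 and 9 start out dirty. */
	unsigned char dirty[NPAGES] = { [3] = 1, [4] = 1, [5] = 1, [9] = 1 };
	unsigned cur = 0, end = NPAGES;

	while (cur < end) {
		unsigned start = find_next_bit(dirty, end, cur);
		unsigned stop, i;

		if (start >= end)
			break;
		stop = find_next_zero_bit(dirty, end, start + 1);
		printf("clearing run [%u, %u)\n", start, stop);
		for (i = start; i < stop; i++)	/* bitmap_clear() */
			dirty[i] = 0;
		cur = stop + 1;
	}
	return 0;	/* prints [3, 6) and [9, 10) */
}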
+
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7c0c..598b90ac7590 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2013-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -31,6 +32,7 @@
*/
#include "vmwgfx_drv.h"
+#include "vmwgfx_bo.h"
#include "ttm_object.h"
#include <linux/dma-buf.h>
@@ -75,8 +77,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -84,6 +90,35 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
+ struct vmw_private *vmw = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ struct vmw_bo *vbo;
+ int ret;
+ int surf_handle;
+
+ if (handle > VMWGFX_NUM_MOB) {
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ } else {
+ ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
+ if (ret)
+ return ret;
+ if (vbo && vbo->is_dumb) {
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle,
+ flags, prime_fd);
+ } else {
+ surf_handle = vmw_lookup_surface_handle_for_buffer(vmw,
+ vbo,
+ handle);
+ if (surf_handle > 0)
+ ret = ttm_prime_handle_to_fd(tfile, surf_handle,
+ flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv,
+ handle, flags,
+ prime_fd);
+ }
+ vmw_user_bo_unref(&vbo);
+ }
+
+ return ret;
}
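
An editorial gloss on the export path above (a summary, not code from the series):

/*
 * handle > VMWGFX_NUM_MOB                 -> legacy TTM prime object
 * dumb buffer                             -> GEM prime export
 * buffer with an associated user surface  -> export the surface handle
 *                                            through TTM prime
 * anything else                           -> GEM prime export
 */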
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index ca300c7427d2..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -58,6 +59,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
rb_link_node(&res->mob_node, parent, new);
rb_insert_color(&res->mob_node, &gbo->res_tree);
+ vmw_bo_del_detached_resource(gbo, res);
vmw_bo_prio_add(gbo, res->used_prio);
}
@@ -271,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -287,28 +289,35 @@ out_bad_resource:
*
 * The pointers pointed at by out_surf and out_buf need to be NULL.
*/
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+int vmw_user_object_lookup(struct vmw_private *dev_priv,
struct drm_file *filp,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_bo **out_buf)
+ u32 handle,
+ struct vmw_user_object *uo)
{
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
- BUG_ON(*out_surf || *out_buf);
+ WARN_ON(uo->surface || uo->buffer);
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
user_surface_converter,
&res);
if (!ret) {
- *out_surf = vmw_res_to_srf(res);
+ uo->surface = vmw_res_to_srf(res);
return 0;
}
- *out_surf = NULL;
- ret = vmw_user_bo_lookup(filp, handle, out_buf);
+ uo->surface = NULL;
+ ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
+ if (!ret && !uo->buffer->is_dumb) {
+ uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
+ uo->buffer,
+ handle);
+ if (uo->surface)
+ vmw_user_bo_unref(&uo->buffer);
+ }
+
return ret;
}
@@ -338,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -522,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -548,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -610,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
@@ -1064,6 +1073,22 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
end << PAGE_SHIFT);
}
+int vmw_resource_clean(struct vmw_resource *res)
+{
+ int ret = 0;
+
+ if (res->res_dirty) {
+ if (!res->func->clean)
+ return -EINVAL;
+
+ ret = res->func->clean(res);
+ if (ret)
+ return ret;
+ res->res_dirty = false;
+ }
+ return ret;
+}
+
/**
* vmw_resources_clean - Clean resources intersecting a mob range
* @vbo: The mob buffer object
@@ -1080,6 +1105,7 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
unsigned long res_start = start << PAGE_SHIFT;
unsigned long res_end = end << PAGE_SHIFT;
unsigned long last_cleaned = 0;
+ int ret;
/*
* Find the resource with lowest backup_offset that intersects the
@@ -1106,18 +1132,9 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
* intersecting the range.
*/
while (found) {
- if (found->res_dirty) {
- int ret;
-
- if (!found->func->clean)
- return -EINVAL;
-
- ret = found->func->clean(found);
- if (ret)
- return ret;
-
- found->res_dirty = false;
- }
+ ret = vmw_resource_clean(found);
+ if (ret)
+ return ret;
last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 30c3ad27b662..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,11 +28,13 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
@@ -89,7 +92,6 @@ struct vmw_kms_sou_define_gmrfb {
struct vmw_screen_object_unit {
struct vmw_display_unit base;
- unsigned long buffer_size; /**< Size of allocated buffer */
struct vmw_bo *buffer; /**< Backing store buffer */
bool defined;
@@ -239,8 +241,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_connector_state *vmw_conn_state;
int x, y;
- sou->buffer = vps->bo;
- sou->buffer_size = vps->bo_size;
+ sou->buffer = vmw_user_object_buffer(&vps->uo);
conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
@@ -255,7 +256,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
} else {
sou->buffer = NULL;
- sou->buffer_size = 0;
}
}
@@ -271,19 +271,6 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
}
/**
- * vmw_sou_crtc_atomic_enable - Noop
- *
- * @crtc: CRTC associated with the new screen
- * @state: Unused
- *
- * This is called after a mode set has been completed.
- */
-static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
-/**
* vmw_sou_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
@@ -305,6 +292,9 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
sou = vmw_crtc_to_sou(crtc);
dev_priv = vmw_priv(crtc->dev);
+ if (dev_priv->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+
if (sou->defined) {
ret = vmw_sou_fifo_destroy(dev_priv, sou);
if (ret)
@@ -320,6 +310,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
};
/*
@@ -384,10 +377,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
- if (vps->bo)
- vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
- vmw_bo_unreference(&vps->bo);
+ if (bo)
+ vmw_bo_unpin(vmw_priv(crtc->dev), bo, false);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
@@ -419,9 +413,10 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
.bo_type = ttm_bo_type_device,
.pin = true
};
+ struct vmw_bo *bo = NULL;
if (!new_fb) {
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
return 0;
@@ -430,17 +425,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
- if (vps->bo) {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
if (vps->bo_size == bo_params.size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
*/
- return vmw_bo_pin_in_vram(dev_priv, vps->bo,
- true);
+ return vmw_bo_pin_in_vram(dev_priv, bo, true);
}
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
}
@@ -450,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -461,7 +456,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
*/
- return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
+ return vmw_bo_pin_in_vram(dev_priv, vps->uo.buffer, true);
}
static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
@@ -588,6 +583,7 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
{
struct vmw_kms_sou_dirty_cmd *blit = cmd;
struct vmw_framebuffer_surface *vfbs;
+ struct vmw_surface *surf = NULL;
vfbs = container_of(update->vfb, typeof(*vfbs), base);
@@ -595,7 +591,8 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
num_hits;
- blit->body.srcImage.sid = vfbs->surface->res.id;
+ surf = vmw_user_object_surface(&vfbs->uo);
+ blit->body.srcImage.sid = surf->res.id;
blit->body.destScreenId = update->du->unit;
/* Update the source and destination bounding box later in post_clip */
@@ -767,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -778,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -797,8 +794,8 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_sou_crtc_atomic_enable,
+ .atomic_flush = vmw_vkms_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
.atomic_disable = vmw_sou_crtc_atomic_disable,
};
@@ -871,7 +868,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -908,6 +904,9 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
+
+ vmw_du_init(&sou->base);
+
return 0;
err_free_unregister:
@@ -1109,7 +1108,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
int ret;
if (!srf)
- srf = &vfbs->surface->res;
+ srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 3c8414a13dba..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2014-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,12 +28,15 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_kms.h"
+#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
+#include <linux/fsnotify.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -41,7 +45,14 @@
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
-
+/*
+ * Some renderers such as llvmpipe will align the width and height of their
+ * buffers to match their tile size. We need to keep this in mind when exposing
+ * modes to userspace so that the resulting over-allocation will not exceed
+ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
+ * tile size of current renderers.
+ */
+#define GPU_TILE_SIZE 64
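A sketch of what this bound implies for mode validation, assuming a hypothetical 1920x1080 32bpp mode; it mirrors the check in vmw_stdu_connector_mode_valid() below:

	u64 assumed_cpp  = 4;                           /* XRGB8888 */
	u64 required_mem = ALIGN(1920, GPU_TILE_SIZE) * /* stays 1920 */
			   ALIGN(1080, GPU_TILE_SIZE) * /* rounds up to 1088 */
			   assumed_cpp;                 /* 8355840 bytes */
	required_mem = ALIGN(required_mem, PAGE_SIZE);  /* already page-aligned here */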
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
@@ -83,11 +94,6 @@ struct vmw_stdu_update {
SVGA3dCmdUpdateGBScreenTarget body;
};
-struct vmw_stdu_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
-};
-
struct vmw_stdu_surface_copy {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
@@ -407,24 +413,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
crtc->x, crtc->y);
}
-
-static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
-}
-
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
+ struct drm_crtc_state *new_crtc_state;
int ret;
-
if (!crtc) {
DRM_ERROR("CRTC is NULL\n");
return;
@@ -432,6 +428,10 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (dev_priv->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
if (stdu->defined) {
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
@@ -440,6 +440,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
(void) vmw_stdu_update_st(dev_priv, stdu);
+ /* Don't destroy the Screen Target if we are only setting the
+ * display as inactive
+ */
+ if (new_crtc_state->enable &&
+ !new_crtc_state->active &&
+ !new_crtc_state->mode_changed)
+ return;
+
ret = vmw_stdu_destroy_st(dev_priv, stdu);
if (ret)
DRM_ERROR("Failed to destroy Screen Target\n");
@@ -494,7 +502,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
container_of(dirty->unit, typeof(*stdu), base);
s32 width, height;
s32 src_pitch, dst_pitch;
- struct ttm_buffer_object *src_bo, *dst_bo;
+ struct vmw_bo *src_bo, *dst_bo;
u32 src_offset, dst_offset;
struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
@@ -509,11 +517,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
/* Assume we are blitting from Guest (bo) to Host (display_srf) */
src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
- src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ src_bo = stdu->display_srf->res.guest_memory_bo;
src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
- dst_bo = &ddirty->buf->tbo;
+ dst_bo = ddirty->buf;
dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
@@ -729,7 +737,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
int ret;
if (!srf)
- srf = &vfbs->surface->res;
+ srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL);
@@ -740,12 +748,6 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret)
goto out_unref;
- if (vfbs->is_bo_proxy) {
- ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
- if (ret)
- goto out_finish;
- }
-
sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
sdirty.base.clip = vmw_kms_stdu_surface_clip;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
@@ -759,7 +761,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
-out_finish:
+
vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
NULL);
@@ -770,7 +772,6 @@ out_unref:
return ret;
}
-
/*
* Screen Target CRTC dispatch table
*/
@@ -782,6 +783,12 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .enable_vblank = vmw_vkms_enable_vblank,
+ .disable_vblank = vmw_vkms_disable_vblank,
+ .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp,
+ .get_crc_sources = vmw_vkms_get_crc_sources,
+ .set_crc_source = vmw_vkms_set_crc_source,
+ .verify_crc_source = vmw_vkms_verify_crc_source,
};
@@ -830,7 +837,71 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
+static enum drm_mode_status
+vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
+ /* Align width and height to account for GPU tile over-alignment */
+ u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
+ ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
+ assumed_cpp;
+ required_mem = ALIGN(required_mem, PAGE_SIZE);
+
+ ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
+ dev_priv->stdu_max_height);
+ if (ret != MODE_OK)
+ return ret;
+
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
+
+ if (required_mem > dev_priv->max_primary_mem)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/*
+ * Trigger a modeset if the X,Y position of the Screen Target changes.
+ * This is needed when multi-mon is cycled. The original Screen Target will have
+ * the same mode but its relative X,Y position in the topology will change.
+ */
+static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state;
+ struct vmw_screen_target_display_unit *du;
+ struct drm_crtc_state *new_crtc_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ du = vmw_connector_to_stdu(conn);
+
+ if (!conn_state->crtc)
+ return 0;
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (du->base.gui_x != du->base.set_gui_x ||
+ du->base.gui_y != du->base.set_gui_y)
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
@@ -846,7 +917,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid,
+ .atomic_check = vmw_stdu_connector_atomic_check,
};
@@ -873,9 +945,8 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->surf)
+ if (vmw_user_object_surface(&vps->uo))
WARN_ON(!vps->pinned);
-
vmw_du_plane_cleanup_fb(plane, old_state);
vps->content_fb_type = SAME_AS_DISPLAY;
@@ -883,7 +954,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
}
-
/**
* vmw_stdu_primary_plane_prepare_fb - Readies the display surface
*
@@ -907,13 +977,15 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
+ struct drm_plane_state *old_state = plane->state;
+ struct drm_rect rect;
int ret;
/* No FB to prepare */
if (!new_fb) {
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
return 0;
@@ -923,8 +995,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs &&
- new_vfbs->surface->metadata.base_size.width == hdisplay &&
- new_vfbs->surface->metadata.base_size.height == vdisplay)
+ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.width == hdisplay &&
+ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->bo)
new_content_type = SEPARATE_BO;
@@ -962,29 +1034,29 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
metadata.num_sizes = 1;
metadata.scanout = true;
} else {
- metadata = new_vfbs->surface->metadata;
+ metadata = vmw_user_object_surface(&new_vfbs->uo)->metadata;
}
metadata.base_size.width = hdisplay;
metadata.base_size.height = vdisplay;
metadata.base_size.depth = 1;
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
struct drm_vmw_size cur_base_size =
- vps->surf->metadata.base_size;
+ vmw_user_object_surface(&vps->uo)->metadata.base_size;
if (cur_base_size.width != metadata.base_size.width ||
cur_base_size.height != metadata.base_size.height ||
- vps->surf->metadata.format != metadata.format) {
+ vmw_user_object_surface(&vps->uo)->metadata.format != metadata.format) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
}
- if (!vps->surf) {
+ if (!vmw_user_object_surface(&vps->uo)) {
ret = vmw_gb_surface_define(dev_priv, &metadata,
- &vps->surf);
+ &vps->uo.surface);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
return ret;
@@ -997,18 +1069,19 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
* The only time we add a reference in prepare_fb is if the
* state object doesn't have a reference to begin with
*/
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
- vps->surf = vmw_surface_reference(new_vfbs->surface);
+ memcpy(&vps->uo, &new_vfbs->uo, sizeof(vps->uo));
+ vmw_user_object_ref(&vps->uo);
}
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
/* Pin new surface before flipping */
- ret = vmw_resource_pin(&vps->surf->res, false);
+ ret = vmw_resource_pin(&vmw_user_object_surface(&vps->uo)->res, false);
if (ret)
goto out_srf_unref;
@@ -1018,6 +1091,34 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type;
/*
+	 * The drm fb code will do blits via the vmap interface, which doesn't
+	 * trigger vmw_bo page dirty tracking because it runs kernel-side (and
+	 * thus doesn't require mmap'ing), so we have to update the surface's
+	 * dirty regions by hand, while being careful not to overwrite the
+	 * resource if it has been written to by the gpu (res_dirty).
+ */
+ if (vps->uo.buffer && vps->uo.buffer->is_dumb) {
+ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
+ struct vmw_resource *res = &surf->res;
+
+ if (!res->res_dirty && drm_atomic_helper_damage_merged(old_state,
+ new_state,
+ &rect)) {
+ /*
+ * At some point it might be useful to actually translate
+ * (rect.x1, rect.y1) => start, and (rect.x2, rect.y2) => end,
+ * but currently the fb code will just report the entire fb
+ * dirty so in practice it doesn't matter.
+ */
+ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP(res->guest_memory_offset +
+ res->guest_memory_size,
+ PAGE_SIZE);
+ vmw_resource_dirty_update(res, start, end);
+ }
+ }
+
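A minimal worked example of the byte-range to page-range conversion above, assuming 4 KiB pages and hypothetical offset/size values:

	/* guest_memory_offset = 8192, guest_memory_size = 10000 (hypothetical) */
	pgoff_t start = 8192 >> PAGE_SHIFT;                       /* page 2 */
	pgoff_t end = __KERNEL_DIV_ROUND_UP(8192 + 10000, 4096);  /* page 5 */
	/* vmw_resource_dirty_update(res, start, end) marks pages [2, 5) dirty. */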
+ /*
* This should only happen if the buffer object is too large to create a
* proxy surface for.
*/
@@ -1027,7 +1128,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
return 0;
out_srf_unref:
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
return ret;
}
@@ -1073,7 +1174,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
struct vmw_stdu_update_gb_image *cmd_img = cmd;
struct vmw_stdu_update *cmd_update;
- struct ttm_buffer_object *src_bo, *dst_bo;
+ struct vmw_bo *src_bo, *dst_bo;
u32 src_offset, dst_offset;
s32 src_pitch, dst_pitch;
s32 width, height;
@@ -1087,11 +1188,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp;
- dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ dst_bo = stdu->display_srf->res.guest_memory_bo;
dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
- src_bo = &vfbbo->buffer->tbo;
+ src_bo = vfbbo->buffer;
src_pitch = update->vfb->base.pitches[0];
src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
stdu->cpp;
@@ -1169,14 +1270,8 @@ static uint32_t
vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
- struct vmw_framebuffer_surface *vfbs;
uint32_t size = 0;
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- if (vfbs->is_bo_proxy)
- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
-
size += sizeof(struct vmw_stdu_update);
return size;
@@ -1185,14 +1280,8 @@ vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
- struct vmw_framebuffer_surface *vfbs;
uint32_t size = 0;
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- if (vfbs->is_bo_proxy)
- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
-
size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
num_hits + sizeof(struct vmw_stdu_update);
@@ -1200,47 +1289,6 @@ static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
}
static uint32_t
-vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
-{
- struct vmw_framebuffer_surface *vfbs;
- struct drm_plane_state *state = update->plane->state;
- struct drm_plane_state *old_state = update->old_state;
- struct vmw_stdu_update_gb_image *cmd_update = cmd;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t copy_size = 0;
-
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- /*
- * proxy surface is special where a buffer object type fb is wrapped
- * in a surface and need an update gb image command to sync with device.
- */
- drm_atomic_helper_damage_iter_init(&iter, old_state, state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- SVGA3dBox *box = &cmd_update->body.box;
-
- cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
- cmd_update->header.size = sizeof(cmd_update->body);
- cmd_update->body.image.sid = vfbs->surface->res.id;
- cmd_update->body.image.face = 0;
- cmd_update->body.image.mipmap = 0;
-
- box->x = clip.x1;
- box->y = clip.y1;
- box->z = 0;
- box->w = drm_rect_width(&clip);
- box->h = drm_rect_height(&clip);
- box->d = 1;
-
- copy_size += sizeof(*cmd_update);
- cmd_update++;
- }
-
- return copy_size;
-}
-
-static uint32_t
vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
uint32_t num_hits)
{
@@ -1254,7 +1302,7 @@ vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
num_hits;
- cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.src.sid = vmw_user_object_surface(&vfbs->uo)->res.id;
cmd_copy->body.dest.sid = stdu->display_srf->res.id;
return sizeof(*cmd_copy);
@@ -1325,10 +1373,7 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
srf_update.mutex = &dev_priv->cmdbuf_mutex;
srf_update.intr = true;
- if (vfbs->is_bo_proxy)
- srf_update.post_prepare = vmw_stdu_surface_update_proxy;
-
- if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ if (vmw_user_object_surface(&vfbs->uo)->res.id != stdu->display_srf->res.id) {
srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
srf_update.pre_clip = vmw_stdu_surface_populate_copy;
srf_update.clip = vmw_stdu_surface_populate_clip;
@@ -1372,7 +1417,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- stdu->display_srf = vps->surf;
+ stdu->display_srf = vmw_user_object_surface(&vps->uo);
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
@@ -1413,6 +1458,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
vmw_fence_obj_unreference(&fence);
}
+static void
+vmw_stdu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
+
+ if (vmw->vkms_enabled)
+ vmw_vkms_set_crc_surface(crtc, stdu->display_srf);
+ vmw_vkms_crtc_atomic_flush(crtc, state);
+}
static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
@@ -1426,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1438,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1453,12 +1509,11 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = {
};
static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
- .prepare = vmw_stdu_crtc_helper_prepare,
.mode_set_nofb = vmw_stdu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
- .atomic_flush = vmw_du_crtc_atomic_flush,
- .atomic_enable = vmw_stdu_crtc_atomic_enable,
+ .atomic_flush = vmw_stdu_crtc_atomic_flush,
+ .atomic_enable = vmw_vkms_crtc_atomic_enable,
.atomic_disable = vmw_stdu_crtc_atomic_disable,
};
@@ -1529,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -1538,7 +1594,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, false);
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -1575,6 +1630,9 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
+
+ vmw_du_init(&stdu->base);
+
return 0;
err_free_unregister:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e7a744dfcecf..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,31 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -36,9 +18,6 @@
#include <drm/ttm/ttm_placement.h>
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
-#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
-#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
- (svga3d_flags & ((uint64_t)U32_MAX))
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -660,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -686,6 +665,13 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
+
+ /*
+ * Dumb buffers own the resource and they'll unref the
+ * resource themselves
+ */
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
+
vmw_resource_unreference(&res);
}
@@ -812,24 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
@@ -857,9 +830,13 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -892,6 +869,113 @@ out_unlock:
return ret;
}
+static struct vmw_user_surface *
+vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf = NULL;
+ struct vmw_surface *surf;
+ struct ttm_base_object *base;
+
+ surf = vmw_bo_surface(bo);
+ if (surf) {
+ rcu_read_lock();
+ user_srf = container_of(surf, struct vmw_user_surface, srf);
+ base = &user_srf->prime.base;
+ if (base && !kref_get_unless_zero(&base->refcount)) {
+ drm_dbg_driver(&vmw->drm,
+ "%s: referencing a stale surface handle %d\n",
+ __func__, handle);
+ base = NULL;
+ user_srf = NULL;
+ }
+ rcu_read_unlock();
+ }
+
+ return user_srf;
+}
+
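The guarded kref_get_unless_zero() above is the standard RCU stale-handle lookup pattern; a generic sketch with hypothetical names (needs <linux/idr.h>, <linux/kref.h> and <linux/rcupdate.h>; the patch applies the same idea to the surface attached to a buffer object rather than an idr):

struct obj {
	struct kref refcount;
};

static struct obj *obj_lookup_get(struct idr *table, int id)
{
	struct obj *o;

	rcu_read_lock();
	o = idr_find(table, id);	/* may observe an object being torn down */
	if (o && !kref_get_unless_zero(&o->refcount))
		o = NULL;		/* refcount already hit zero: stale handle */
	rcu_read_unlock();
	return o;			/* a non-NULL result holds a reference */
}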
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf =
+ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
+ struct vmw_surface *surf = NULL;
+ struct ttm_base_object *base;
+
+ if (user_srf) {
+ surf = vmw_surface_reference(&user_srf->srf);
+ base = &user_srf->prime.base;
+ ttm_base_object_unref(&base);
+ }
+ return surf;
+}
+
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf =
+ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
+ int surf_handle = 0;
+ struct ttm_base_object *base;
+
+ if (user_srf) {
+ base = &user_srf->prime.base;
+ surf_handle = (u32)base->handle;
+ ttm_base_object_unref(&base);
+ }
+ return surf_handle;
+}
+
+static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ u32 fd, u32 *handle,
+ struct ttm_base_object **base_p)
+{
+ struct ttm_base_object *base;
+ struct vmw_bo *bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_user_surface *user_srf;
+ int ret;
+
+ ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Wasn't able to find user buffer for fd = %u.\n", fd);
+ return ret;
+ }
+
+ ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
+ return ret;
+ }
+
+ user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
+ if (WARN_ON(!user_srf)) {
+ drm_warn(&dev_priv->drm,
+ "User surface fd %d (handle %d) is null.\n", fd, *handle);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ base = &user_srf->prime.base;
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Couldn't add an object ref for the buffer (%d).\n", *handle);
+ goto out;
+ }
+
+ *base_p = base;
+out:
+ vmw_user_bo_unref(&bo);
+
+ return ret;
+}
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
@@ -901,15 +985,19 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
struct ttm_base_object **base_p)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_user_surface *user_srf;
+ struct vmw_user_surface *user_srf = NULL;
uint32_t handle;
struct ttm_base_object *base;
int ret;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
- if (unlikely(ret != 0))
- return ret;
+ if (ret)
+ return vmw_buffer_prime_to_surface_base(dev_priv,
+ file_priv,
+ u_handle,
+ &handle,
+ base_p);
} else {
handle = u_handle;
}
@@ -1503,7 +1591,12 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
&res->guest_memory_bo);
if (ret == 0) {
- if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
+ if (res->guest_memory_bo->is_dumb) {
+ VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
+ vmw_user_bo_unref(&res->guest_memory_bo);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_user_bo_unref(&res->guest_memory_bo);
ret = -EINVAL;
@@ -1546,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -2100,3 +2201,152 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
out_unlock:
return ret;
}
+
+static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
+ int bpp)
+{
+ switch (bpp) {
+ case 8: /* DRM_FORMAT_C8 */
+ return SVGA3D_P8;
+ case 16: /* DRM_FORMAT_RGB565 */
+ return SVGA3D_R5G6B5;
+ case 32: /* DRM_FORMAT_XRGB8888 */
+ if (has_sm4_context(vmw))
+ return SVGA3D_B8G8R8X8_UNORM;
+ return SVGA3D_X8R8G8B8;
+ default:
+ drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
+ return SVGA3D_X8R8G8B8;
+ }
+}
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_bo *vbo = NULL;
+ struct vmw_resource *res = NULL;
+ union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
+ struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
+ int ret;
+ struct drm_vmw_size drm_size = {
+ .width = args->width,
+ .height = args->height,
+ .depth = 1,
+ };
+ SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
+ const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
+ SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
+ SVGA3D_SURFACE_HINT_RENDERTARGET |
+ SVGA3D_SURFACE_SCREENTARGET;
+
+ if (vmw_surface_is_dx_screen_target_format(format)) {
+ flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+ SVGA3D_SURFACE_BIND_RENDER_TARGET;
+ }
+
+ /*
+	 * Without mob support we're just going to use a raw memory buffer
+	 * because we wouldn't be able to support full surface coherency
+	 * without mobs. There is also no reason to support surface coherency
+	 * without 3d (i.e. gpu usage on the host) because then all the
+	 * contents are going to be rendered guest side.
+ */
+ if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+
+ switch (cpp) {
+ case 1: /* DRM_FORMAT_C8 */
+ case 2: /* DRM_FORMAT_RGB565 */
+ case 4: /* DRM_FORMAT_XRGB8888 */
+ break;
+ default:
+ /*
+ * Dumb buffers don't allow anything else.
+ * This is tested via IGT's dumb_buffers
+ */
+ return -EINVAL;
+ }
+
+ args->pitch = args->width * cpp;
+ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
+
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->tbo.base);
+ return ret;
+ }
+
+ req->version = drm_vmw_gb_surface_v1;
+ req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ req->quality_level = SVGA3D_MS_QUALITY_NONE;
+ req->buffer_byte_stride = 0;
+ req->must_be_zero = 0;
+ req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
+ req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
+ req->base.format = (uint32_t)format;
+ req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
+ req->base.base_size.width = args->width;
+ req->base.base_size.height = args->height;
+ req->base.base_size.depth = 1;
+ req->base.array_size = 0;
+ req->base.mip_levels = 1;
+ req->base.multisample_count = 0;
+ req->base.buffer_handle = SVGA3D_INVALID_ID;
+ req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
+ if (ret) {
+ drm_warn(dev, "Unable to create a dumb buffer\n");
+ return ret;
+ }
+
+ args->handle = arg.rep.buffer_handle;
+ args->size = arg.rep.buffer_size;
+ args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);
+
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
+ user_surface_converter,
+ &res);
+ if (ret) {
+ drm_err(dev, "Created resource handle doesn't exist!\n");
+ goto err;
+ }
+
+ vbo = res->guest_memory_bo;
+ vbo->is_dumb = true;
+ vbo->dumb_surface = vmw_res_to_srf(res);
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+	 * Unset the user surface dtor since this is not actually exposed
+	 * to userspace. The surface is owned via the dumb buffer's GEM handle
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
+err:
+ if (res)
+ vmw_resource_unreference(&res);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ return ret;
+}
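A hedged userspace sketch of how this callback is reached through the generic dumb-buffer UAPI; error handling is elided and the fd is assumed to be an open vmwgfx DRM node:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

static int create_dumb(int fd, uint32_t w, uint32_t h,
		       struct drm_mode_create_dumb *creq)
{
	memset(creq, 0, sizeof(*creq));
	creq->width = w;
	creq->height = h;
	creq->bpp = 32;	/* takes the 32bpp/XRGB8888 path above */

	/* On success the driver fills creq->handle, ->pitch and ->size. */
	return drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq);
}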
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4d23d0a70bcb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+	drm_warn(&dev_priv->drm, "VSG table map failed!\n");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
@@ -556,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
deleted file mode 100644
index 90097d04b45f..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "vmwgfx_drv.h"
-
-static int vmw_bo_vm_lookup(struct ttm_device *bdev,
- struct drm_file *filp,
- unsigned long offset,
- unsigned long pages,
- struct ttm_buffer_object **p_bo)
-{
- struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
- struct drm_device *drm = &dev_priv->drm;
- struct drm_vma_offset_node *node;
- int ret;
-
- *p_bo = NULL;
-
- drm_vma_offset_lock_lookup(bdev->vma_manager);
-
- node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
- if (likely(node)) {
- *p_bo = container_of(node, struct ttm_buffer_object,
- base.vma_node);
- *p_bo = ttm_bo_get_unless_zero(*p_bo);
- }
-
- drm_vma_offset_unlock_lookup(bdev->vma_manager);
-
- if (!*p_bo) {
- drm_err(drm, "Could not find buffer object to map\n");
- return -EINVAL;
- }
-
- if (!drm_vma_node_is_allowed(node, filp)) {
- ret = -EACCES;
- goto out_no_access;
- }
-
- return 0;
-out_no_access:
- ttm_bo_put(*p_bo);
- return ret;
-}
-
-int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- static const struct vm_operations_struct vmw_vm_ops = {
- .pfn_mkwrite = vmw_bo_vm_mkwrite,
- .page_mkwrite = vmw_bo_vm_mkwrite,
- .fault = vmw_bo_vm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- };
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- struct ttm_device *bdev = &dev_priv->bdev;
- struct ttm_buffer_object *bo;
- int ret;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
- return -EINVAL;
-
- ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_mmap_obj(vma, bo);
- if (unlikely(ret != 0))
- goto out_unref;
-
- vma->vm_ops = &vmw_vm_ops;
-
- /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
- if (!is_cow_mapping(vma->vm_flags))
- vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
-
- ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
-
- return 0;
-
-out_unref:
- ttm_bo_put(bo);
- return ret;
-}
-
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index aaacbdcbd742..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -32,9 +32,6 @@
#include <linux/slab.h>
-
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
/**
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
@@ -112,20 +109,10 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page;
-
- if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
- ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
- ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
- }
-
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
- if (ctx->vm)
- ctx->vm_size_left -= PAGE_SIZE;
-
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
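The path above is a page-backed bump allocator; a stand-alone sketch of the same idea, assuming the unshown tail of vmw_validation_mem_alloc() follows the visible pattern:

static void *bump_alloc(struct vmw_validation_context *ctx, unsigned int size)
{
	size = ALIGN(size, sizeof(long));	/* keep returned chunks aligned */

	if (ctx->mem_size_left < size) {
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;
		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}
	ctx->mem_size_left -= size;
	/* Carve the chunk that starts where the previous one ended. */
	return ctx->page_address + PAGE_SIZE - (ctx->mem_size_left + size);
}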
@@ -155,10 +142,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
- if (ctx->vm && ctx->total_mem) {
- ctx->total_mem = 0;
- ctx->vm_size_left = 0;
- }
}
/**
@@ -279,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -673,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 240ee0c4ebfd..353d837907d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -52,10 +52,6 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
- * @vm: A pointer to the memory reservation interface or NULL if no
- * memory reservation is needed.
- * @vm_size_left: Amount of reserved memory that so far has not been allocated.
- * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct vmw_sw_context *sw_context;
@@ -68,9 +64,6 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
- struct vmw_validation_mem *vm;
- size_t vm_size_left;
- size_t total_mem;
};
struct vmw_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
new file mode 100644
index 000000000000..aec774fa4d7b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_vkms.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+
+#include "vmw_surface_cache.h"
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+
+#define GUESTINFO_VBLANK "guestinfo.vmwgfx.vkms_enable"
+
+static int
+vmw_surface_sync(struct vmw_private *vmw,
+ struct vmw_surface *surf)
+{
+ int ret;
+ struct vmw_fence_obj *fence = NULL;
+ struct vmw_bo *bo = surf->res.guest_memory_bo;
+
+ vmw_resource_clean(&surf->res);
+
+ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ if (ret != 0) {
+ drm_warn(&vmw->drm, "%s: failed reserve\n", __func__);
+ goto done;
+ }
+
+ ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL);
+ if (ret != 0) {
+ drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__);
+ ttm_bo_unreserve(&bo->tbo);
+ goto done;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&bo->tbo);
+done:
+ return ret;
+}
+
+static void
+compute_crc(struct drm_crtc *crtc,
+ struct vmw_surface *surf,
+ u32 *crc)
+{
+ u8 *mapped_surface;
+ struct vmw_bo *bo = surf->res.guest_memory_bo;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(surf->metadata.format);
+ u32 row_pitch_bytes;
+ SVGA3dSize blocks;
+ u32 y;
+
+ *crc = 0;
+
+ vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks);
+ row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock;
+ WARN_ON(!bo);
+ mapped_surface = vmw_bo_map_and_cache(bo);
+
+ for (y = 0; y < blocks.height; y++) {
+ *crc = crc32_le(*crc, mapped_surface, row_pitch_bytes);
+ mapped_surface += row_pitch_bytes;
+ }
+
+ vmw_bo_unmap(bo);
+}
+
+static void
+crc_generate_worker(struct work_struct *work)
+{
+ struct vmw_display_unit *du =
+ container_of(work, struct vmw_display_unit, vkms.crc_generator_work);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ bool crc_pending;
+ u64 frame_start, frame_end;
+ u32 crc32 = 0;
+	struct vmw_surface *surf = NULL;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ crc_pending = du->vkms.crc_pending;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ /*
+ * We raced with the vblank hrtimer and previous work already computed
+ * the crc, nothing to do.
+ */
+ if (!crc_pending)
+ return;
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ surf = vmw_surface_reference(du->vkms.surface);
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ if (surf) {
+		if (vmw_surface_sync(vmw, surf)) {
+			drm_warn(crtc->dev,
+				 "CRC worker wasn't able to sync the crc surface!\n");
+			vmw_surface_unreference(&surf);
+			return;
+		}
+
+ compute_crc(crtc, surf, &crc32);
+ vmw_surface_unreference(&surf);
+ }
+
+ spin_lock_irq(&du->vkms.crc_state_lock);
+ frame_start = du->vkms.frame_start;
+ frame_end = du->vkms.frame_end;
+ du->vkms.frame_start = 0;
+ du->vkms.frame_end = 0;
+ du->vkms.crc_pending = false;
+ spin_unlock_irq(&du->vkms.crc_state_lock);
+
+ /*
+ * The worker can fall behind the vblank hrtimer, make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+}
+
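+/*
+ * hrtimer callback emulating the vblank interrupt: advances the timer by
+ * one frame period, signals the vblank to the core and, if CRC generation
+ * is enabled and a surface is bound, records the frame range and kicks the
+ * CRC worker.
+ */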
+static enum hrtimer_restart
+vmw_vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
+ struct drm_crtc *crtc = &du->crtc;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ bool has_surface = false;
+ u64 ret_overrun;
+ bool locked, ret;
+
+ ret_overrun = hrtimer_forward_now(&du->vkms.timer,
+ du->vkms.period_ns);
+ if (ret_overrun != 1)
+		drm_dbg_driver(crtc->dev, "vblank timer missed %llu frames.\n",
+ ret_overrun - 1);
+
+ locked = vmw_vkms_vblank_trylock(crtc);
+ ret = drm_crtc_handle_vblank(crtc);
+ WARN_ON(!ret);
+ if (!locked)
+ return HRTIMER_RESTART;
+ has_surface = du->vkms.surface != NULL;
+ vmw_vkms_unlock(crtc);
+
+ if (du->vkms.crc_enabled && has_surface) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ spin_lock(&du->vkms.crc_state_lock);
+ if (!du->vkms.crc_pending)
+ du->vkms.frame_start = frame;
+ else
+ drm_dbg_driver(crtc->dev,
+ "crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+ du->vkms.frame_start, frame);
+ du->vkms.frame_end = frame;
+ du->vkms.crc_pending = true;
+ spin_unlock(&du->vkms.crc_state_lock);
+
+ ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work);
+ if (!ret)
+			drm_dbg_driver(crtc->dev, "CRC worker already queued\n");
+ }
+
+ return HRTIMER_RESTART;
+}
+
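+/*
+ * Probe the guestinfo key and, when it parses as true, initialize vblank
+ * support and the ordered workqueue used by the CRC generator.
+ */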
+void
+vmw_vkms_init(struct vmw_private *vmw)
+{
+ char buffer[64];
+ const size_t max_buf_len = sizeof(buffer) - 1;
+ size_t buf_len = max_buf_len;
+ int ret;
+
+ vmw->vkms_enabled = false;
+
+ ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len);
+ if (ret || buf_len > max_buf_len)
+ return;
+ buffer[buf_len] = '\0';
+
+ ret = kstrtobool(buffer, &vmw->vkms_enabled);
+ if (!ret && vmw->vkms_enabled) {
+ ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS);
+ vmw->vkms_enabled = (ret == 0);
+ }
+
+ vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0);
+ if (!vmw->crc_workq) {
+		drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms.\n");
+ vmw->vkms_enabled = false;
+ }
+ if (vmw->vkms_enabled)
+ drm_info(&vmw->drm, "VKMS enabled\n");
+}
+
+void
+vmw_vkms_cleanup(struct vmw_private *vmw)
+{
+	if (vmw->crc_workq)
+		destroy_workqueue(vmw->crc_workq);
+}
+
+bool
+vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vmw_private *vmw = vmw_priv(dev);
+ unsigned int pipe = crtc->index;
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (!vmw->vkms_enabled)
+ return false;
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return true;
+ }
+
+ *vblank_time = READ_ONCE(du->vkms.timer.node.expires);
+
+ if (WARN_ON(*vblank_time == vblank->time))
+ return true;
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt is
+ * only generated after all the vblank registers are updated) and what
+	 * the vblank core expects. Therefore we always need to correct the
+	 * timestamp by one frame.
+ */
+ *vblank_time -= du->vkms.period_ns;
+
+ return true;
+}
+
+int
+vmw_vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vmw_private *vmw = vmw_priv(dev);
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!vmw->vkms_enabled)
+ return -EINVAL;
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ hrtimer_setup(&du->vkms.timer, &vmw_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+void
+vmw_vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return;
+
+ hrtimer_cancel(&du->vkms.timer);
+ du->vkms.surface = NULL;
+ du->vkms.period_ns = ktime_set(0, 0);
+}
+
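+/*
+ * Tri-state "lock" arbitrating access to the vkms state: UNLOCKED when
+ * nobody holds it, MODESET while the (non-atomic) modeset path owns it and
+ * VBLANK while the hrtimer callback does. All transitions go through
+ * atomic_cmpxchg(), so neither side ever observes the other's partial
+ * updates (see vmw_vkms_modeset_lock() below for the rationale).
+ */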
+enum vmw_vkms_lock_state {
+ VMW_VKMS_LOCK_UNLOCKED = 0,
+ VMW_VKMS_LOCK_MODESET = 1,
+ VMW_VKMS_LOCK_VBLANK = 2
+};
+
+void
+vmw_vkms_crtc_init(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
+ spin_lock_init(&du->vkms.crc_state_lock);
+
+ INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker);
+ du->vkms.surface = NULL;
+}
+
+void
+vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (du->vkms.surface)
+ vmw_surface_unreference(&du->vkms.surface);
+ WARN_ON(work_pending(&du->vkms.crc_generator_work));
+ hrtimer_cancel(&du->vkms.timer);
+}
+
+void
+vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ vmw_vkms_modeset_lock(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ unsigned long flags;
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return;
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+
+ vmw_vkms_unlock(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ drm_crtc_vblank_on(crtc);
+}
+
+void
+vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled)
+ drm_crtc_vblank_off(crtc);
+}
+
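+/* CRC generation is only wired up for Screen Target display units. */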
+static bool
+is_crc_supported(struct drm_crtc *crtc)
+{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (!vmw->vkms_enabled)
+ return false;
+
+ if (vmw->active_display_unit != vmw_du_screen_target)
+ return false;
+
+ return true;
+}
+
+static const char * const pipe_crc_sources[] = {"auto"};
+
+static int
+crc_parse_source(const char *src_name,
+ bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+const char *const *
+vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = 0;
+ if (!is_crc_supported(crtc))
+ return NULL;
+
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+int
+vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (!is_crc_supported(crtc))
+ return -EINVAL;
+
+ if (crc_parse_source(src_name, &enabled) < 0) {
+ drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
+
+int
+vmw_vkms_set_crc_source(struct drm_crtc *crtc,
+ const char *src_name)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ bool enabled, prev_enabled, locked;
+ int ret;
+
+ if (!is_crc_supported(crtc))
+ return -EINVAL;
+
+ ret = crc_parse_source(src_name, &enabled);
+
+ if (enabled)
+ drm_crtc_vblank_get(crtc);
+
+ locked = vmw_vkms_modeset_lock_relaxed(crtc);
+ prev_enabled = du->vkms.crc_enabled;
+ du->vkms.crc_enabled = enabled;
+ if (locked)
+ vmw_vkms_unlock(crtc);
+
+ if (prev_enabled)
+ drm_crtc_vblank_put(crtc);
+
+ return ret;
+}
+
+void
+vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_surface *surf)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
+
+ if (vmw->vkms_enabled && du->vkms.surface != surf) {
+ WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
+ if (du->vkms.surface)
+ vmw_surface_unreference(&du->vkms.surface);
+ if (surf)
+ du->vkms.surface = vmw_surface_reference(surf);
+ }
+}
+
+/**
+ * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock
+ * @du: The vmw_display_unit from which to grab the vblank timings
+ *
+ * Returns the maximum wait time used to acquire the vkms lock. By
+ * default this is the duration of a single frame, falling back to
+ * 1/60th of a second when vblank has not been initialized for the
+ * display unit.
+ */
+static inline u64
+vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du)
+{
+ s64 nsecs = ktime_to_ns(du->vkms.period_ns);
+
+ return (nsecs > 0) ? nsecs : 16666666;
+}
+
+/**
+ * vmw_vkms_modeset_lock - Protects access to crtc during modeset
+ * @crtc: The crtc to lock for vkms
+ *
+ * This function prevents the VKMS timers/callbacks from being called
+ * while a modeset operation is in process. We don't want the callbacks
+ * e.g. the vblank simulator to be trying to access incomplete state
+ * so we need to make sure they execute only when the modeset has
+ * finished.
+ *
+ * Normally this would have been done with a spinlock but locking the
+ * entire atomic modeset with vmwgfx is impossible because kms prepare
+ * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to
+ * guard various bits of state), which means that we need to synchronize
+ * atomic context (the vblank handler) with the non-atomic entirety
+ * of kms - so we use an atomic_t to track which part of vkms has access
+ * to the basic vkms state.
+ */
+void
+vmw_vkms_modeset_lock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ const u64 nsecs_delay = 10;
+ const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
+ u64 total_delay = 0;
+ int ret;
+
+ do {
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_MODESET);
+ if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY)
+ break;
+ ndelay(nsecs_delay);
+ total_delay += nsecs_delay;
+ } while (1);
+
+ if (total_delay >= MAX_NSECS_DELAY) {
+ drm_warn(crtc->dev, "VKMS lock expired! total_delay = %lld, ret = %d, cur = %d\n",
+ total_delay, ret, atomic_read(&du->vkms.atomic_lock));
+ }
+}
+
+/**
+ * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset
+ * @crtc: The crtc to lock for vkms
+ *
+ * Much like vmw_vkms_modeset_lock except that it returns immediately
+ * when the crtc is already locked for a modeset.
+ *
+ * Returns true if vkms was actually locked for modeset, false otherwise.
+ */
+bool
+vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ const u64 nsecs_delay = 10;
+ const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du);
+ u64 total_delay = 0;
+ int ret;
+
+ do {
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_MODESET);
+ if (ret == VMW_VKMS_LOCK_UNLOCKED ||
+ ret == VMW_VKMS_LOCK_MODESET ||
+ total_delay >= MAX_NSECS_DELAY)
+ break;
+ ndelay(nsecs_delay);
+ total_delay += nsecs_delay;
+ } while (1);
+
+ if (total_delay >= MAX_NSECS_DELAY) {
+ drm_warn(crtc->dev, "VKMS relaxed lock expired!\n");
+ return false;
+ }
+
+ return ret == VMW_VKMS_LOCK_UNLOCKED;
+}
+
+/**
+ * vmw_vkms_vblank_trylock - Protects access to crtc during vblank
+ * @crtc: The crtc to lock for vkms
+ *
+ * Tries to lock vkms for vblank, returns immediately.
+ *
+ * Returns true if vkms was locked for vblank, false otherwise.
+ */
+bool
+vmw_vkms_vblank_trylock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ u32 ret;
+
+ ret = atomic_cmpxchg(&du->vkms.atomic_lock,
+ VMW_VKMS_LOCK_UNLOCKED,
+ VMW_VKMS_LOCK_VBLANK);
+
+ return ret == VMW_VKMS_LOCK_UNLOCKED;
+}
+
+void
+vmw_vkms_unlock(struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ /* Release flag; mark it as unlocked. */
+ atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h
new file mode 100644
index 000000000000..69ddd33a8444
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_VKMS_H_
+#define VMWGFX_VKMS_H_
+
+#include <linux/hrtimer_types.h>
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_crtc;
+struct vmw_private;
+struct vmw_surface;
+
+void vmw_vkms_init(struct vmw_private *vmw);
+void vmw_vkms_cleanup(struct vmw_private *vmw);
+
+void vmw_vkms_modeset_lock(struct drm_crtc *crtc);
+bool vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc);
+bool vmw_vkms_vblank_trylock(struct drm_crtc *crtc);
+void vmw_vkms_unlock(struct drm_crtc *crtc);
+
+bool vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+int vmw_vkms_enable_vblank(struct drm_crtc *crtc);
+void vmw_vkms_disable_vblank(struct drm_crtc *crtc);
+
+void vmw_vkms_crtc_init(struct drm_crtc *crtc);
+void vmw_vkms_crtc_cleanup(struct drm_crtc *crtc);
+void vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+				struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+
+const char *const *vmw_vkms_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+int vmw_vkms_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
+int vmw_vkms_set_crc_source(struct drm_crtc *crtc,
+ const char *src_name);
+void vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
+ struct vmw_surface *surf);
+
+#endif