Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig               |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile              |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c         |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c           |  20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h           |   8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c          |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c      |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c | 844
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h |  81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c          |  51
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h          |  57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c      |  29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c        | 510
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h        |  21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c          |  29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c          |  61
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c          | 889
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h          |  72
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c          |  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c          |   3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c   |  63
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c     |  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c         |  12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c       |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c         |  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c      |  85
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c   |   7
27 files changed, 1238 insertions(+), 1649 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6c3c2922ae8b..aab646b91ca9 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_CLIENT_SELECTION
select DRM_TTM
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 46a4ab688a7f..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o vmwgfx_vkms.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 64bd7d74854e..fa5841fda659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -429,7 +429,7 @@ static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
void *ptr = NULL;
int ret;
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
if (ret) {
drm_dbg_driver(&vmw->drm,
@@ -447,7 +447,7 @@ out:
static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_vunmap(bo->tbo.base.dma_buf, map);
else
vmw_bo_unmap(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9b5b8c1f063b..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
@@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL,
+ vmw_resource_unreserve(res, true, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -848,9 +850,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
- xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
@@ -887,3 +889,9 @@ out:
surf = vmw_res_to_srf(res);
return surf;
}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
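
Since vmw_bo_add_detached_resource() above now propagates xa_store() failures through xa_err(), callers are expected to check the result. A minimal caller-side sketch of the pattern (surrounding context hypothetical):

    int ret = vmw_bo_add_detached_resource(vbo, res);
    if (ret)
            return ret;     /* e.g. -ENOMEM from the xarray store */
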
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 11e330c7c7f5..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
@@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
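
With vmw_bo_reference()/vmw_bo_unreference() above rerouted through the GEM object refcount, caller-side usage stays the same; a minimal sketch (surrounding context hypothetical):

    struct vmw_bo *ref = vmw_bo_reference(bo); /* drm_gem_object_get() */
    /* ... use ref ... */
    vmw_bo_unreference(&ref);                  /* drm_gem_object_put(), ref = NULL */
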
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index dd4ca6a9c690..8fe02131a6c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv);
+ vmw_fences_update(dev_priv->fman);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of mouse hotspot
+ * @hotspotY: the vertical position of mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+ * without the risk of clogging other fifocmd users, so
+ * we treat reservations separately from the way we treat
+ * other fallible KMS-atomic resources at prepare_fb
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor mob.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+ /* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+ /* TODO handle non-page-aligned offsets */
+ /* TODO handle more dst & src != 0 */
+ /* TODO handle more than one copy */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mob.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+ * Currently unused because the check at the top exits
+ * right away. In most cases the buffers being different
+ * will mean that their contents differ. For the few
+ * percent of cases where that's not true, the cost of
+ * doing the memcmp on every update seems to outweigh
+ * the benefits. Leave the conditional in to be able to
+ * trivially validate it by removing the initial
+ * if (new_bo != old_bo) at the start.
+ */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) != 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&bo->tbo);
+ return -ENOMEM;
+ }
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * For all update types update the cursor position
+ */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
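
As a worked example of the sizing performed by vmw_cursor_mob_size() and checked against SVGA_REG_MOB_MAX_SIZE in vmw_cursor_mob_get() above, the standalone sketch below computes the MOB footprint of a 64x64 ARGB cursor. The header struct is an illustrative stand-in; the real layout is SVGAGBCursorHeader from the device headers.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for SVGAGBCursorHeader. */
    struct gb_cursor_header_example {
            uint32_t type;
            uint32_t size_in_bytes;
            uint32_t hotspot_x, hotspot_y;
            uint32_t width, height;
    };

    int main(void)
    {
            uint32_t w = 64, h = 64;
            size_t size = (size_t)w * h * sizeof(uint32_t) +
                          sizeof(struct gb_cursor_header_example);

            /* 64 * 64 * 4 = 16384 image bytes plus the header. */
            printf("cursor MOB size: %zu bytes\n", size);
            return 0;
    }
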
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
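
For context, the callbacks declared above are the standard DRM atomic plane hooks. A hypothetical wiring into drm_plane_helper_funcs would look like the sketch below; the actual hookup is done elsewhere in the driver and may differ:

    static const struct drm_plane_helper_funcs vmw_cursor_plane_helper_funcs = {
            .atomic_check  = vmw_cursor_plane_atomic_check,
            .atomic_update = vmw_cursor_plane_atomic_update,
            .prepare_fb    = vmw_cursor_plane_prepare_fb,
            .cleanup_fb    = vmw_cursor_plane_cleanup_fb,
    };
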
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..8ff958d119be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
@@ -460,8 +440,10 @@ static int vmw_device_init(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
- dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ u32 seqno = vmw_fence_read(dev_priv);
+
+ atomic_set(&dev_priv->last_read_seqno, seqno);
+ atomic_set(&dev_priv->marker_seq, seqno);
return 0;
}
@@ -474,7 +456,7 @@ static void vmw_device_fini(struct vmw_private *vmw)
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
- vmw->last_read_seqno = vmw_fence_read(vmw);
+ atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
@@ -733,7 +715,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
pci_set_master(pdev);
- ret = pci_request_regions(pdev, "vmwgfx probe");
+ ret = pcim_request_all_regions(pdev, "vmwgfx probe");
if (ret)
return ret;
@@ -753,7 +735,6 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (!dev->rmmio) {
drm_err(&dev->drm,
"Failed mapping registers mmio memory.\n");
- pci_release_regions(pdev);
return -ENOMEM;
}
} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
@@ -769,16 +750,14 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
fifo_size,
- MEMREMAP_WB);
+ MEMREMAP_WB | MEMREMAP_DEC);
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
"Failed mapping FIFO memory.\n");
- pci_release_regions(pdev);
return PTR_ERR(dev->fifo_mem);
}
} else {
- pci_release_regions(pdev);
return -EINVAL;
}
@@ -856,7 +835,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
vmw_sw_context_init(dev_priv);
@@ -872,7 +850,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
return ret;
ret = vmw_detect_version(dev_priv);
if (ret)
- goto out_no_pci_or_version;
+ return ret;
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -1172,15 +1150,13 @@ out_err0:
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-out_no_pci_or_version:
- pci_release_regions(pdev);
+
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1216,8 +1192,6 @@ static void vmw_driver_unload(struct drm_device *dev)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
-
- pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
@@ -1324,9 +1298,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5275ef632d4b..eda5b6f8f4c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,29 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
@@ -58,7 +38,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -546,7 +522,7 @@ struct vmw_private {
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
- uint32_t last_read_seqno;
+ atomic_t last_read_seqno;
struct vmw_fence_manager *fman;
uint32_t irq_mask; /* Updates protected by waiter_lock */
@@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1032,15 +1006,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
- int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
- u32 flag, int *waiter_count);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Kernel modesetting - vmwgfx_kms.c
@@ -1050,7 +1023,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1067,7 +1039,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
@@ -1393,8 +1364,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e52d73eba48..819704ac675d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -3896,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
}
/*
@@ -4512,8 +4493,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
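
The fence rework below leans on wrap-safe unsigned comparison of 32-bit seqnos (the "seqno - fence->base.seqno < VMW_FENCE_WRAP" tests). A standalone illustration of the idiom, reusing the VMW_FENCE_WRAP value defined in vmwgfx_fence.c below:

    #include <stdint.h>
    #include <stdio.h>

    #define VMW_FENCE_WRAP (1u << 31)

    /* True if seq has reached or passed target, even if the 32-bit
     * counter wrapped in between. */
    static int seqno_passed(uint32_t seq, uint32_t target)
    {
            return seq - target < VMW_FENCE_WRAP;
    }

    int main(void)
    {
            printf("%d\n", seqno_passed(5, 3));           /* 1: passed */
            printf("%d\n", seqno_passed(3, 5));           /* 0: not yet */
            printf("%d\n", seqno_passed(2, 0xfffffffeu)); /* 1: passed, across wrap */
            return 0;
    }
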
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 588d50ababf6..c2294abbe753 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,32 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-#include <linux/sched/signal.h>
-
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -35,14 +14,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work;
bool fifo_down;
- struct list_head cleanup_list;
- uint32_t pending_actions[VMW_ACTION_MAX];
- struct mutex goal_irq_mutex;
- bool goal_irq_on; /* Protected by @goal_irq_mutex */
- bool seqno_valid; /* Protected by @lock, and may not be set to true
- without the @goal_irq_mutex held. */
u64 ctx;
};
@@ -52,12 +24,10 @@ struct vmw_user_fence {
};
/**
- * struct vmw_event_fence_action - fence action that delivers a drm event.
+ * struct vmw_event_fence_action - fence callback that delivers a DRM event.
*
- * @action: A struct vmw_fence_action to hook up to a fence.
+ * @base: For use with dma_fence_add_callback(...)
* @event: A pointer to the pending event.
- * @fence: A referenced pointer to the fence to keep it alive while @action
- * hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
* @tv_sec: If non-null, the variable pointed to will be assigned
* current time tv_sec val when the fence signals.
@@ -65,10 +35,9 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct vmw_fence_action action;
+ struct dma_fence_cb base;
struct drm_pending_event *event;
- struct vmw_fence_obj *fence;
struct drm_device *dev;
uint32_t *tv_sec;
@@ -81,44 +50,6 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
-static u32 vmw_fence_goal_read(struct vmw_private *vmw)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
- else
- return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
-}
-
-static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
- else
- vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
-}
-
-/*
- * Note on fencing subsystem usage of irqs:
- * Typically the vmw_fences_update function is called
- *
- * a) When a new fence seqno has been submitted by the fifo code.
- * b) On-demand when we have waiters. Sleeping waiters will switch on the
- * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
- * irq is received. When the last fence waiter is gone, that IRQ is masked
- * away.
- *
- * In situations where there are no waiters and we don't submit any new fences,
- * fence objects may not be signaled. This is perfectly OK, since there are
- * no consumers of the signaled data, but that is NOT ok when there are fence
- * actions attached to a fence. The fencing subsystem then makes use of the
- * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
- * which has an action attached, and each time vmw_fences_update is called,
- * the subsystem makes sure the fence goal seqno is updated.
- *
- * The fence goal seqno irq is on as long as there are unsignaled fence
- * objects with actions attached to them.
- */
-
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
@@ -126,8 +57,21 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (!list_empty(&fence->head)) {
+ /* The fence manager still has an implicit reference to this
+ * fence via the fence list if head is set. Because the lock is
+ * required to be held when the fence manager updates the fence
+ * list either the fence will have been removed after we get
+ * the lock below or we can safely remove it and the fence
+ * manager will never see it. This implies the fence is being
+ * deleted without being signaled which is dubious but valid
+ * if there are no callbacks. The dma_fence code that calls
+ * this hook will warn about fences deleted unsignaled with
+ * callbacks pending, so no need to warn again here.
+ */
spin_lock(&fman->lock);
list_del_init(&fence->head);
+ if (fence->waiter_added)
+ vmw_seqno_waiter_remove(fman->dev_priv);
spin_unlock(&fman->lock);
}
fence->destroy(fence);
@@ -143,165 +87,46 @@ static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
return "svga";
}
+/* When we toggle signaling for the SVGA device there is a race period from
+ * the time we first read the fence seqno to the time we enable interrupts.
+ * If we miss the interrupt for a fence during this period it's likely the driver
+ * will stall. As a result we need to re-read the seqno after interrupts are
+ * enabled. If interrupts were already enabled we just increment the number of
+ * seqno waiters.
+ */
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
+ u32 seqno;
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
-
- u32 seqno = vmw_fence_read(dev_priv);
- if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+check_for_race:
+ seqno = vmw_fence_read(dev_priv);
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(dev_priv);
+ fence->waiter_added = false;
+ }
return false;
-
+ } else if (!fence->waiter_added) {
+ fence->waiter_added = true;
+ if (vmw_seqno_waiter_add(dev_priv))
+ goto check_for_race;
+ }
return true;
}
-struct vmwgfx_wait_cb {
- struct dma_fence_cb base;
- struct task_struct *task;
-};
-
-static void
-vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct vmwgfx_wait_cb *wait =
- container_of(cb, struct vmwgfx_wait_cb, base);
-
- wake_up_process(wait->task);
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
-
-static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
-{
- struct vmw_fence_obj *fence =
- container_of(f, struct vmw_fence_obj, base);
-
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct vmw_private *dev_priv = fman->dev_priv;
- struct vmwgfx_wait_cb cb;
- long ret = timeout;
-
- if (likely(vmw_fence_obj_signaled(fence)))
- return timeout;
-
- vmw_seqno_waiter_add(dev_priv);
-
- spin_lock(f->lock);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- goto out;
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- cb.base.func = vmwgfx_wait_cb;
- cb.task = current;
- list_add(&cb.base.node, &f->cb_list);
-
- for (;;) {
- __vmw_fences_update(fman);
-
- /*
- * We can use the barrier free __set_current_state() since
- * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
- * fence spinlock.
- */
- if (intr)
- __set_current_state(TASK_INTERRUPTIBLE);
- else
- __set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
- if (ret == 0 && timeout > 0)
- ret = 1;
- break;
- }
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (ret == 0)
- break;
-
- spin_unlock(f->lock);
-
- ret = schedule_timeout(ret);
-
- spin_lock(f->lock);
- }
- __set_current_state(TASK_RUNNING);
- if (!list_empty(&cb.base.node))
- list_del(&cb.base.node);
-
-out:
- spin_unlock(f->lock);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- return ret;
-}
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
- .wait = vmw_fence_wait,
.release = vmw_fence_obj_destroy,
};
-/*
- * Execute signal actions on fences recently signaled.
- * This is done from a workqueue so we don't have to execute
- * signal actions from atomic context.
- */
-
-static void vmw_fence_work_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, work);
- struct list_head list;
- struct vmw_fence_action *action, *next_action;
- bool seqno_valid;
-
- do {
- INIT_LIST_HEAD(&list);
- mutex_lock(&fman->goal_irq_mutex);
-
- spin_lock(&fman->lock);
- list_splice_init(&fman->cleanup_list, &list);
- seqno_valid = fman->seqno_valid;
- spin_unlock(&fman->lock);
-
- if (!seqno_valid && fman->goal_irq_on) {
- fman->goal_irq_on = false;
- vmw_goal_waiter_remove(fman->dev_priv);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
- if (list_empty(&list))
- return;
-
- /*
- * At this point, only we should be able to manipulate the
- * list heads of the actions we have on the private list.
- * hence fman::lock not held.
- */
-
- list_for_each_entry_safe(action, next_action, &list, head) {
- list_del_init(&action->head);
- if (action->cleanup)
- action->cleanup(action);
- }
- } while (1);
-}
-
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
@@ -312,10 +137,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->dev_priv = dev_priv;
spin_lock_init(&fman->lock);
INIT_LIST_HEAD(&fman->fence_list);
- INIT_LIST_HEAD(&fman->cleanup_list);
- INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
return fman;
@@ -325,11 +147,8 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
bool lists_empty;
- (void) cancel_work_sync(&fman->work);
-
spin_lock(&fman->lock);
- lists_empty = list_empty(&fman->fence_list) &&
- list_empty(&fman->cleanup_list);
+ lists_empty = list_empty(&fman->fence_list);
spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
@@ -344,7 +163,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
fman->ctx, seqno);
- INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
spin_lock(&fman->lock);
@@ -352,6 +170,11 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
ret = -EBUSY;
goto out_unlock;
}
+ /*
+ * This creates an implicit reference to the fence from the fence
+ * manager. It will be dropped when the fence is signaled, which is
+ * expected to happen before deletion. The destructor has code to
+ * catch the rare case of deletion before signaling.
+ */
list_add_tail(&fence->head, &fman->fence_list);
out_unlock:
@@ -360,148 +183,35 @@ out_unlock:
}
-static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
- struct list_head *list)
-{
- struct vmw_fence_action *action, *next_action;
-
- list_for_each_entry_safe(action, next_action, list, head) {
- list_del_init(&action->head);
- fman->pending_actions[action->type]--;
- if (action->seq_passed != NULL)
- action->seq_passed(action);
-
- /*
- * Add the cleanup action to the cleanup list so that
- * it will be performed by a worker task.
- */
-
- list_add_tail(&action->head, &fman->cleanup_list);
- }
-}
-
-/**
- * vmw_fence_goal_new_locked - Figure out a new device fence goal
- * seqno if needed.
- *
- * @fman: Pointer to a fence manager.
- * @passed_seqno: The seqno the device currently signals as passed.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when we have a new passed_seqno, and
- * we might need to update the fence goal. It checks to see whether
- * the current fence goal has already passed, and, in that case,
- * scans through all unsignaled fences to get the next fence object with an
- * action attached, and sets the seqno of that fence as a new fence goal.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
- u32 passed_seqno)
-{
- u32 goal_seqno;
- struct vmw_fence_obj *fence, *next_fence;
-
- if (likely(!fman->seqno_valid))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
- return false;
-
- fman->seqno_valid = false;
- list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (!list_empty(&fence->seq_passed_actions)) {
- fman->seqno_valid = true;
- vmw_fence_goal_write(fman->dev_priv,
- fence->base.seqno);
- break;
- }
- }
-
- return true;
-}
-
-
-/**
- * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
- * needed.
- *
- * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
- * considered as a device fence goal.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when an action has been attached to a fence to
- * check whether the seqno of that fence should be used for a fence
- * goal interrupt. This is typically needed if the current fence goal is
- * invalid, or has a higher seqno than that of the current fence object.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- u32 goal_seqno;
-
- if (dma_fence_is_signaled_locked(&fence->base))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(fman->seqno_valid &&
- goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
- return false;
-
- vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
- fman->seqno_valid = true;
-
- return true;
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_obj *fence, *next_fence;
- struct list_head action_list;
- bool needs_rerun;
- uint32_t seqno, new_seqno;
+ const bool cookie = dma_fence_begin_signalling();
+ const u32 seqno = vmw_fence_read(fman->dev_priv);
- seqno = vmw_fence_read(fman->dev_priv);
-rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(fman->dev_priv);
+ fence->waiter_added = false;
+ }
dma_fence_signal_locked(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
} else
break;
}
-
- /*
- * Rerun if the fence goal seqno was updated, and the
- * hardware might have raced with that update, so that
- * we missed a fence_goal irq.
- */
-
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
- if (unlikely(needs_rerun)) {
- new_seqno = vmw_fence_read(fman->dev_priv);
- if (new_seqno != seqno) {
- seqno = new_seqno;
- goto rerun;
- }
- }
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
+ dma_fence_end_signalling(cookie);
+ atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+ return seqno;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
+ u32 seqno;
spin_lock(&fman->lock);
- __vmw_fences_update(fman);
+ seqno = __vmw_fences_update(fman);
spin_unlock(&fman->lock);
+ return seqno;
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -539,14 +249,13 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(!fence))
return -ENOMEM;
- ret = vmw_fence_obj_init(fman, fence, seqno,
- vmw_fence_destroy);
+ ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
@@ -638,7 +347,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- struct list_head action_list;
int ret;
/*
@@ -661,10 +369,6 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
dma_fence_signal(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
}
BUG_ON(!list_empty(&fence->head));
@@ -778,7 +482,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -787,14 +490,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fman_from_fence(fence);
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock(&fman->lock);
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock(&fman->lock);
+ arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
ttm_base_object_unref(&base);
@@ -822,10 +522,11 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context.
*/
-static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
+ container_of(cb, struct vmw_event_fence_action, base);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
@@ -837,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_get_ts64(&ts);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -846,75 +547,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
spin_unlock_irq(&dev->event_lock);
-}
-
-/**
- * vmw_event_fence_action_cleanup
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is the struct vmw_fence_action destructor. It's typically
- * called from a workqueue.
- */
-static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
-
- vmw_fence_obj_unreference(&eaction->fence);
+ dma_fence_put(f);
kfree(eaction);
}
-
-/**
- * vmw_fence_obj_add_action - Add an action to a fence object.
- *
- * @fence: The fence object.
- * @action: The action to add.
- *
- * Note that the action callbacks may be executed before this function
- * returns.
- */
-static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
- struct vmw_fence_action *action)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- bool run_update = false;
-
- mutex_lock(&fman->goal_irq_mutex);
- spin_lock(&fman->lock);
-
- fman->pending_actions[action->type]++;
- if (dma_fence_is_signaled_locked(&fence->base)) {
- struct list_head action_list;
-
- INIT_LIST_HEAD(&action_list);
- list_add_tail(&action->head, &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- } else {
- list_add_tail(&action->head, &fence->seq_passed_actions);
-
- /*
- * This function may set fman::seqno_valid, so it must
- * be run with the goal_irq_mutex held.
- */
- run_update = vmw_fence_goal_check_locked(fence);
- }
-
- spin_unlock(&fman->lock);
-
- if (run_update) {
- if (!fman->goal_irq_on) {
- fman->goal_irq_on = true;
- vmw_goal_waiter_add(fman->dev_priv);
- }
- vmw_fences_update(fman);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
-}
-
/**
* vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
@@ -949,18 +585,14 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
return -ENOMEM;
eaction->event = event;
-
- eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
- eaction->action.cleanup = vmw_event_fence_action_cleanup;
- eaction->action.type = VMW_ACTION_EVENT;
-
- eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- vmw_fence_obj_add_action(fence, &eaction->action);
-
+ vmw_fence_obj_reference(fence); /* dropped in the callback */
+ if (dma_fence_add_callback(&fence->base, &eaction->base,
+ vmw_event_fence_action_seq_passed) < 0)
+ vmw_event_fence_action_seq_passed(&fence->base, &eaction->base);
return 0;
}
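
The rework above retires the driver-private fence-action machinery in favor
of the core dma_fence callback API. As a sketch, with hypothetical names and
assuming only the documented dma-fence contract: the callback embeds a
struct dma_fence_cb, dma_fence_add_callback() queues it, and a negative
return means the fence has already signaled, so the caller runs the callback
inline, which is exactly the fallback vmw_event_fence_action_queue() uses.

    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    struct my_event {
            struct dma_fence_cb base;
            /* payload to deliver once the fence signals */
    };

    static void my_event_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
            struct my_event *ev = container_of(cb, struct my_event, base);

            /* May run in atomic context, from whichever path signals f. */
            /* ... deliver ev's payload ... */
            dma_fence_put(f);       /* drop the reference taken at queue time */
            kfree(ev);
    }

    static void my_event_queue(struct dma_fence *f, struct my_event *ev)
    {
            dma_fence_get(f);       /* dropped in the callback */
            if (dma_fence_add_callback(f, &ev->base, my_event_cb) < 0)
                    my_event_cb(f, &ev->base);      /* already signaled */
    }
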
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index a7eee579c76a..e897cccae1ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -39,27 +39,10 @@ struct drm_pending_event;
struct vmw_private;
struct vmw_fence_manager;
-/**
- *
- *
- */
-enum vmw_action_type {
- VMW_ACTION_EVENT = 0,
- VMW_ACTION_MAX
-};
-
-struct vmw_fence_action {
- struct list_head head;
- enum vmw_action_type type;
- void (*seq_passed) (struct vmw_fence_action *action);
- void (*cleanup) (struct vmw_fence_action *action);
-};
-
struct vmw_fence_obj {
struct dma_fence base;
-
+ bool waiter_added;
struct list_head head;
- struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
};
@@ -86,7 +69,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
return fence;
}
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..eedf1fe60be7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -84,7 +84,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
@@ -101,7 +101,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
dma_buf_vunmap(obj->import_attach->dmabuf, map);
else
drm_gem_ttm_vunmap(obj, map);
@@ -111,7 +111,7 @@ static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/*
* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
@@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
@@ -298,11 +284,10 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->tbo.base.size, placement, type);
- seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+ seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d",
bo->tbo.priority,
bo->tbo.pin_count,
- kref_read(&bo->tbo.base.refcount),
- kref_read(&bo->tbo.kref));
+ kref_read(&bo->tbo.base.refcount));
seq_puts(m, "\n");
}
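
drm_gem_is_imported() replaces open-coded checks of obj->import_attach, so
the definition of "imported" stays inside one core helper. The dispatch
pattern above, reduced to a sketch with a hypothetical native path:

    #include <drm/drm_gem.h>
    #include <linux/dma-buf.h>
    #include <linux/iosys-map.h>

    static int my_native_vmap(struct drm_gem_object *obj,
                              struct iosys_map *map);   /* hypothetical */

    static int my_vmap(struct drm_gem_object *obj, struct iosys_map *map)
    {
            /* Imported objects must be mapped through their exporter. */
            if (drm_gem_is_imported(obj))
                    return dma_buf_vmap(obj->import_attach->dmabuf, map);
            return my_native_vmap(obj, map);
    }
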
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d4..05773eb394d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
@@ -239,51 +230,59 @@ out_err:
return ret;
}
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
-void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
static void vmw_irq_preinstall(struct drm_device *dev)
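
Two points in the hunk above, restated as a sketch with hypothetical names:
the waiter helpers now report whether this caller actually reprogrammed
SVGA_REG_IRQMASK (only the 0 -> 1 and 1 -> 0 waiter-count transitions touch
the register), and last_read_seqno is now an atomic published with release
semantics by __vmw_fences_update(), so a reader that observes a seqno with
acquire semantics also observes the signaling that produced it.

    #include <linux/atomic.h>

    #define MY_SEQNO_WRAP (1u << 24)    /* stand-in for VMW_FENCE_WRAP */

    static atomic_t my_last_read_seqno;

    /* Writer: publish only after the fences up to seqno were signaled. */
    static void my_publish_seqno(u32 seqno)
    {
            atomic_set_release(&my_last_read_seqno, seqno);
    }

    /* Reader: the acquire pairs with the release above. */
    static bool my_seqno_passed(u32 seqno)
    {
            u32 last = atomic_read_acquire(&my_last_read_seqno);

            return last - seqno < MY_SEQNO_WRAP;    /* wrap-safe compare */
    }
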
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1912ac1cde6d..54ea1b513950 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,33 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
@@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
- we treat reservations separtely from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot Y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- struct vmw_surface *surf;
-
- if (vmw_user_object_is_null(&vps->uo))
- return NULL;
-
- surf = vmw_user_object_surface(&vps->uo);
- if (surf && !vmw_user_object_is_mapped(&vps->uo))
- return surf->snooper.image;
-
- return vmw_user_object_map(&vps->uo);
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image && old_image != new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
- /* Fence the mob creation so we are guarateed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
- /* TODO handle none page aligned offsets */
- /* TODO handle more dst & src != 0 */
- /* TODO handle more then one copy */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
- /* Image is unsigned pointer. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface
- *
- * Returns 0 on success
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (!vmw_user_object_is_null(&vps->uo))
- vmw_user_object_unmap(&vps->uo);
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps);
- vmw_user_object_unref(&vps->uo);
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_bo *bo = NULL;
- int ret = 0;
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_user_object_unmap(&vps->uo);
- vmw_user_object_unref(&vps->uo);
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
- vps->uo.surface = NULL;
- } else {
- memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
- }
- vmw_user_object_ref(&vps->uo);
- }
-
- bo = vmw_user_object_buffer(&vps->uo);
- if (bo) {
- struct ttm_operation_ctx ctx = {false, false};
-
- ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
- if (ret != 0)
- return -ENOMEM;
-
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret != 0)
- return -ENOMEM;
-
- vmw_bo_pin_reserved(bo, true);
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- (void)vmw_bo_map_and_cache_size(bo, size);
- } else {
- vmw_bo_map_and_cache(bo);
- }
- ttm_bo_unreserve(&bo->tbo);
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- struct vmw_bo *old_bo = NULL;
- struct vmw_bo *new_bo = NULL;
- struct ww_acquire_ctx ctx;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vmw_user_object_surface(&vps->uo);
-
- if (vmw_user_object_is_null(&vps->uo)) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (du->cursor_surface)
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ww_acquire_init(&ctx, &reservation_ww_class);
-
- if (!vmw_user_object_is_null(&old_vps->uo)) {
- old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
- if (ret != 0)
- return;
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- new_bo = vmw_user_object_buffer(&vps->uo);
- if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
- if (ret != 0) {
- if (old_bo) {
- ttm_bo_unreserve(&old_bo->tbo);
- ww_acquire_fini(&ctx);
- }
- return;
- }
- } else {
- new_bo = NULL;
- }
- }
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- if (new_bo)
- ttm_bo_unreserve(&new_bo->tbo);
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
-
- ww_acquire_fini(&ctx);
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
-}
-
-
-/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
@@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
vmw_user_object_ref(&vps->uo);
@@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+ * bo->dirty is reference counted so it being NULL
+ * means that the surface wasn't coherent to begin
+ * with and so we have to free the dirty tracker
+ * in the vmw_resource
+ */
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
vmw_user_object_unref(&vfbs->uo);
@@ -1289,6 +500,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1337,7 +549,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd);
memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
vmw_user_object_ref(&vfbs->uo);
@@ -1375,6 +587,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1390,6 +603,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1422,7 +636,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
}
vfbd->base.base.obj[0] = &bo->tbo.base;
- drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
*out = &vfbd->base;
@@ -1467,11 +681,13 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* @dev_priv: Pointer to device private struct.
* @uo: Pointer to user object to wrap the kms framebuffer around.
* Either the buffer or surface inside the user object must be NULL.
+ * @info: pixel format information.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
@@ -1480,10 +696,10 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
/* Create the new framebuffer depending one what we have */
if (vmw_user_object_surface(uo)) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else if (uo->buffer) {
ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
- mode_cmd);
+ info, mode_cmd);
} else {
BUG();
}
@@ -1500,11 +716,14 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
@@ -1527,13 +746,15 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
err_out:
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
/* vmw_user_object_lookup takes one ref so does new_fb */
vmw_user_object_unref(&uo);
@@ -1542,6 +763,14 @@ err_out:
return ERR_PTR(ret);
}
+ if (bo) {
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+
return &vfb->base;
}
@@ -1974,44 +1203,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
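
The const struct drm_format_info *info parameter threaded through
vmw_kms_fb_create(), vmw_kms_new_framebuffer() and both framebuffer
constructors follows the DRM core change that hands .fb_create the
already-resolved format description instead of having every driver derive
it again from mode_cmd->pixel_format; drm_helper_mode_fill_fb_struct()
takes the same pointer. A minimal sketch of a callback with the new
prototype, using hypothetical driver names:

    #include <drm/drm_framebuffer.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_modeset_helper.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static struct drm_framebuffer *
    my_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                 const struct drm_format_info *info,
                 const struct drm_mode_fb_cmd2 *mode_cmd)
    {
            struct drm_framebuffer *fb = kzalloc(sizeof(*fb), GFP_KERNEL);

            if (!fb)
                    return ERR_PTR(-ENOMEM);
            /* info is pre-resolved by the core; no drm_get_format_info() */
            drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
            /* ... bind the backing object, then drm_framebuffer_init() ... */
            return fb;
    }
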
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4eab581883e2..445471fe9be6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,40 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -255,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -283,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -317,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -343,17 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -403,8 +351,6 @@ struct vmw_display_unit {
*/
void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -453,6 +399,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_user_object *uo,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
@@ -460,19 +407,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f0b429525467..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
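
The two new helpers pair naturally: vmw_bo_is_dirty() reports whether the tracked range is non-empty, and vmw_bo_dirty_clear() walks the bitmap clearing each dirty extent before collapsing the start/end bounds. A hypothetical call site (illustration only, not part of this patch):

	/* Discard any pending dirty pages instead of transferring them. */
	static void example_discard_dirty(struct vmw_bo *vbo)
	{
		if (vmw_bo_is_dirty(vbo))	/* dirty->start < dirty->end */
			vmw_bo_dirty_clear(vbo);
	}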
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a73af8a355fb..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
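
The reference-counting swap here is mechanical: vmwgfx buffer objects embed a struct drm_gem_object at tbo.base, and this series moves the driver to take references on the embedded GEM object rather than on the raw TTM object. The pattern, sketched with an assumed vmw_bo pointer:

	struct ttm_buffer_object *bo = &vbo->tbo;

	drm_gem_object_get(&bo->base);	/* was: ttm_bo_get(bo) */
	/* ... bo is guaranteed to stay alive here ... */
	drm_gem_object_put(&bo->base);	/* was: ttm_bo_put(bo) */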
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32029d80b72b..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 7fb1c88bcc47..69dfe69ce0f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true,
+ .pin = false,
.keep_resv = true,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f5d2ed1b0a72..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
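
Enabling damage clips on the cursor plane lets the atomic helpers hand FB_DAMAGE_CLIPS down to the new cursor code, so updates can be limited to the changed rectangle. A generic sketch of how a plane's atomic_update might consume them (standard drm_damage_helper API, not code from this patch):

	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* repaint only the clip rectangle */
	}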
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5721c74da3e0..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,32 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
* Dumb buffers own the resource and they'll unref the
* resource themselves
*/
- if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
- return;
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
vmw_resource_unreference(&res);
}
@@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (!file_priv->atomic &&
- metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
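
The open-coded snooper setup moves behind vmw_cursor_snooper_create(). Judging from the call site, the helper's contract appears to be: return the allocated snoop image for surfaces matching the legacy cursor layout, NULL when no snooping is needed, and an ERR_PTR on allocation failure, hence the IS_ERR() check rather than a NULL check:

	srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
	if (IS_ERR(srf->snooper.image))	/* NULL means "no snooper", not error */
		return PTR_ERR(srf->snooper.image);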
@@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
vbo = res->guest_memory_bo;
vbo->is_dumb = true;
vbo->dumb_surface = vmw_res_to_srf(res);
-
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+ * Unset the user surface dtor since this is not actually exposed
+ * to userspace. The surface is owned via the dumb buffer's GEM handle.
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
err:
if (res)
vmw_resource_unreference(&res);
- if (ret)
- ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}