Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  194
 1 file changed, 104 insertions(+), 90 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 44f3f6f107d3..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -30,12 +30,14 @@
* whenever the backing MOB is evicted.
*/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
+#include <drm/ttm/ttm_placement.h>
+
/**
* struct vmw_cotable - Context Object Table resource
*
@@ -63,6 +65,7 @@ struct vmw_cotable {
* @min_initial_entries: Min number of initial entries at cotable allocation
* for this cotable type.
* @size: Size of each entry.
+ * @unbind_func: Unbind call-back function.
*/
struct vmw_cotable_info {
u32 min_initial_entries;
@@ -71,18 +74,31 @@ struct vmw_cotable_info {
bool);
};
+
+/*
+ * Getting the initial size right is difficult because it all depends
+ * on what userspace is doing. The sizes will be aligned up to
+ * PAGE_SIZE, so we just want to make sure that for the majority of apps
+ * the initial number of entries doesn't require an immediate resize.
+ * For all cotables except SVGACOTableDXElementLayoutEntry and
+ * SVGACOTableDXBlendStateEntry the initial number of entries fits
+ * within a single page. For SVGACOTableDXElementLayoutEntry and
+ * SVGACOTableDXBlendStateEntry we want to reserve two pages,
+ * because that's what all apps will require initially.
+ */
static const struct vmw_cotable_info co_info[] = {
{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
- {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
- {1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+ {PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+ {PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
- {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+ {1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
{1, sizeof(SVGACOTableDXQueryEntry), NULL},
- {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+ {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
+ {1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};
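To make the sizing comment above concrete, here is a minimal standalone sketch of the computation that vmw_cotable_alloc() performs further down. PAGE_SZ is a stand-in for the kernel's PAGE_SIZE, and cotable_initial_size() is a hypothetical name, not a driver function:

#include <stddef.h>

#define PAGE_SZ 4096u	/* stand-in for the kernel's PAGE_SIZE */

/* Mirrors the sizing logic in vmw_cotable_alloc(): one page by default,
 * grown and page-aligned up when min_entries does not fit in one page. */
static size_t cotable_initial_size(size_t entry_size, size_t min_entries)
{
	size_t size = PAGE_SZ;

	if (PAGE_SZ / entry_size < min_entries)
		size = (min_entries * entry_size + PAGE_SZ - 1) &
		       ~(size_t)(PAGE_SZ - 1);
	return size;
}

With min_entries set to PAGE_SIZE/sizeof(entry) + 1, as for the element layout and blend state tables above, the result lands on exactly two pages (for any entry size up to a page), which is what the comment asks for.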
/*
@@ -102,6 +118,7 @@ const SVGACOTableType vmw_cotable_scrub_order[] = {
SVGA_COTABLE_SAMPLER,
SVGA_COTABLE_STREAMOUTPUT,
SVGA_COTABLE_DXQUERY,
+ SVGA_COTABLE_UAVIEW,
};
static int vmw_cotable_bind(struct vmw_resource *res,
@@ -114,10 +131,13 @@ static int vmw_cotable_destroy(struct vmw_resource *res);
static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "context guest backed object tables",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_cotable_create,
.destroy = vmw_cotable_destroy,
.bind = vmw_cotable_bind,
@@ -162,32 +182,29 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
- struct ttm_buffer_object *bo = &res->backup->base;
+ struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body;
} *cmd;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- lockdep_assert_held(&bo->resv->lock.base);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
+ dma_resv_assert_held(bo->base.resv);
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "binding.\n");
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
- WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
- vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+ vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
vcotbl->scrubbed = false;
return 0;
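For context, the hunk above follows the driver's standard reserve/fill/commit command pattern, now via the VMW_CMD_* helpers (which log their own errors, hence the dropped DRM_ERROR). A condensed sketch, factored into a hypothetical helper covering what vmw_cotable_unscrub() and vmw_cotable_scrub() both do inline: pass bo->resource->start as mobid to bind a MOB, or SVGA3D_INVALID_ID to unbind.

static int vmw_cotable_set_mob(struct vmw_private *dev_priv,
			       struct vmw_cotable *vcotbl,
			       u32 mobid, u32 valid_size)
{
	struct {
		SVGA3dCmdHeader header;		/* command id + body size */
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = mobid;
	cmd->body.validSizeInBytes = valid_size;
	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
	return 0;
}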
@@ -213,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res,
* take the opportunity to correct the value here so that it's not
* misused in the future.
*/
- val_buf->bo = &res->backup->base;
+ val_buf->bo = &res->guest_memory_bo->tbo;
return vmw_cotable_unscrub(res);
}
@@ -262,12 +279,9 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
- if (!cmd1) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "unbinding.\n");
+ cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
+ if (!cmd1)
return -ENOMEM;
- }
vcotbl->size_read_back = 0;
if (readback) {
@@ -277,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd0->body.cid = vcotbl->ctx->id;
cmd0->body.type = vcotbl->type;
cmd1 = (void *) &cmd0[1];
- vcotbl->size_read_back = res->backup_size;
+ vcotbl->size_read_back = res->guest_memory_size;
}
cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd1->header.size = sizeof(cmd1->body);
@@ -285,7 +299,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd1->body.type = vcotbl->type;
cmd1->body.mobid = SVGA3D_INVALID_ID;
cmd1->body.validSizeInBytes = 0;
- vmw_fifo_commit_flush(dev_priv, submit_size);
+ vmw_cmd_commit_flush(dev_priv, submit_size);
vcotbl->scrubbed = true;
/* Trigger a create() on next validate. */
@@ -299,7 +313,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
*
* @res: Pointer to the cotable resource.
* @readback: Whether to read back cotable data to the backup buffer.
- * val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
* for convenience / fencing.
*
* Unbinds the cotable from the device and fences the backup buffer.
@@ -313,11 +327,11 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
- if (list_empty(&res->mob_head))
+ if (!vmw_resource_mob_attached(res))
return 0;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- lockdep_assert_held(&bo->resv->lock.base);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
+ dma_resv_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
if (!vcotbl->scrubbed)
@@ -351,23 +365,20 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "readback.\n");
+ cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- vcotbl->size_read_back = res->backup_size;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vcotbl->size_read_back = res->guest_memory_size;
+ vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_bo_fence_single(&res->backup->base, fence);
+ vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -390,18 +401,28 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
- struct vmw_buffer_object *buf, *old_buf = res->backup;
- struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
- size_t old_size = res->backup_size;
+ struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
+ struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
+ size_t old_size = res->guest_memory_size;
size_t old_size_read_back = vcotbl->size_read_back;
size_t cur_size_read_back;
struct ttm_bo_kmap_obj old_map, new_map;
int ret;
size_t i;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
+ .bo_type = ttm_bo_type_device,
+ .size = new_size,
+ .pin = true
+ };
+
+ MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
+ MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
ret = vmw_cotable_readback(res);
if (ret)
- return ret;
+ goto out_done;
cur_size_read_back = vcotbl->size_read_back;
vcotbl->size_read_back = old_size_read_back;
@@ -411,18 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
- true, vmw_bo_bo_free);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
- return ret;
+ goto out_done;
}
- bo = &buf->base;
+ bo = &buf->tbo;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
ret = ttm_bo_wait(old_bo, false, false);
@@ -435,7 +451,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->num_pages; ++i) {
+ for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
@@ -456,14 +472,18 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
}
/* Unpin new buffer, and switch backup buffers. */
- ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
}
- res->backup = buf;
- res->backup_size = new_size;
+ vmw_resource_mob_detach(res);
+ res->guest_memory_bo = buf;
+ res->guest_memory_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
/*
@@ -473,25 +493,38 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
ret = vmw_cotable_unscrub(res);
if (ret) {
DRM_ERROR("Failed switching COTable backup buffer.\n");
- res->backup = old_buf;
- res->backup_size = old_size;
+ res->guest_memory_bo = old_buf;
+ res->guest_memory_size = old_size;
vcotbl->size_read_back = old_size_read_back;
+ vmw_resource_mob_attach(res);
goto out_wait;
}
+ vmw_resource_mob_attach(res);
/* Let go of the old mob. */
- list_del(&res->mob_head);
- list_add_tail(&res->mob_head, &buf->res_list);
- vmw_bo_unreference(&old_buf);
+ vmw_user_bo_unref(&old_buf);
res->id = vcotbl->type;
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (unlikely(ret))
+ goto out_wait;
+
+ /* Release the pin acquired in vmw_bo_create */
+ ttm_bo_unpin(bo);
+
+ MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
+
return 0;
out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
+ ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
- vmw_bo_unreference(&buf);
+ vmw_user_bo_unref(&buf);
+
+out_done:
+ MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
return ret;
}
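The "page by page copy" loop converted above (old_bo->num_pages becomes PFN_UP(old_bo->resource->size)) is worth seeing in isolation. A sketch of the TTM pattern, assuming both buffer objects are already reserved and their backing pages populated; copy_bo_pages() is a hypothetical name, while the ttm_* calls are the real API:

static int copy_bo_pages(struct ttm_buffer_object *src,
			 struct ttm_buffer_object *dst,
			 size_t num_pages)
{
	struct ttm_bo_kmap_obj src_map, dst_map;
	bool src_iomem, dst_iomem;
	size_t i;
	int ret;

	/* Map one page of each object at a time and memcpy, so the
	 * whole MOB never needs a single large vmap(). */
	for (i = 0; i < num_pages; ++i) {
		ret = ttm_bo_kmap(src, i, 1, &src_map);
		if (ret)
			return ret;
		ret = ttm_bo_kmap(dst, i, 1, &dst_map);
		if (ret) {
			ttm_bo_kunmap(&src_map);
			return ret;
		}
		memcpy(ttm_kmap_obj_virtual(&dst_map, &dst_iomem),
		       ttm_kmap_obj_virtual(&src_map, &src_iomem),
		       PAGE_SIZE);
		ttm_bo_kunmap(&dst_map);
		ttm_bo_kunmap(&src_map);
	}
	return 0;
}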
@@ -505,13 +538,13 @@ out_wait:
* is called before bind() in the validation sequence is instead used for two
* things.
* 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- * buffer, that is, if @res->mob_head is non-empty.
+ * buffer.
* 2) Resize the cotable if needed.
*/
static int vmw_cotable_create(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
- size_t new_size = res->backup_size;
+ size_t new_size = res->guest_memory_size;
size_t needed_size;
int ret;
@@ -520,8 +553,8 @@ static int vmw_cotable_create(struct vmw_resource *res)
while (needed_size > new_size)
new_size *= 2;
- if (likely(new_size <= res->backup_size)) {
- if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+ if (likely(new_size <= res->guest_memory_size)) {
+ if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res);
if (ret)
return ret;
@@ -545,8 +578,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res)
(void) vmw_cotable_destroy(res);
}
-static size_t cotable_acc_size;
-
/**
* vmw_cotable_free - Cotable resource destructor
*
@@ -554,10 +585,7 @@ static size_t cotable_acc_size;
*/
static void vmw_cotable_free(struct vmw_resource *res)
{
- struct vmw_private *dev_priv = res->dev_priv;
-
kfree(res);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}
/**
@@ -573,21 +601,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type)
{
struct vmw_cotable *vcotbl;
- struct ttm_operation_ctx ttm_opt_ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
u32 num_entries;
- if (unlikely(cotable_acc_size == 0))
- cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- cotable_acc_size, &ttm_opt_ctx);
- if (unlikely(ret))
- return ERR_PTR(ret);
-
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
if (unlikely(!vcotbl)) {
ret = -ENOMEM;
@@ -601,13 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vcotbl->resource_list);
vcotbl->res.id = type;
- vcotbl->res.backup_size = PAGE_SIZE;
+ vcotbl->res.guest_memory_size = PAGE_SIZE;
num_entries = PAGE_SIZE / co_info[type].size;
if (num_entries < co_info[type].min_initial_entries) {
- vcotbl->res.backup_size = co_info[type].min_initial_entries *
+ vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
co_info[type].size;
- vcotbl->res.backup_size =
- (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
}
vcotbl->scrubbed = true;
@@ -622,7 +637,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
out_no_init:
kfree(vcotbl);
out_no_alloc:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
return ERR_PTR(ret);
}
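On the two helper-macro conversions in this commit: PFN_UP() (in the copy loop) and PFN_ALIGN() (in the sizing code above) come from the kernel's <linux/pfn.h> and expand to the same arithmetic that was previously open-coded; the definitions below are paraphrased from that header for reference.

/* bytes -> page count, rounded up */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
/* round a byte size up to the next page boundary */
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

/* e.g. with 4 KiB pages: PFN_UP(4097) == 2, PFN_ALIGN(4097) == 8192 */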
@@ -652,7 +666,7 @@ int vmw_cotable_notify(struct vmw_resource *res, int id)
}
/**
- * vmw_cotable_add_view - add a view to the cotable's list of active views.
+ * vmw_cotable_add_resource - add a view to the cotable's list of active views.
*
* @res: pointer struct vmw_resource representing the cotable.
* @head: pointer to the struct list_head member of the resource, dedicated