Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 130
1 file changed, 61 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 48d1380a952e..94e8982f5616 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,10 +25,14 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_bo_api.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_bo.h>
+
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+
/*
* Size of inline command buffers. Try to make sure that a page size is a
* multiple of the DMA pool allocation size.
@@ -45,6 +49,7 @@
* @hw_submitted: List of command buffers submitted to hardware.
* @preempted: List of preempted command buffers.
* @num_hw_submitted: Number of buffers currently being processed by hardware
+ * @block_submission: Identifies a block command submission.
*/
struct vmw_cmdbuf_context {
struct list_head submitted;
@@ -55,7 +60,7 @@ struct vmw_cmdbuf_context {
};
/**
- * struct vmw_cmdbuf_man: - Command buffer manager
+ * struct vmw_cmdbuf_man - Command buffer manager
*
* @cur_mutex: Mutex protecting the command buffer used for incremental small
* kernel command submissions, @cur.
@@ -75,7 +80,6 @@ struct vmw_cmdbuf_context {
* frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were
* able to make a contigous coherent DMA memory allocation, @handle. Immutable.
- * @map_obj: Mapping state for @cmd_space. Immutable.
* @map: Pointer to command buffer space. May be a mapped buffer object or
* a contigous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by
@@ -85,7 +89,7 @@ struct vmw_cmdbuf_context {
* @max_hw_submitted: Max number of in-flight command buffers the device can
* handle. Immutable.
* @lock: Spinlock protecting command submission queues.
- * @header: Pool of DMA memory for device command buffer headers.
+ * @headers: Pool of DMA memory for device command buffer headers.
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
@@ -112,8 +116,7 @@ struct vmw_cmdbuf_man {
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error;
struct drm_mm mm;
- struct ttm_buffer_object *cmd_space;
- struct ttm_bo_kmap_obj map_obj;
+ struct vmw_bo *cmd_space;
u8 *map;
struct vmw_cmdbuf_header *cur;
size_t cur_pos;
@@ -140,7 +143,7 @@ struct vmw_cmdbuf_man {
* @cb_context: The device command buffer context.
* @list: List head for attaching to the manager lists.
* @node: The range manager node.
- * @handle. The DMA address of @cb_header. Handed to the device on command
+ * @handle: The DMA address of @cb_header. Handed to the device on command
* buffer submission.
* @cmd: Pointer to the command buffer space of this buffer.
* @size: Size of the command buffer space of this buffer.
@@ -246,7 +249,7 @@ static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
* __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
* associated structures.
*
- * header: Pointer to the header to free.
+ * @header: Pointer to the header to free.
*
* For internal use. Must be called with man::lock held.
*/
@@ -291,7 +294,7 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
/**
- * vmw_cmbuf_header_submit: Submit a command buffer to hardware.
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
*
* @header: The header of the buffer to submit.
*/
@@ -354,18 +357,18 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
break;
}
- list_del(&entry->list);
- list_add_tail(&entry->list, &ctx->hw_submitted);
+ list_move_tail(&entry->list, &ctx->hw_submitted);
ctx->num_hw_submitted++;
}
}
/**
- * vmw_cmdbuf_ctx_submit: Process a command buffer context.
+ * vmw_cmdbuf_ctx_process - Process a command buffer context.
*
* @man: The command buffer manager.
* @ctx: The command buffer context.
+ * @notempty: Pass back count of non-empty command submitted lists.
*
* Submit command buffers to hardware if possible, and process finished
* buffers. Typically freeing them, but on preemption or error take
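The list_del()/list_add_tail() pair in the hunk above collapses into a single list_move_tail(), which performs exactly that deletion and tail re-insertion. A minimal sketch of the idiom (the helper name is illustrative, not from the driver):

    #include <linux/list.h>

    /*
     * Move an entry from whatever list it is on to the tail of
     * another: list_move_tail() does the list_del() + list_add_tail()
     * internally, so the open-coded pair is redundant.
     */
    static void requeue_entry(struct list_head *entry,
                              struct list_head *dst_queue)
    {
            /* Was: list_del(entry); list_add_tail(entry, dst_queue); */
            list_move_tail(entry, dst_queue);
    }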
@@ -393,6 +396,7 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
+ WARN_ONCE(true, "Command buffer error.\n");
entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
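The new WARN_ONCE(true, ...) above emits a stack trace the first time a command buffer error is seen and stays quiet on later hits of the same call site. A hedged illustration of the macro's semantics (the example function is hypothetical):

    #include <linux/bug.h>

    /* Warns, with backtrace, only on the first nonzero status. */
    static void check_status(int status)
    {
            /* First argument is the condition; repeat hits at this
             * site are suppressed after the first warning. */
            WARN_ONCE(status != 0, "unexpected status %d\n", status);
    }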
@@ -510,18 +514,15 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
struct vmw_cmdbuf_man *man =
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
- uint32_t dummy;
- bool restart[SVGA_CB_CONTEXT_MAX];
+ uint32_t dummy = 0;
bool send_fence = false;
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
bool global_block = false;
- for_each_cmdbuf_ctx(man, i, ctx) {
+ for_each_cmdbuf_ctx(man, i, ctx)
INIT_LIST_HEAD(&restart_head[i]);
- restart[i] = false;
- }
mutex_lock(&man->error_mutex);
spin_lock(&man->lock);
@@ -533,23 +534,23 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
const char *cmd_name;
list_del_init(&entry->list);
- restart[entry->cb_context] = true;
global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
- DRM_ERROR("Unknown command causing device error.\n");
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Unknown command causing device error.\n");
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
__vmw_cmdbuf_header_free(entry);
send_fence = true;
continue;
}
- DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
- DRM_ERROR("Command size is %lu\n",
- (unsigned long) error_cmd_size);
+ VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
+ cmd_name);
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
new_start_offset = cb_hdr->errorOffset + error_cmd_size;
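These conversions demote user-triggerable command errors from DRM_ERROR() to the driver's VMW_DEBUG_USER() macro, so buggy or malicious userspace can no longer spam dmesg at error level. A sketch of how such a wrapper is typically defined (an assumption about the shape of the macro in vmwgfx_drv.h, not quoted from it):

    /*
     * Assumed definition: route user-triggerable errors through the
     * DRM debug machinery (enabled via the drm.debug module parameter)
     * rather than logging unconditionally.
     */
    #define VMW_DEBUG_USER(fmt, ...) \
            DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)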
@@ -609,7 +610,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
/* Send a new fence in case one was removed */
if (send_fence) {
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ vmw_cmd_send_fence(man->dev_priv, &dummy);
wake_up_all(&man->idle_queue);
}
@@ -617,7 +618,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
}
/**
- * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle.
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
*
* @man: The command buffer manager.
* @check_preempted: Check also the preempted queue for pending command buffers.
@@ -765,7 +766,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
if (info->done)
return true;
-
+
memset(info->node, 0, sizeof(*info->node));
spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
@@ -799,7 +800,7 @@ static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
{
struct vmw_cmdbuf_alloc_info info;
- info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ info.page_size = PFN_UP(size);
info.node = node;
info.done = false;
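PFN_UP() is the canonical helper for the round-up byte-to-page-count conversion and is equivalent to the open-coded PAGE_ALIGN(size) >> PAGE_SHIFT it replaces. A small sketch of the arithmetic (the helper is illustrative):

    #include <linux/pfn.h>          /* PFN_UP() */

    /*
     * Pages needed to hold @size bytes.  With 4 KiB pages:
     *   bytes_to_pages(1) == bytes_to_pages(4096) == 1,
     *   bytes_to_pages(4097) == 2.
     */
    static inline unsigned long bytes_to_pages(size_t size)
    {
            /* PFN_UP(size) == PAGE_ALIGN(size) >> PAGE_SHIFT */
            return PFN_UP(size);
    }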
@@ -886,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
- cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -1160,6 +1161,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
* context.
*
* @man: The command buffer manager.
+ * @context: Device context to pass command through.
*
* Synchronously sends a preempt command.
*/
@@ -1183,6 +1185,7 @@ static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
* context.
*
* @man: The command buffer manager.
+ * @context: Device context to start/stop.
* @enable: Whether to enable or disable the context.
*
* Synchronously sends a device start / stop context command.
@@ -1207,21 +1210,16 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
*
* @man: The command buffer manager.
* @size: The size of the main space pool.
- * @default_size: The default size of the command buffer for small kernel
- * submissions.
*
- * Set the size and allocate the main command buffer space pool,
- * as well as the default size of the command buffer for
- * small kernel submissions. If successful, this enables large command
- * submissions. Note that this function requires that rudimentary command
+ * Set the size and allocate the main command buffer space pool.
+ * If successful, this enables large command submissions.
+ * Note that this function requires that rudimentary command
* submission is already available and that the MOB memory manager is alive.
* Returns 0 on success. Negative error code on failure.
*/
-int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
- size_t size, size_t default_size)
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
- bool dummy;
int ret;
if (man->has_pool)
@@ -1229,33 +1227,34 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
/* First, try to allocate a huge chunk of DMA memory */
size = PAGE_ALIGN(size);
- man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+ man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
&man->handle, GFP_KERNEL);
if (man->map) {
man->using_mob = false;
} else {
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
+ .bo_type = ttm_bo_type_kernel,
+ .size = size,
+ .pin = true
+ };
/*
* DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will
* actually call into the already enabled manager, when
* binding the MOB.
*/
- if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+ !dev_priv->has_mob)
return -ENOMEM;
- ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
- &vmw_mob_ne_placement, 0, false,
- &man->cmd_space);
+ ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
if (ret)
return ret;
- man->using_mob = true;
- ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
- &man->map_obj);
- if (ret)
- goto out_no_map;
-
- man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+ man->map = vmw_bo_map_and_cache(man->cmd_space);
+ man->using_mob = man->map;
}
man->size = size;
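The rewritten pool setup above tries a single large dma_alloc_coherent() chunk first and falls back to a pinned MOB-backed buffer object only when the device has DX and MOB support. On the coherent path, allocation and release must use the same device, size, virtual address, and DMA handle; a self-contained sketch of that pairing (helper names are illustrative):

    #include <linux/dma-mapping.h>

    /*
     * dma_alloc_coherent() returns the CPU virtual address and fills
     * in the DMA address handed to the device (as with man->handle).
     */
    static void *alloc_cmd_space(struct device *dev, size_t size,
                                 dma_addr_t *handle)
    {
            return dma_alloc_coherent(dev, PAGE_ALIGN(size), handle,
                                      GFP_KERNEL);
    }

    static void free_cmd_space(struct device *dev, size_t size,
                               void *vaddr, dma_addr_t handle)
    {
            dma_free_coherent(dev, PAGE_ALIGN(size), vaddr, handle);
    }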
@@ -1270,16 +1269,11 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
* submissions to be able to free up space.
*/
man->default_size = VMW_CMDBUF_INLINE_SIZE;
- DRM_INFO("Using command buffers with %s pool.\n",
+ drm_info(&dev_priv->drm,
+ "Using command buffers with %s pool.\n",
(man->using_mob) ? "MOB" : "DMA");
return 0;
-
-out_no_map:
- if (man->using_mob)
- ttm_bo_unref(&man->cmd_space);
-
- return ret;
}
/**
@@ -1309,7 +1303,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(SVGACBHeader),
64, PAGE_SIZE);
if (!man->headers) {
@@ -1318,7 +1312,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
}
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
- &dev_priv->dev->pdev->dev,
+ dev_priv->drm.dev,
sizeof(struct vmw_cmdbuf_dheader),
64, PAGE_SIZE);
if (!man->dheaders) {
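Both pools now hang off the embedded DRM device's struct device (dev_priv->drm.dev) instead of the removed dev->pdev pointer chain. For reference, the general dma_pool lifecycle for small, aligned blocks such as these headers (a sketch; names and sizes are illustrative):

    #include <linux/dmapool.h>

    static int example_pool_use(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            /* name, device, block size, alignment, boundary */
            pool = dma_pool_create("example hdrs", dev, 64, 64, PAGE_SIZE);
            if (!pool)
                    return -ENOMEM;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (vaddr)
                    dma_pool_free(pool, vaddr, dma);

            dma_pool_destroy(pool);
            return 0;
    }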
@@ -1378,13 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- if (man->using_mob) {
- (void) ttm_bo_kunmap(&man->map_obj);
- ttm_bo_unref(&man->cmd_space);
- } else {
- dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+ if (man->using_mob)
+ vmw_bo_unreference(&man->cmd_space);
+ else
+ dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
- }
}
/**