Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 339
1 file changed, 219 insertions, 120 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 86178796de6c..94e8982f5616 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -25,10 +25,14 @@
  *
  **************************************************************************/
 
-#include <drm/ttm/ttm_bo_api.h>
-
+#include "vmwgfx_bo.h"
 #include "vmwgfx_drv.h"
 
+#include <drm/ttm/ttm_bo.h>
+
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+
 /*
  * Size of inline command buffers. Try to make sure that a page size is a
  * multiple of the DMA pool allocation size.
@@ -45,21 +49,26 @@
  * @hw_submitted: List of command buffers submitted to hardware.
  * @preempted: List of preempted command buffers.
  * @num_hw_submitted: Number of buffers currently being processed by hardware
+ * @block_submission: Identifies a block command submission.
  */
 struct vmw_cmdbuf_context {
 	struct list_head submitted;
 	struct list_head hw_submitted;
 	struct list_head preempted;
 	unsigned num_hw_submitted;
+	bool block_submission;
 };
 
 /**
- * struct vmw_cmdbuf_man: - Command buffer manager
  *
  * @cur_mutex: Mutex protecting the command buffer used for incremental small
  * kernel command submissions, @cur.
  * @space_mutex: Mutex to protect against starvation when we allocate
  * main pool buffer space.
+ * @error_mutex: Mutex to serialize the work queue error handling.
+ * Note this is not needed if the same workqueue handler
+ * can't race with itself...
  * @work: A struct work_struct implementeing command buffer error handling.
  * Immutable.
  * @dev_priv: Pointer to the device private struct. Immutable.
@@ -71,7 +80,6 @@ struct vmw_cmdbuf_context {
  * frees are protected by @lock.
  * @cmd_space: Buffer object for the command buffer space, unless we were
  * able to make a contigous coherent DMA memory allocation, @handle. Immutable.
- * @map_obj: Mapping state for @cmd_space. Immutable.
  * @map: Pointer to command buffer space. May be a mapped buffer object or
  * a contigous coherent DMA memory allocation. Immutable.
  * @cur: Command buffer for small kernel command submissions. Protected by
@@ -81,11 +89,10 @@
  * @max_hw_submitted: Max number of in-flight command buffers the device can
  * handle. Immutable.
  * @lock: Spinlock protecting command submission queues.
- * @header: Pool of DMA memory for device command buffer headers.
+ * @headers: Pool of DMA memory for device command buffer headers.
  * Internal protection.
  * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  * space for inline data. Internal protection.
- * @tasklet: Tasklet struct for irq processing. Immutable.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  * space.
  * @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -98,17 +105,18 @@
  * @handle: DMA address handle for the command buffer space if @using_mob is
  * false. Immutable.
  * @size: The size of the command buffer space. Immutable.
+ * @num_contexts: Number of contexts actually enabled.
  */
 struct vmw_cmdbuf_man {
 	struct mutex cur_mutex;
 	struct mutex space_mutex;
+	struct mutex error_mutex;
 	struct work_struct work;
 	struct vmw_private *dev_priv;
 	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 	struct list_head error;
 	struct drm_mm mm;
-	struct ttm_buffer_object *cmd_space;
-	struct ttm_bo_kmap_obj map_obj;
+	struct vmw_bo *cmd_space;
 	u8 *map;
 	struct vmw_cmdbuf_header *cur;
 	size_t cur_pos;
@@ -117,7 +125,6 @@
 	spinlock_t lock;
 	struct dma_pool *headers;
 	struct dma_pool *dheaders;
-	struct tasklet_struct tasklet;
 	wait_queue_head_t alloc_queue;
 	wait_queue_head_t idle_queue;
 	bool irq_on;
@@ -125,6 +132,7 @@
 	bool has_pool;
 	dma_addr_t handle;
 	size_t size;
+	u32 num_contexts;
 };
 
 /**
@@ -135,7 +143,7 @@
  * @cb_context: The device command buffer context.
  * @list: List head for attaching to the manager lists.
  * @node: The range manager node.
- * @handle. The DMA address of @cb_header. Handed to the device on command
+ * @handle: The DMA address of @cb_header. Handed to the device on command
  * buffer submission.
  * @cmd: Pointer to the command buffer space of this buffer.
  * @size: Size of the command buffer space of this buffer.
@@ -181,12 +189,13 @@
 };
 
 /* Loop over each context in the command buffer manager. */
-#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
-	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
 	     ++(_i), ++(_ctx))
 
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
-
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+				bool enable);
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
 
 /**
  * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
@@ -240,7 +249,7 @@ static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
  * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
  * associated structures.
  *
- * header: Pointer to the header to free.
+ * @header: Pointer to the header to free.
  *
 * For internal use. Must be called with man::lock held.
  */
@@ -278,14 +287,14 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 		vmw_cmdbuf_header_inline_free(header);
 		return;
 	}
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	__vmw_cmdbuf_header_free(header);
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 }
 
 
 /**
- * vmw_cmbuf_header_submit: Submit a command buffer to hardware.
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
  *
  * @header: The header of the buffer to submit.
  */
@@ -331,7 +340,8 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 				  struct vmw_cmdbuf_context *ctx)
 {
 	while (ctx->num_hw_submitted < man->max_hw_submitted &&
-	       !list_empty(&ctx->submitted)) {
+	       !list_empty(&ctx->submitted) &&
+	       !ctx->block_submission) {
 		struct vmw_cmdbuf_header *entry;
 		SVGACBStatus status;
 
@@ -347,18 +357,18 @@
 			break;
 		}
 
-		list_del(&entry->list);
-		list_add_tail(&entry->list, &ctx->hw_submitted);
+		list_move_tail(&entry->list, &ctx->hw_submitted);
 		ctx->num_hw_submitted++;
 	}
 }
 
 /**
- * vmw_cmdbuf_ctx_submit: Process a command buffer context.
+ * vmw_cmdbuf_ctx_process - Process a command buffer context.
  *
  * @man: The command buffer manager.
  * @ctx: The command buffer context.
+ * @notempty: Pass back count of non-empty command submitted lists.
  *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
@@ -386,12 +396,18 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 			__vmw_cmdbuf_header_free(entry);
 			break;
 		case SVGA_CB_STATUS_COMMAND_ERROR:
-		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			WARN_ONCE(true, "Command buffer error.\n");
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 			list_add_tail(&entry->list, &man->error);
 			schedule_work(&man->work);
 			break;
 		case SVGA_CB_STATUS_PREEMPTED:
-			list_add(&entry->list, &ctx->preempted);
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			list_add_tail(&entry->list, &ctx->preempted);
+			break;
+		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			WARN_ONCE(true, "Command buffer header error.\n");
+			__vmw_cmdbuf_header_free(entry);
 			break;
 		default:
 			WARN_ONCE(true, "Undefined command buffer status.\n");
@@ -468,20 +484,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 }
 
 /**
- * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
- * handler implemented as a tasklet.
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler implemented as a threaded irq task.
  *
- * @data: Tasklet closure. A pointer to the command buffer manager cast to
- * an unsigned long.
+ * @man: Pointer to the command buffer manager.
 *
- * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
  */
-static void vmw_cmdbuf_man_tasklet(unsigned long data)
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
 {
-	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
-
 	spin_lock(&man->lock);
 	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
@@ -501,29 +514,111 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	struct vmw_cmdbuf_man *man =
 		container_of(work, struct vmw_cmdbuf_man, work);
 	struct vmw_cmdbuf_header *entry, *next;
-	uint32_t dummy;
-	bool restart = false;
+	uint32_t dummy = 0;
+	bool send_fence = false;
+	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+	int i;
+	struct vmw_cmdbuf_context *ctx;
+	bool global_block = false;
 
-	spin_lock_bh(&man->lock);
+	for_each_cmdbuf_ctx(man, i, ctx)
+		INIT_LIST_HEAD(&restart_head[i]);
+
+	mutex_lock(&man->error_mutex);
+	spin_lock(&man->lock);
 	list_for_each_entry_safe(entry, next, &man->error, list) {
-		restart = true;
-		DRM_ERROR("Command buffer error.\n");
+		SVGACBHeader *cb_hdr = entry->cb_header;
+		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
+			(entry->cmd + cb_hdr->errorOffset);
+		u32 error_cmd_size, new_start_offset;
+		const char *cmd_name;
+
+		list_del_init(&entry->list);
+		global_block = true;
+
+		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+			VMW_DEBUG_USER("Unknown command causing device error.\n");
+			VMW_DEBUG_USER("Command buffer offset is %lu\n",
+				       (unsigned long) cb_hdr->errorOffset);
+			__vmw_cmdbuf_header_free(entry);
+			send_fence = true;
+			continue;
+		}
 
-		list_del(&entry->list);
-		__vmw_cmdbuf_header_free(entry);
-		wake_up_all(&man->idle_queue);
+		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
+			       cmd_name);
+		VMW_DEBUG_USER("Command buffer offset is %lu\n",
+			       (unsigned long) cb_hdr->errorOffset);
+		VMW_DEBUG_USER("Command size is %lu\n",
+			       (unsigned long) error_cmd_size);
+
+		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
+
+		if (new_start_offset >= cb_hdr->length) {
+			__vmw_cmdbuf_header_free(entry);
+			send_fence = true;
+			continue;
+		}
+
+		if (man->using_mob)
+			cb_hdr->ptr.mob.mobOffset += new_start_offset;
+		else
+			cb_hdr->ptr.pa += (u64) new_start_offset;
+
+		entry->cmd += new_start_offset;
+		cb_hdr->length -= new_start_offset;
+		cb_hdr->errorOffset = 0;
+		cb_hdr->offset = 0;
+
+		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
+	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		man->ctx[i].block_submission = true;
+
+	spin_unlock(&man->lock);
+
+	/* Preempt all contexts */
+	if (global_block && vmw_cmdbuf_preempt(man, 0))
+		DRM_ERROR("Failed preempting command buffer contexts\n");
+
+	spin_lock(&man->lock);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		/* Move preempted command buffers to the preempted queue. */
+		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
+
+		/*
+		 * Add the preempted queue after the command buffer
+		 * that caused an error.
+		 */
+		list_splice_init(&ctx->preempted, restart_head[i].prev);
+
+		/*
+		 * Finally add all command buffers first in the submitted
+		 * queue, to rerun them.
+		 */
+
+		ctx->block_submission = false;
+		list_splice_init(&restart_head[i], &ctx->submitted);
 	}
-	spin_unlock_bh(&man->lock);
 
-	if (restart && vmw_cmdbuf_startstop(man, true))
-		DRM_ERROR("Failed restarting command buffer context 0.\n");
+	vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+
+	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
+		DRM_ERROR("Failed restarting command buffer contexts\n");
 
 	/* Send a new fence in case one was removed */
-	vmw_fifo_send_fence(man->dev_priv, &dummy);
+	if (send_fence) {
+		vmw_cmd_send_fence(man->dev_priv, &dummy);
+		wake_up_all(&man->idle_queue);
+	}
+
+	mutex_unlock(&man->error_mutex);
 }
 
 /**
- * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle.
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
  *
  * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
@@ -536,7 +631,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 	bool idle = false;
 	int i;
 
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	vmw_cmdbuf_man_process(man);
 	for_each_cmdbuf_ctx(man, i, ctx) {
 		if (!list_empty(&ctx->submitted) ||
@@ -548,7 +643,7 @@
 	idle = list_empty(&man->error);
 
 out_unlock:
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 
 	return idle;
 }
@@ -566,12 +661,12 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 {
 	struct vmw_cmdbuf_header *cur = man->cur;
 
-	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+	lockdep_assert_held_once(&man->cur_mutex);
 
 	if (!cur)
 		return;
 
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	if (man->cur_pos == 0) {
 		__vmw_cmdbuf_header_free(cur);
 		goto out_unlock;
@@ -580,7 +675,7 @@
 	man->cur->cb_header->length = man->cur_pos;
 	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 out_unlock:
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 	man->cur = NULL;
 	man->cur_pos = 0;
 }
@@ -671,16 +766,16 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 
 	if (info->done)
 		return true;
- 
+
 	memset(info->node, 0, sizeof(*info->node));
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	if (ret) {
 		vmw_cmdbuf_man_process(man);
 		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	}
 
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 	info->done = !ret;
 
 	return info->done;
@@ -705,7 +800,7 @@ static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 {
 	struct vmw_cmdbuf_alloc_info info;
 
-	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	info.page_size = PFN_UP(size);
 	info.node = node;
 	info.done = false;
 
@@ -792,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	header->cmd = man->map + offset;
 	if (man->using_mob) {
 		cb_hdr->flags = SVGA_CB_FLAG_MOB;
-		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+		cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
 		cb_hdr->ptr.mob.mobOffset = offset;
 	} else {
 		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -801,9 +896,9 @@
 	return 0;
 
 out_no_cb_header:
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	drm_mm_remove_node(&header->node);
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 
 	return ret;
 }
@@ -951,7 +1046,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
 {
 	struct vmw_cmdbuf_header *cur = man->cur;
 
-	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+	lockdep_assert_held_once(&man->cur_mutex);
 
 	WARN_ON(size > cur->reserved);
 	man->cur_pos += size;
@@ -1023,18 +1118,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
 	vmw_cmdbuf_cur_unlock(man);
 }
 
-/**
- * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
- *
- * @man: The command buffer manager.
- */
-void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
-{
-	if (!man)
-		return;
-
-	tasklet_schedule(&man->tasklet);
-}
 
 /**
  * vmw_cmdbuf_send_device_command - Send a command through the device context.
@@ -1059,9 +1142,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
 	memcpy(cmd, command, size);
 	header->cb_header->length = size;
 	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
-	spin_lock_bh(&man->lock);
+	spin_lock(&man->lock);
 	status = vmw_cmdbuf_header_submit(header);
-	spin_unlock_bh(&man->lock);
+	spin_unlock(&man->lock);
 	vmw_cmdbuf_header_free(header);
 
 	if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1074,15 +1157,40 @@
 }
 
 /**
+ * vmw_cmdbuf_preempt - Send a preempt command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @context: Device context to pass command through.
+ *
+ * Synchronously sends a preempt command.
+ */
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdPreempt body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_PREEMPT;
+	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+	cmd.body.ignoreIDZero = 0;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+
+/**
  * vmw_cmdbuf_startstop - Send a start / stop command through the device
  * context.
  *
  * @man: The command buffer manager.
+ * @context: Device context to start/stop.
  * @enable: Whether to enable or disable the context.
  *
 * Synchronously sends a device start / stop context command.
  */
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
 				bool enable)
 {
 	struct {
@@ -1092,7 +1200,7 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
 
 	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
 	cmd.body.enable = (enable) ? 1 : 0;
-	cmd.body.context = SVGA_CB_CONTEXT_0;
+	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
 
 	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
 }
@@ -1102,21 +1210,16 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
  *
  * @man: The command buffer manager.
  * @size: The size of the main space pool.
- * @default_size: The default size of the command buffer for small kernel
- * submissions.
  *
- * Set the size and allocate the main command buffer space pool,
- * as well as the default size of the command buffer for
- * small kernel submissions. If successful, this enables large command
- * submissions. Note that this function requires that rudimentary command
+ * Set the size and allocate the main command buffer space pool.
+ * If successful, this enables large command submissions.
+ * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
  */
-int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
-			     size_t size, size_t default_size)
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
 {
 	struct vmw_private *dev_priv = man->dev_priv;
-	bool dummy;
 	int ret;
 
 	if (man->has_pool)
@@ -1124,33 +1227,34 @@
 
 	/* First, try to allocate a huge chunk of DMA memory */
 	size = PAGE_ALIGN(size);
-	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
 				      &man->handle, GFP_KERNEL);
 	if (man->map) {
 		man->using_mob = false;
 	} else {
+		struct vmw_bo_params bo_params = {
+			.domain = VMW_BO_DOMAIN_MOB,
+			.busy_domain = VMW_BO_DOMAIN_MOB,
+			.bo_type = ttm_bo_type_kernel,
+			.size = size,
+			.pin = true
+		};
 		/*
 		 * DMA memory failed. If we can have command buffers in a
 		 * MOB, try to use that instead. Note that this will
 		 * actually call into the already enabled manager, when
 		 * binding the MOB.
 		 */
-		if (!(dev_priv->capabilities & SVGA_CAP_DX))
+		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+		    !dev_priv->has_mob)
 			return -ENOMEM;
 
-		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
-				    &vmw_mob_ne_placement, 0, false, NULL,
-				    &man->cmd_space);
+		ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
 		if (ret)
 			return ret;
 
-		man->using_mob = true;
-		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
-				  &man->map_obj);
-		if (ret)
-			goto out_no_map;
-
-		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+		man->map = vmw_bo_map_and_cache(man->cmd_space);
+		man->using_mob = man->map;
 	}
 
 	man->size = size;
@@ -1165,16 +1269,11 @@
 	 * submissions to be able to free up space.
 	 */
 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
-	DRM_INFO("Using command buffers with %s pool.\n",
+	drm_info(&dev_priv->drm,
+		 "Using command buffers with %s pool.\n",
 		 (man->using_mob) ? "MOB" : "DMA");
 
 	return 0;
-
-out_no_map:
-	if (man->using_mob)
-		ttm_bo_unref(&man->cmd_space);
-
-	return ret;
 }
 
 /**
@@ -1191,7 +1290,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 {
 	struct vmw_cmdbuf_man *man;
 	struct vmw_cmdbuf_context *ctx;
-	int i;
+	unsigned int i;
 	int ret;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
@@ -1201,8 +1300,10 @@
 	if (!man)
 		return ERR_PTR(-ENOMEM);
 
+	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
+		2 : 1;
 	man->headers = dma_pool_create("vmwgfx cmdbuf",
-				       &dev_priv->dev->pdev->dev,
+				       dev_priv->drm.dev,
 				       sizeof(SVGACBHeader), 64, PAGE_SIZE);
 	if (!man->headers) {
@@ -1211,7 +1312,7 @@
 	}
 
 	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
-					&dev_priv->dev->pdev->dev,
+					dev_priv->drm.dev,
 					sizeof(struct vmw_cmdbuf_dheader),
 					64, PAGE_SIZE);
 	if (!man->dheaders) {
@@ -1226,8 +1327,7 @@
 	spin_lock_init(&man->lock);
 	mutex_init(&man->cur_mutex);
 	mutex_init(&man->space_mutex);
-	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
-		     (unsigned long) man);
+	mutex_init(&man->error_mutex);
 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
 	init_waitqueue_head(&man->alloc_queue);
 	init_waitqueue_head(&man->idle_queue);
@@ -1236,9 +1336,9 @@
 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
 			       &dev_priv->error_waiters);
-	ret = vmw_cmdbuf_startstop(man, true);
+	ret = vmw_cmdbuf_startstop(man, 0, true);
 	if (ret) {
-		DRM_ERROR("Failed starting command buffer context 0.\n");
+		DRM_ERROR("Failed starting command buffer contexts\n");
 		vmw_cmdbuf_man_destroy(man);
 		return ERR_PTR(ret);
 	}
@@ -1272,13 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
 	man->has_pool = false;
 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
-	if (man->using_mob) {
-		(void) ttm_bo_kunmap(&man->map_obj);
-		ttm_bo_unref(&man->cmd_space);
-	} else {
-		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+	if (man->using_mob)
+		vmw_bo_unreference(&man->cmd_space);
+	else
+		dma_free_coherent(man->dev_priv->drm.dev,
 				  man->size, man->map, man->handle);
-	}
 }
 
 /**
@@ -1292,16 +1390,17 @@ void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
 {
 	WARN_ON_ONCE(man->has_pool);
 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
-	if (vmw_cmdbuf_startstop(man, false))
-		DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+	if (vmw_cmdbuf_startstop(man, 0, false))
+		DRM_ERROR("Failed stopping command buffer contexts.\n");
 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
 				  &man->dev_priv->error_waiters);
-	tasklet_kill(&man->tasklet);
 	(void) cancel_work_sync(&man->work);
 	dma_pool_destroy(man->dheaders);
 	dma_pool_destroy(man->headers);
 	mutex_destroy(&man->cur_mutex);
 	mutex_destroy(&man->space_mutex);
+	mutex_destroy(&man->error_mutex);
 	kfree(man);
 }
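The heart of this diff is the reworked error handling in vmw_cmdbuf_work_func(): on a command buffer error the driver no longer drops every queued buffer. It looks up the size of the failing command with vmw_cmd_describe(), advances the buffer header past that command, blocks and preempts the contexts, and then resubmits the remainder. The stand-alone sketch below illustrates only that offset arithmetic; the struct and helper names are simplified stand-ins for this illustration, not the driver's real SVGACBHeader or its fields.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for SVGACBHeader: only the fields the rewind needs. */
struct cmd_buf_hdr {
	uint32_t error_offset;	/* byte offset of the command that failed */
	uint32_t length;	/* bytes of commands in the buffer */
	uint64_t base;		/* device address of the buffer start */
};

/*
 * Skip past the failing command, mirroring the arithmetic in
 * vmw_cmdbuf_work_func(): restart right after the bad command, or report
 * failure (false) when nothing follows it and the buffer must be dropped.
 */
static bool restart_after_error(struct cmd_buf_hdr *hdr, uint32_t error_cmd_size)
{
	uint32_t new_start = hdr->error_offset + error_cmd_size;

	if (new_start >= hdr->length)
		return false;		/* the failing command was the last one */

	hdr->base += new_start;		/* device resumes at the next command */
	hdr->length -= new_start;
	hdr->error_offset = 0;
	return true;
}

int main(void)
{
	/* Hypothetical buffer: 256 bytes of commands, a 32-byte command failed at offset 64. */
	struct cmd_buf_hdr hdr = { .error_offset = 64, .length = 256, .base = 0x1000 };

	if (restart_after_error(&hdr, 32))
		printf("resubmit at 0x%llx, %u bytes left\n",
		       (unsigned long long)hdr.base, hdr.length);
	else
		printf("nothing left to run, drop the buffer\n");
	return 0;
}

With the values above the buffer restarts 96 bytes in with 160 bytes left to execute; had the failing command been the last one, the header would simply be freed and a fence sent, which is what the work function's send_fence path does.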

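A second theme in the diff is multi-context support: for_each_cmdbuf_ctx() now iterates man->num_contexts entries (2 when the device reports SVGA_CAP_HP_CMD_QUEUE, otherwise 1) instead of always walking SVGA_CB_CONTEXT_MAX, and the start/stop and preempt device commands address a context as SVGA_CB_CONTEXT_0 + context. The toy program below only mirrors that capability-driven iteration; the capability bit value and structures here are illustrative, not taken from the SVGA headers.

#include <stdio.h>

#define MAX_CONTEXTS		2		/* stand-in for SVGA_CB_CONTEXT_MAX */
#define CAP_HP_CMD_QUEUE	(1u << 0)	/* illustrative capability bit, not the real value */

struct cmdbuf_ctx {
	int num_submitted;
};

struct cmdbuf_man {
	unsigned int num_contexts;
	struct cmdbuf_ctx ctx[MAX_CONTEXTS];
};

/* Same shape as the driver's for_each_cmdbuf_ctx(): bounded by num_contexts. */
#define for_each_ctx(_man, _i, _ctx) \
	for ((_i) = 0, (_ctx) = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))

int main(void)
{
	unsigned int caps = CAP_HP_CMD_QUEUE;	/* pretend the device has the HP queue */
	struct cmdbuf_man man = {
		.num_contexts = (caps & CAP_HP_CMD_QUEUE) ? 2 : 1,
	};
	struct cmdbuf_ctx *ctx;
	unsigned int i;

	for_each_ctx(&man, i, ctx)
		printf("context %u has %d submitted buffers\n",
		       i, ctx->num_submitted);	/* device context SVGA_CB_CONTEXT_0 + i */

	return 0;
}

Clearing caps makes num_contexts 1 and the loop touches only the first context, which is how the manager keeps behaving on devices without the high-priority command queue.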