author     Thomas Hellström <thomas.hellstrom@linux.intel.com>    2023-09-08 11:17:12 +0200
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>                  2023-12-21 11:41:07 -0500
commit     d00e9cc28e1e42108618e7a146969a26679170a2 (patch)
tree       8a2c8a9e74ab0ad9dec8308f164e8e624ef94f6a /drivers/gpu/drm/xe/xe_vm.c
parent     08a4f00e62bc96eabf7d876933f84600a3dc5e69 (diff)
drm/xe/vm: Simplify and document xe_vm_lock()
The xe_vm_lock() function was unnecessarily using ttm_eu_reserve_buffers().
Simplify and document the interface.

v4:
- Improve on xe_vm_lock() documentation (Matthew Brost)
v5:
- Rebase conflict.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230908091716.36984-3-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
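As a minimal before/after caller sketch of the interface change: the error-handling
skeleton and the vm variable are illustrative only; the two signatures themselves
come from this patch.

        /* Before this patch: callers supplied TTM/ww-mutex machinery. */
        struct ww_acquire_ctx ww;
        int err;

        err = xe_vm_lock(vm, &ww, 0, true);
        if (err)
                return err;
        /* ... access state protected by the vm's dma_resv ... */
        xe_vm_unlock(vm, &ww);

        /* After this patch: the call maps directly onto the vm's dma_resv. */
        err = xe_vm_lock(vm, true);     /* interruptible; may return -EINTR */
        if (err)
                return err;
        /* ... access state protected by the vm's dma_resv ... */
        xe_vm_unlock(vm);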
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2bd1fa34256a..0ac421c4e184 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -523,18 +523,17 @@ void xe_vm_unlock_dma_resv(struct xe_vm *vm,
 
 static void xe_vm_kill(struct xe_vm *vm)
 {
-        struct ww_acquire_ctx ww;
         struct xe_exec_queue *q;
 
         lockdep_assert_held(&vm->lock);
 
-        xe_vm_lock(vm, &ww, 0, false);
+        xe_vm_lock(vm, false);
         vm->flags |= XE_VM_FLAG_BANNED;
         trace_xe_vm_kill(vm);
 
         list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
                 q->ops->kill(q);
-        xe_vm_unlock(vm, &ww);
+        xe_vm_unlock(vm);
 
         /* TODO: Inform user the VM is banned */
 }
@@ -1420,7 +1419,6 @@ static void xe_vm_close(struct xe_vm *vm)
 void xe_vm_close_and_put(struct xe_vm *vm)
 {
         LIST_HEAD(contested);
-        struct ww_acquire_ctx ww;
         struct xe_device *xe = vm->xe;
         struct xe_tile *tile;
         struct xe_vma *vma, *next_vma;
@@ -1443,7 +1441,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
         }
 
         down_write(&vm->lock);
-        xe_vm_lock(vm, &ww, 0, false);
+        xe_vm_lock(vm, false);
         drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
                 vma = gpuva_to_vma(gpuva);
 
@@ -1488,7 +1486,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
                         vm->pt_root[id] = NULL;
                 }
         }
-        xe_vm_unlock(vm, &ww);
+        xe_vm_unlock(vm);
 
         /*
          * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
@@ -3442,30 +3440,32 @@ free_objs:
         return err == -ENODATA ? 0 : err;
 }
 
-/*
- * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
- * directly to optimize. Also this likely should be an inline function.
+/**
+ * xe_vm_lock() - Lock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be locked
+ * @intr: Whether to perform any wait interruptible
+ *
+ * Return: 0 on success, -EINTR if @intr is true and the wait for a
+ * contended lock was interrupted. If @intr is false, the function
+ * always returns 0.
  */
-int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
-               int num_resv, bool intr)
+int xe_vm_lock(struct xe_vm *vm, bool intr)
 {
-        struct ttm_validate_buffer tv_vm;
-        LIST_HEAD(objs);
-        LIST_HEAD(dups);
+        if (intr)
+                return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
 
-        XE_WARN_ON(!ww);
-
-        tv_vm.num_shared = num_resv;
-        tv_vm.bo = xe_vm_ttm_bo(vm);
-        list_add_tail(&tv_vm.head, &objs);
-
-        return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
+        return dma_resv_lock(xe_vm_resv(vm), NULL);
 }
 
-void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
+/**
+ * xe_vm_unlock() - Unlock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be released.
+ *
+ * Unlock a buffer object lock that was locked by xe_vm_lock().
+ */
+void xe_vm_unlock(struct xe_vm *vm)
 {
         dma_resv_unlock(xe_vm_resv(vm));
-        ww_acquire_fini(ww);
 }
 
 /**
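The kernel-doc above fixes the return contract: only an interruptible lock attempt
can fail. A hedged caller sketch under that contract (the helper name is
hypothetical, not part of this patch):

        /* Hypothetical helper, illustrating the documented return contract. */
        static int example_with_vm_locked(struct xe_vm *vm, bool intr)
        {
                int err;

                err = xe_vm_lock(vm, intr);
                if (err)                /* only reachable when intr == true */
                        return err;     /* -EINTR: interrupted while waiting */

                /*
                 * The vm's dma_resv is held here; reservation-protected
                 * state may be read or updated.
                 */

                xe_vm_unlock(vm);
                return 0;
        }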