author		Matthew Brost <matthew.brost@intel.com>	2024-04-24 21:55:04 -0700
committer	Matthew Brost <matthew.brost@intel.com>	2024-04-26 12:09:58 -0700
commit		701109f2e3a45b9748f0f98849fde9a35d391efb (patch)
tree		a29059f47ff0a6bfd48b6a4ca1066c1f25480faf /drivers/gpu/drm/xe/xe_vm.c
parent		0a34c12449de4b09f74808c6f6c39205ee5071f0 (diff)
drm/xe: Add struct xe_vma_ops abstraction
Having a structure which encapsulates a list of VMA operations will help
enable submitting a single job for the entire list.
v2:
- Rebase
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-5-matthew.brost@intel.com
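The structure definition itself lands outside the file shown here (the diffstat below is limited to xe_vm.c). As a minimal sketch, assuming it only carries the list member that the xe_vm.c hunks use and that it sits alongside the other VM types in drivers/gpu/drm/xe/xe_vm_types.h:

/*
 * Sketch only: the real definition is presumably added to
 * drivers/gpu/drm/xe/xe_vm_types.h (not shown here) and may grow more
 * members in later patches of the series; only the list member is
 * exercised by the xe_vm.c hunks below.
 */
struct xe_vma_ops {
	/** @list: list of VMA operations (struct xe_vma_op) to process */
	struct list_head list;
};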
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_vm.c | 37
1 file changed, 21 insertions, 16 deletions
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e7be99acaff2..93cf5116d2a9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2252,7 +2252,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
				    struct drm_gpuva_ops *ops,
				    struct xe_sync_entry *syncs, u32 num_syncs,
-				   struct list_head *ops_list, bool last)
+				   struct xe_vma_ops *vops, bool last)
 {
	struct xe_device *xe = vm->xe;
	struct xe_vma_op *last_op = NULL;
@@ -2264,11 +2264,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
	drm_gpuva_for_each_op(__op, ops) {
		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
		struct xe_vma *vma;
-		bool first = list_empty(ops_list);
+		bool first = list_empty(&vops->list);
		unsigned int flags = 0;

		INIT_LIST_HEAD(&op->link);
-		list_add_tail(&op->link, ops_list);
+		list_add_tail(&op->link, &vops->list);

		if (first) {
			op->flags |= XE_VMA_OP_FIRST;
@@ -2394,7 +2394,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
	}

	/* FIXME: Unhandled corner case */
-	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
+	XE_WARN_ON(!last_op && last && !list_empty(&vops->list));

	if (!last_op)
		return 0;
@@ -2734,7 +2734,7 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,

 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
					    struct xe_vm *vm,
-					   struct list_head *ops_list)
+					   struct xe_vma_ops *vops)
 {
	struct xe_vma_op *op;
	int err;
@@ -2743,7 +2743,7 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
	if (err)
		return err;

-	list_for_each_entry(op, ops_list, link) {
+	list_for_each_entry(op, &vops->list, link) {
		err = op_lock_and_prep(exec, vm, op);
		if (err)
			return err;
@@ -2753,13 +2753,13 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
 }

 static struct dma_fence *ops_execute(struct xe_vm *vm,
-				     struct list_head *ops_list,
+				     struct xe_vma_ops *vops,
				      bool cleanup)
 {
	struct xe_vma_op *op, *next;
	struct dma_fence *fence = NULL;

-	list_for_each_entry_safe(op, next, ops_list, link) {
+	list_for_each_entry_safe(op, next, &vops->list, link) {
		if (!IS_ERR(fence)) {
			dma_fence_put(fence);
			fence = xe_vma_op_execute(vm, op);
@@ -2777,7 +2777,7 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 }

 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
-				     struct list_head *ops_list)
+				     struct xe_vma_ops *vops)
 {
	struct drm_exec exec;
	struct dma_fence *fence;
@@ -2788,12 +2788,12 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
-		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, ops_list);
+		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
		drm_exec_retry_on_contention(&exec);
		if (err)
			goto unlock;

-		fence = ops_execute(vm, ops_list, true);
+		fence = ops_execute(vm, vops, true);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			/* FIXME: Killing VM rather than proper error handling */
@@ -2954,6 +2954,11 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
	return err;
 }

+static void xe_vma_ops_init(struct xe_vma_ops *vops)
+{
+	INIT_LIST_HEAD(&vops->list);
+}
+
 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
	struct xe_device *xe = to_xe_device(dev);
@@ -2967,7 +2972,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	u32 num_syncs, num_ufence = 0;
	struct xe_sync_entry *syncs = NULL;
	struct drm_xe_vm_bind_op *bind_ops;
-	LIST_HEAD(ops_list);
+	struct xe_vma_ops vops;
	int err;
	int i;

@@ -3118,6 +3123,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		goto free_syncs;
	}

+	xe_vma_ops_init(&vops);
	for (i = 0; i < args->num_binds; ++i) {
		u64 range = bind_ops[i].range;
		u64 addr = bind_ops[i].addr;
@@ -3137,14 +3143,13 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		}

		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
-					      &ops_list,
-					      i == args->num_binds - 1);
+					      &vops, i == args->num_binds - 1);
		if (err)
			goto unwind_ops;
	}

	/* Nothing to do */
-	if (list_empty(&ops_list)) {
+	if (list_empty(&vops.list)) {
		err = -ENODATA;
		goto unwind_ops;
	}
@@ -3153,7 +3158,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	if (q)
		xe_exec_queue_get(q);

-	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
+	err = vm_bind_ioctl_ops_execute(vm, &vops);

	up_write(&vm->lock);

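Condensing the xe_vm_bind_ioctl() hunks above, the caller flow after this patch looks roughly like the sketch below (error paths and the unwind_ops label are trimmed); the on-stack list head is replaced by an xe_vma_ops instance that every helper now takes:

	struct xe_vma_ops vops;

	xe_vma_ops_init(&vops);			/* was: LIST_HEAD(ops_list) */

	for (i = 0; i < args->num_binds; ++i) {
		/* each parsed bind op is appended to vops.list */
		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
					      &vops, i == args->num_binds - 1);
		if (err)
			goto unwind_ops;
	}

	if (list_empty(&vops.list)) {		/* nothing to do */
		err = -ENODATA;
		goto unwind_ops;
	}

	/* lock/prep every op under drm_exec, then execute the whole list */
	err = vm_bind_ioctl_ops_execute(vm, &vops);

Threading a struct rather than a bare list_head presumably lets later patches in the series hang per-submission state (for example, the single job mentioned in the commit message) off struct xe_vma_ops without touching every helper's signature again.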