diff options
Diffstat (limited to 'drivers/gpu/drm/drm_gpuvm.c')
-rw-r--r-- | drivers/gpu/drm/drm_gpuvm.c | 133 |
1 files changed, 130 insertions, 3 deletions
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index f9eb56f24bef..bbc7fecb6f4a 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -27,6 +27,7 @@ #include <drm/drm_gpuvm.h> +#include <linux/export.h> #include <linux/interval_tree_generic.h> #include <linux/mm.h> @@ -2299,13 +2300,13 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, } /** - * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps + * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps * @gpuvm: the &drm_gpuvm representing the GPU VA space + * @priv: pointer to a driver private data structure * @req_addr: the start address of the new mapping * @req_range: the range of the new mapping * @req_obj: the &drm_gem_object to map * @req_offset: the offset within the &drm_gem_object - * @priv: pointer to a driver private data structure * * This function iterates the given range of the GPU VA space. It utilizes the * &drm_gpuvm_ops to call back into the driver providing the split and merge @@ -2349,7 +2350,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); /** - * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap + * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap * @gpuvm: the &drm_gpuvm representing the GPU VA space * @priv: pointer to a driver private data structure * @req_addr: the start address of the range to unmap @@ -2390,6 +2391,132 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap); +static int +drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv) +{ + struct drm_exec *exec = priv; + + switch (op->op) { + case DRM_GPUVA_OP_REMAP: + if (op->remap.unmap->va->gem.obj) + return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj); + return 0; + case DRM_GPUVA_OP_UNMAP: + if (op->unmap.va->gem.obj) + return drm_exec_lock_obj(exec, op->unmap.va->gem.obj); + return 0; + default: + return 0; + } +} + 
+static const struct drm_gpuvm_ops lock_ops = { + .sm_step_map = drm_gpuva_sm_step_lock, + .sm_step_remap = drm_gpuva_sm_step_lock, + .sm_step_unmap = drm_gpuva_sm_step_lock, +}; + +/** + * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map() + * @gpuvm: the &drm_gpuvm representing the GPU VA space + * @exec: the &drm_exec locking context + * @num_fences: for newly mapped objects, the # of fences to reserve + * @req_addr: the start address of the new mapping + * @req_range: the range of the new mapping + * @req_obj: the &drm_gem_object to map + * @req_offset: the offset within the &drm_gem_object + * + * This function locks (drm_exec_lock_obj()) objects that will be unmapped/ + * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that + * will be newly mapped. + * + * The expected usage is: + * + * vm_bind { + * struct drm_exec exec; + * + * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended: + * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0); + * + * drm_exec_until_all_locked (&exec) { + * for_each_vm_bind_operation { + * switch (op->op) { + * case DRIVER_OP_UNMAP: + * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range); + * break; + * case DRIVER_OP_MAP: + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, + * op->addr, op->range, + * obj, op->obj_offset); + * break; + * } + * + * drm_exec_retry_on_contention(&exec); + * if (ret) + * return ret; + * } + * } + * } + * + * This enables all locking to be performed before the driver begins modifying + * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs, + * where an earlier op can alter the sequence of steps generated for a later + * op, because the later altered step will involve the same GEM object(s) + * already seen in the earlier locking step. For example: + * + * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a + * DRM_GPUVA_OP_REMAP/UNMAP step. 
This is safe because we've already + * locked the GEM object in the earlier DRIVER_OP_UNMAP op. + * + * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP + * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been + * required without the earlier DRIVER_OP_MAP. This is safe because we've + * already locked the GEM object in the earlier DRIVER_OP_MAP step. + * + * Returns: 0 on success or a negative error code + */ +int +drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, unsigned int num_fences, + u64 req_addr, u64 req_range, + struct drm_gem_object *req_obj, u64 req_offset) +{ + if (req_obj) { + int ret = drm_exec_prepare_obj(exec, req_obj, num_fences); + if (ret) + return ret; + } + + return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, + req_addr, req_range, + req_obj, req_offset); + +} +EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock); + +/** + * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap() + * @gpuvm: the &drm_gpuvm representing the GPU VA space + * @exec: the &drm_exec locking context + * @req_addr: the start address of the range to unmap + * @req_range: the range of the mappings to unmap + * + * This function locks (drm_exec_lock_obj()) objects that will be unmapped/ + * remapped by drm_gpuvm_sm_unmap(). + * + * See drm_gpuvm_sm_map_exec_lock() for expected usage. + * + * Returns: 0 on success or a negative error code + */ +int +drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, + u64 req_addr, u64 req_range) +{ + return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec, + req_addr, req_range); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock); + static struct drm_gpuva_op * gpuva_op_alloc(struct drm_gpuvm *gpuvm) { |