Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_submit.c')
 -rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 613
 1 file changed, 228 insertions(+), 385 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 63c96416e183..75d9f3574370 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -4,6 +4,7 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/dma-fence-unwrap.h> #include <linux/file.h> #include <linux/sync_file.h> #include <linux/uaccess.h> @@ -16,6 +17,13 @@ #include "msm_gpu.h" #include "msm_gem.h" #include "msm_gpu_trace.h" +#include "msm_syncobj.h" + +/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable + * error msgs for debugging, but we don't spam dmesg by default + */ +#define SUBMIT_ERROR(err, submit, fmt, ...) \ + UERR(err, (submit)->dev, fmt, ##__VA_ARGS__) /* * Cmdstream submission: @@ -24,7 +32,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue, uint32_t nr_bos, - uint32_t nr_cmds) + uint32_t nr_cmds, u64 drm_client_id) { static atomic_t ident = ATOMIC_INIT(0); struct msm_gem_submit *submit; @@ -37,7 +45,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (sz > SIZE_MAX) return ERR_PTR(-ENOMEM); - submit = kzalloc(sz, GFP_KERNEL); + submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); if (!submit) return ERR_PTR(-ENOMEM); @@ -48,7 +56,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return ERR_PTR(ret); } - ret = drm_sched_job_init(&submit->base, queue->entity, queue); + ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue, + drm_client_id); if (ret) { kfree(submit->hw_fence); kfree(submit); @@ -57,7 +66,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, kref_init(&submit->ref); submit->dev = dev; - submit->aspace = queue->ctx->aspace; + submit->vm = msm_context_vm(dev, queue->ctx); submit->gpu = gpu; submit->cmd = (void *)&submit->bos[nr_bos]; submit->queue = queue; @@ -79,6 +88,15 @@ void __msm_gem_submit_destroy(struct kref *kref) container_of(kref, struct msm_gem_submit, ref); unsigned i; + /* + * In error paths, we could unref the submit without calling + * drm_sched_entity_push_job(), so msm_job_free() will never + * get called. Since drm_sched_job_cleanup() will NULL out + * s_fence, we can use that to detect this case. 
+ */ + if (submit->base.s_fence) + drm_sched_job_cleanup(&submit->base); + if (submit->fence_id) { spin_lock(&submit->queue->idr_lock); idr_remove(&submit->queue->fence_idr, submit->fence_id); @@ -136,16 +154,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || !(submit_bo.flags & MANDATORY_FLAGS)) { - DRM_ERROR("invalid flags: %x\n", submit_bo.flags); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags); i = 0; goto out; } submit->bos[i].handle = submit_bo.handle; submit->bos[i].flags = submit_bo.flags; - /* in validate_objects() we figure out if this is true: */ - submit->bos[i].iova = submit_bo.presumed; } spin_lock(&file->table_lock); @@ -158,14 +173,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, */ obj = idr_find(&file->object_idr, submit->bos[i].handle); if (!obj) { - DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i); goto out_unlock; } drm_gem_object_get(obj); - submit->bos[i].obj = to_msm_bo(obj); + submit->bos[i].obj = obj; } out_unlock: @@ -180,6 +194,7 @@ out: static int submit_lookup_cmds(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { + struct msm_context *ctx = file->driver_priv; unsigned i; size_t sz; int ret = 0; @@ -202,17 +217,29 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: break; default: - DRM_ERROR("invalid type: %08x\n", submit_cmd.type); - return -EINVAL; + return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type); } if (submit_cmd.size % 4) { - DRM_ERROR("non-aligned cmdstream buffer size: %u\n", - submit_cmd.size); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n", + submit_cmd.size); goto out; } + if (msm_context_is_vmbind(ctx)) { + if (submit_cmd.nr_relocs) { + ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero"); + goto out; + } + + if (submit_cmd.submit_idx || submit_cmd.submit_offset) { + ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero"); + goto out; + } + + submit->cmd[i].iova = submit_cmd.iova; + } + submit->cmd[i].type = submit_cmd.type; submit->cmd[i].size = submit_cmd.size / 4; submit->cmd[i].offset = submit_cmd.submit_offset / 4; @@ -228,7 +255,7 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, ret = -ENOMEM; goto out; } - submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL); + submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); if (!submit->cmd[i].relocs) { ret = -ENOMEM; goto out; @@ -244,127 +271,66 @@ out: return ret; } -/* Unwind bo state, according to cleanup_flags. In the success case, only - * the lock is dropped at the end of the submit (and active/pin ref is dropped - * later when the submit is retired). - */ -static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, - unsigned cleanup_flags) +static int submit_lock_objects_vmbind(struct msm_gem_submit *submit) { - struct drm_gem_object *obj = &submit->bos[i].obj->base; - unsigned flags = submit->bos[i].flags & cleanup_flags; - - /* - * Clear flags bit before dropping lock, so that the msm_job_run() - * path isn't racing with submit_cleanup() (ie. 
the read/modify/ - * write is protected by the obj lock in all paths) - */ - submit->bos[i].flags &= ~cleanup_flags; - - if (flags & BO_VMA_PINNED) - msm_gem_vma_unpin(submit->bos[i].vma); + unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES; + struct drm_exec *exec = &submit->exec; + int ret = 0; - if (flags & BO_OBJ_PINNED) - msm_gem_unpin_locked(obj); + drm_exec_init(&submit->exec, flags, submit->nr_bos); - if (flags & BO_LOCKED) - dma_resv_unlock(obj->resv); -} + drm_exec_until_all_locked (&submit->exec) { + ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + break; -static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) -{ - unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED; - submit_cleanup_bo(submit, i, cleanup_flags); + ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + break; + } - if (!(submit->bos[i].flags & BO_VALID)) - submit->bos[i].iova = 0; + return ret; } /* This is where we make sure all the bo's are reserved and pin'd: */ static int submit_lock_objects(struct msm_gem_submit *submit) { - int contended, slow_locked = -1, i, ret = 0; - -retry: - for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; + unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT; + int ret = 0; - if (slow_locked == i) - slow_locked = -1; + if (msm_context_is_vmbind(submit->queue->ctx)) + return submit_lock_objects_vmbind(submit); - contended = i; + drm_exec_init(&submit->exec, flags, submit->nr_bos); - if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = dma_resv_lock_interruptible(msm_obj->base.resv, - &submit->ticket); + drm_exec_until_all_locked (&submit->exec) { + ret = drm_exec_lock_obj(&submit->exec, + drm_gpuvm_resv_obj(submit->vm)); + drm_exec_retry_on_contention(&submit->exec); + if (ret) + break; + for (unsigned i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + ret = drm_exec_prepare_obj(&submit->exec, obj, 1); + drm_exec_retry_on_contention(&submit->exec); if (ret) - goto fail; - submit->bos[i].flags |= BO_LOCKED; - } - } - - ww_acquire_done(&submit->ticket); - - return 0; - -fail: - if (ret == -EALREADY) { - DRM_ERROR("handle %u at index %u already on submit list\n", - submit->bos[i].handle, i); - ret = -EINVAL; - } - - for (; i >= 0; i--) - submit_unlock_unpin_bo(submit, i); - - if (slow_locked > 0) - submit_unlock_unpin_bo(submit, slow_locked); - - if (ret == -EDEADLK) { - struct msm_gem_object *msm_obj = submit->bos[contended].obj; - /* we lost out in a seqno race, lock and retry.. */ - ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv, - &submit->ticket); - if (!ret) { - submit->bos[contended].flags |= BO_LOCKED; - slow_locked = contended; - goto retry; + break; } - - /* Not expecting -EALREADY here, if the bo was already - * locked, we should have gotten -EALREADY already from - * the dma_resv_lock_interruptable() call. 
- */ - WARN_ON_ONCE(ret == -EALREADY); } return ret; } -static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) +static int submit_fence_sync(struct msm_gem_submit *submit) { int i, ret = 0; for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = &submit->bos[i].obj->base; + struct drm_gem_object *obj = submit->bos[i].obj; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; - /* NOTE: _reserve_shared() must happen before - * _add_shared_fence(), which makes this a slightly - * strange place to call it. OTOH this is a - * convenient can-fail point to hook it in. - */ - ret = dma_resv_reserve_fences(obj->resv, 1); - if (ret) - return ret; - - /* If userspace has determined that explicit fencing is - * used, it can disable implicit sync on the entire - * submit: - */ - if (no_implicit) - continue; - /* Otherwise userspace can ask for implicit sync to be * disabled on specific buffers. This is useful for internal * usermode driver managed buffers, suballocation, etc. @@ -384,16 +350,15 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) static int submit_pin_objects(struct msm_gem_submit *submit) { + struct msm_drm_private *priv = submit->dev->dev_private; int i, ret = 0; - submit->valid = true; - for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = &submit->bos[i].obj->base; - struct msm_gem_vma *vma; + struct drm_gem_object *obj = submit->bos[i].obj; + struct drm_gpuva *vma; /* if locking succeeded, pin bo: */ - vma = msm_gem_get_vma_locked(obj, submit->aspace); + vma = msm_gem_get_vma_locked(obj, submit->vm); if (IS_ERR(vma)) { ret = PTR_ERR(vma); break; @@ -403,28 +368,62 @@ static int submit_pin_objects(struct msm_gem_submit *submit) if (ret) break; - submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED; - submit->bos[i].vma = vma; + submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo); + submit->bos[i].iova = vma->va.addr; + } - if (vma->iova == submit->bos[i].iova) { - submit->bos[i].flags |= BO_VALID; - } else { - submit->bos[i].iova = vma->iova; - /* iova changed, so address in cmdstream is not valid: */ - submit->bos[i].flags &= ~BO_VALID; - submit->valid = false; - } + /* + * A second loop while holding the LRU lock (a) avoids acquiring/dropping + * the LRU lock for each individual bo, while (b) avoiding holding the + * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger + * get_pages() which could trigger reclaim.. and if we held the LRU lock + * could trigger deadlock with the shrinker). 
+ */ + mutex_lock(&priv->lru.lock); + for (i = 0; i < submit->nr_bos; i++) { + msm_gem_pin_obj_locked(submit->bos[i].obj); } + mutex_unlock(&priv->lru.lock); + + submit->bos_pinned = true; return ret; } +static void submit_unpin_objects(struct msm_gem_submit *submit) +{ + if (!submit->bos_pinned) + return; + + for (int i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + + msm_gem_unpin_locked(obj); + } + + submit->bos_pinned = false; +} + static void submit_attach_object_fences(struct msm_gem_submit *submit) { - int i; + struct msm_gem_vm *vm = to_msm_vm(submit->vm); + struct dma_fence *last_fence; - for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = &submit->bos[i].obj->base; + if (msm_context_is_vmbind(submit->queue->ctx)) { + drm_gpuvm_resv_add_fence(submit->vm, &submit->exec, + submit->user_fence, + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); + + last_fence = vm->last_fence; + vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); + dma_fence_put(last_fence); + + return; + } + + for (unsigned i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) dma_resv_add_fence(obj->resv, submit->user_fence, @@ -436,44 +435,36 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) } static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, - struct msm_gem_object **obj, uint64_t *iova, bool *valid) + struct drm_gem_object **obj, uint64_t *iova) { if (idx >= submit->nr_bos) { - DRM_ERROR("invalid buffer index: %u (out of %u)\n", - idx, submit->nr_bos); - return -EINVAL; + return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n", + idx, submit->nr_bos); } if (obj) *obj = submit->bos[idx].obj; if (iova) *iova = submit->bos[idx].iova; - if (valid) - *valid = !!(submit->bos[idx].flags & BO_VALID); return 0; } /* process the reloc's and patch up the cmdstream as needed: */ -static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, +static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj, uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs) { uint32_t i, last_offset = 0; uint32_t *ptr; int ret = 0; - if (!nr_relocs) - return 0; - - if (offset % 4) { - DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); - return -EINVAL; - } + if (offset % 4) + return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset); /* For now, just map the entire thing. Eventually we probably * to do it page-by-page, w/ kmap() if not vmap()d.. 
*/ - ptr = msm_gem_get_vaddr_locked(&obj->base); + ptr = msm_gem_get_vaddr_locked(obj); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); @@ -485,32 +476,26 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob struct drm_msm_gem_submit_reloc submit_reloc = relocs[i]; uint32_t off; uint64_t iova; - bool valid; if (submit_reloc.submit_offset % 4) { - DRM_ERROR("non-aligned reloc offset: %u\n", - submit_reloc.submit_offset); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n", + submit_reloc.submit_offset); goto out; } /* offset in dwords: */ off = submit_reloc.submit_offset / 4; - if ((off >= (obj->base.size / 4)) || + if ((off >= (obj->size / 4)) || (off < last_offset)) { - DRM_ERROR("invalid offset %u at reloc %u\n", off, i); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i); goto out; } - ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); + ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova); if (ret) goto out; - if (valid) - continue; - iova += submit_reloc.reloc_offset; if (submit_reloc.shift < 0) @@ -524,7 +509,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob } out: - msm_gem_put_vaddr_locked(&obj->base); + msm_gem_put_vaddr_locked(obj); return ret; } @@ -535,18 +520,15 @@ out: */ static void submit_cleanup(struct msm_gem_submit *submit, bool error) { - unsigned cleanup_flags = BO_LOCKED; - unsigned i; - if (error) - cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED; + submit_unpin_objects(submit); - for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; - submit_cleanup_bo(submit, i, cleanup_flags); - if (error) - drm_gem_object_put(&msm_obj->base); - } + if (submit->exec.objects) + drm_exec_fini(&submit->exec); + + /* if job wasn't enqueued to scheduler, early retirement: */ + if (error) + msm_submit_retire(submit); } void msm_submit_retire(struct msm_gem_submit *submit) @@ -554,193 +536,31 @@ void msm_submit_retire(struct msm_gem_submit *submit) int i; for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = &submit->bos[i].obj->base; + struct drm_gem_object *obj = submit->bos[i].obj; + struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo; + msm_gem_lock(obj); + drm_gpuvm_bo_put(vm_bo); + msm_gem_unlock(obj); drm_gem_object_put(obj); } } -struct msm_submit_post_dep { - struct drm_syncobj *syncobj; - uint64_t point; - struct dma_fence_chain *chain; -}; - -static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit, - struct drm_file *file, - uint64_t in_syncobjs_addr, - uint32_t nr_in_syncobjs, - size_t syncobj_stride) -{ - struct drm_syncobj **syncobjs = NULL; - struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; - int ret = 0; - uint32_t i, j; - - syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - if (!syncobjs) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < nr_in_syncobjs; ++i) { - uint64_t address = in_syncobjs_addr + i * syncobj_stride; - - if (copy_from_user(&syncobj_desc, - u64_to_user_ptr(address), - min(syncobj_stride, sizeof(syncobj_desc)))) { - ret = -EFAULT; - break; - } - - if (syncobj_desc.point && - !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) { - ret = -EOPNOTSUPP; - break; - } - - if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) { - ret = -EINVAL; - break; - } - - ret = drm_sched_job_add_syncobj_dependency(&submit->base, file, - syncobj_desc.handle, syncobj_desc.point); - 
if (ret) - break; - - if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) { - syncobjs[i] = - drm_syncobj_find(file, syncobj_desc.handle); - if (!syncobjs[i]) { - ret = -EINVAL; - break; - } - } - } - - if (ret) { - for (j = 0; j <= i; ++j) { - if (syncobjs[j]) - drm_syncobj_put(syncobjs[j]); - } - kfree(syncobjs); - return ERR_PTR(ret); - } - return syncobjs; -} - -static void msm_reset_syncobjs(struct drm_syncobj **syncobjs, - uint32_t nr_syncobjs) -{ - uint32_t i; - - for (i = 0; syncobjs && i < nr_syncobjs; ++i) { - if (syncobjs[i]) - drm_syncobj_replace_fence(syncobjs[i], NULL); - } -} - -static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, - struct drm_file *file, - uint64_t syncobjs_addr, - uint32_t nr_syncobjs, - size_t syncobj_stride) -{ - struct msm_submit_post_dep *post_deps; - struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; - int ret = 0; - uint32_t i, j; - - post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - if (!post_deps) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < nr_syncobjs; ++i) { - uint64_t address = syncobjs_addr + i * syncobj_stride; - - if (copy_from_user(&syncobj_desc, - u64_to_user_ptr(address), - min(syncobj_stride, sizeof(syncobj_desc)))) { - ret = -EFAULT; - break; - } - - post_deps[i].point = syncobj_desc.point; - - if (syncobj_desc.flags) { - ret = -EINVAL; - break; - } - - if (syncobj_desc.point) { - if (!drm_core_check_feature(dev, - DRIVER_SYNCOBJ_TIMELINE)) { - ret = -EOPNOTSUPP; - break; - } - - post_deps[i].chain = dma_fence_chain_alloc(); - if (!post_deps[i].chain) { - ret = -ENOMEM; - break; - } - } - - post_deps[i].syncobj = - drm_syncobj_find(file, syncobj_desc.handle); - if (!post_deps[i].syncobj) { - ret = -EINVAL; - break; - } - } - - if (ret) { - for (j = 0; j <= i; ++j) { - dma_fence_chain_free(post_deps[j].chain); - if (post_deps[j].syncobj) - drm_syncobj_put(post_deps[j].syncobj); - } - - kfree(post_deps); - return ERR_PTR(ret); - } - - return post_deps; -} - -static void msm_process_post_deps(struct msm_submit_post_dep *post_deps, - uint32_t count, struct dma_fence *fence) -{ - uint32_t i; - - for (i = 0; post_deps && i < count; ++i) { - if (post_deps[i].chain) { - drm_syncobj_add_point(post_deps[i].syncobj, - post_deps[i].chain, - fence, post_deps[i].point); - post_deps[i].chain = NULL; - } else { - drm_syncobj_replace_fence(post_deps[i].syncobj, - fence); - } - } -} - int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; struct drm_msm_gem_submit *args = data; - struct msm_file_private *ctx = file->driver_priv; + struct msm_context *ctx = file->driver_priv; struct msm_gem_submit *submit = NULL; struct msm_gpu *gpu = priv->gpu; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; - struct msm_submit_post_dep *post_deps = NULL; + struct msm_syncobj_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; + struct sync_file *sync_file = NULL; + unsigned cmds_to_parse; int out_fence_fd = -1; - bool has_ww_ticket = false; unsigned i; int ret; @@ -750,19 +570,17 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->pad) return -EINVAL; - if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) { - DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n"); - return -EPERM; - } + if (to_msm_vm(ctx->vm)->unusable) + return UERR(EPIPE, dev, "context is unusable"); /* for now, we just have 3d pipe.. 
eventually this would need to * be more clever to dispatch to appropriate gpu module: */ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0) - return -EINVAL; + return UERR(EINVAL, dev, "invalid pipe"); if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) - return -EINVAL; + return UERR(EINVAL, dev, "invalid flags"); if (args->flags & MSM_SUBMIT_SUDO) { if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) || @@ -774,6 +592,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (!queue) return -ENOENT; + if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) { + ret = UERR(EINVAL, dev, "Invalid queue type"); + goto out_post_unlock; + } + ring = gpu->rb[queue->ring_nr]; if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { @@ -784,7 +607,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } } - submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); + submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds, + file->client_id); if (IS_ERR(submit)) { ret = PTR_ERR(submit); goto out_post_unlock; @@ -806,7 +630,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, in_fence = sync_file_get_fence(args->fence_fd); if (!in_fence) { - ret = -EINVAL; + ret = UERR(EINVAL, dev, "invalid in-fence"); goto out_unlock; } @@ -816,10 +640,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) { - syncobjs_to_reset = msm_parse_deps(submit, file, - args->in_syncobjs, - args->nr_in_syncobjs, - args->syncobj_stride); + syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base, + file, args->in_syncobjs, + args->nr_in_syncobjs, + args->syncobj_stride); if (IS_ERR(syncobjs_to_reset)) { ret = PTR_ERR(syncobjs_to_reset); goto out_unlock; @@ -827,10 +651,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) { - post_deps = msm_parse_post_deps(dev, file, - args->out_syncobjs, - args->nr_out_syncobjs, - args->syncobj_stride); + post_deps = msm_syncobj_parse_post_deps(dev, file, + args->out_syncobjs, + args->nr_out_syncobjs, + args->syncobj_stride); if (IS_ERR(post_deps)) { ret = PTR_ERR(post_deps); goto out_unlock; @@ -846,49 +670,54 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; /* copy_*_user while holding a ww ticket upsets lockdep */ - ww_acquire_init(&submit->ticket, &reservation_ww_class); - has_ww_ticket = true; ret = submit_lock_objects(submit); if (ret) goto out; - ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); - if (ret) - goto out; + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { + ret = submit_fence_sync(submit); + if (ret) + goto out; + } ret = submit_pin_objects(submit); if (ret) goto out; - for (i = 0; i < args->nr_cmds; i++) { - struct msm_gem_object *msm_obj; + cmds_to_parse = msm_context_is_vmbind(ctx) ? 
0 : args->nr_cmds; + + for (i = 0; i < cmds_to_parse; i++) { + struct drm_gem_object *obj; uint64_t iova; - ret = submit_bo(submit, submit->cmd[i].idx, - &msm_obj, &iova, NULL); + ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova); if (ret) goto out; if (!submit->cmd[i].size || - ((submit->cmd[i].size + submit->cmd[i].offset) > - msm_obj->base.size / 4)) { - DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4); - ret = -EINVAL; + (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) { + ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n", + submit->cmd[i].size * 4); goto out; } submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); - if (submit->valid) + if (likely(!submit->cmd[i].nr_relocs)) continue; - ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4, + if (!gpu->allow_relocs) { + ret = UERR(EINVAL, dev, "relocs not allowed\n"); + goto out; + } + + ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4, submit->cmd[i].nr_relocs, submit->cmd[i].relocs); if (ret) goto out; } - submit->nr_cmds = i; + submit->nr_cmds = args->nr_cmds; idr_preload(GFP_KERNEL); @@ -904,7 +733,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, (!args->fence || idr_find(&queue->fence_idr, args->fence))) { spin_unlock(&queue->idr_lock); idr_preload_end(); - ret = -EINVAL; + ret = UERR(EINVAL, dev, "invalid in-fence-sn"); goto out; } @@ -946,17 +775,28 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { - struct sync_file *sync_file = sync_file_create(submit->user_fence); - if (!sync_file) { + sync_file = sync_file_create(submit->user_fence); + if (!sync_file) ret = -ENOMEM; - } else { - fd_install(out_fence_fd, sync_file->file); - args->fence_fd = out_fence_fd; - } } + if (ret) + goto out; + submit_attach_object_fences(submit); + if (msm_context_is_vmbind(ctx)) { + /* + * If we are not using VM_BIND, submit_pin_vmas() will validate + * just the BOs attached to the submit. In that case we don't + * need to validate the _entire_ vm, because userspace tracked + * what BOs are associated with the submit. + */ + ret = drm_gpuvm_validate(submit->vm, &submit->exec); + if (ret) + goto out; + } + /* The scheduler owns a ref now: */ msm_gem_submit_get(submit); @@ -967,20 +807,23 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->fence = submit->fence_id; queue->last_fence = submit->fence_id; - msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); - msm_process_post_deps(post_deps, args->nr_out_syncobjs, - submit->user_fence); - + msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs); + msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence); out: submit_cleanup(submit, !!ret); - if (has_ww_ticket) - ww_acquire_fini(&submit->ticket); out_unlock: mutex_unlock(&queue->lock); out_post_unlock: - if (ret && (out_fence_fd >= 0)) - put_unused_fd(out_fence_fd); + if (ret) { + if (out_fence_fd >= 0) + put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); + } else if (sync_file) { + fd_install(out_fence_fd, sync_file->file); + args->fence_fd = out_fence_fd; + } if (!IS_ERR_OR_NULL(submit)) { msm_gem_submit_put(submit); |
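The SUBMIT_ERROR() macro defined at the top of the file forwards to UERR(), which comes from elsewhere in the msm driver. As a sketch only (not the driver's verbatim definition), a helper with the behavior the comment describes, logging under the DRM_UT_DRIVER category that stays silent unless enabled through the drm.debug module parameter, then evaluating to a negative errno, could look like this:

	/* Sketch of a UERR()-style helper; the real definition lives in the
	 * msm driver headers.  drm_dbg_driver() logs at DRM_UT_DRIVER level,
	 * so these messages only appear when debugging is opted into.
	 */
	#define UERR(err, drm, fmt, ...) \
		({ drm_dbg_driver(drm, fmt, ##__VA_ARGS__); -(err); })

This is what lets the converted error paths collapse into single statements such as:

	return SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);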
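The new submit_lock_objects()/submit_lock_objects_vmbind() replace the hand-rolled ww-mutex loop (all of the contended/slow_locked bookkeeping the hunk deletes) with the drm_exec helper, which hides the backoff-and-retry dance. Stripped of driver specifics, and assuming bos[] is an array of struct drm_gem_object pointers, the pattern is roughly:

	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr_bos);
	drm_exec_until_all_locked(&exec) {
		for (unsigned i = 0; i < nr_bos; i++) {
			/* lock bos[i]->resv and reserve one fence slot */
			ret = drm_exec_prepare_obj(&exec, bos[i], 1);
			/* on contention: drop every lock taken so far and
			 * restart the block in the correct ww order */
			drm_exec_retry_on_contention(&exec);
			if (ret)
				break;
		}
	}
	/* ... use the locked objects ... */
	drm_exec_fini(&exec);	/* drops all the locks */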
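For VM_BIND queues, submit_attach_object_fences() now folds each submit's user fence into a per-VM accumulator via dma_fence_unwrap_merge(), hence the new <linux/dma-fence-unwrap.h> include. The reference handling there is easy to misread; annotated, the pattern is:

	struct dma_fence *old = vm->last_fence;

	/* returns a *new* fence reference that signals once both inputs
	 * have signaled; fence containers (chains/arrays) are unwrapped
	 * first so the accumulator stays flat rather than nesting */
	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, old);

	dma_fence_put(old);	/* drop the ref to the previous accumulator */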
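The cmdstream bounds check in msm_ioctl_gem_submit() switches from an open-coded size + offset to size_add() from <linux/overflow.h>. The motivation is overflow safety: a plain addition can wrap around and sneak under the limit, while size_add() saturates instead. Illustratively (cmd_size, cmd_offset and obj_size are stand-in names for this sketch):

	#include <linux/overflow.h>

	/* size_add() never wraps: on overflow it returns SIZE_MAX,
	 * which always fails the bounds check below */
	if (size_add(cmd_size, cmd_offset) > obj_size / 4)
		return -EINVAL;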
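The out-fence plumbing is also reordered so that fd_install() happens only after the last possible failure point: once an fd is installed it is immediately visible to userspace and cannot be taken back, whereas a reserved-but-uninstalled fd can still be returned with put_unused_fd(). In isolation the pattern looks like this (error handling abbreviated):

	int fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file)
		ret = -ENOMEM;

	/* ... everything else that can still fail ... */

	if (ret) {
		put_unused_fd(fd);		/* fd never became visible */
		if (sync_file)
			fput(sync_file->file);	/* frees the sync_file */
	} else {
		fd_install(fd, sync_file->file);	/* point of no return */
		args->fence_fd = fd;
	}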
