Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_submit.c')
-rw-r--r--	drivers/gpu/drm/msm/msm_gem_submit.c	852
1 file changed, 547 insertions, 305 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 12b983fc0b56..75d9f3574370 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -1,85 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/dma-fence-unwrap.h> +#include <linux/file.h> #include <linux/sync_file.h> +#include <linux/uaccess.h> + +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_syncobj.h> #include "msm_drv.h" #include "msm_gpu.h" #include "msm_gem.h" #include "msm_gpu_trace.h" +#include "msm_syncobj.h" + +/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable + * error msgs for debugging, but we don't spam dmesg by default + */ +#define SUBMIT_ERROR(err, submit, fmt, ...) \ + UERR(err, (submit)->dev, fmt, ##__VA_ARGS__) /* * Cmdstream submission: */ -/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ -#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */ -#define BO_LOCKED 0x4000 -#define BO_PINNED 0x2000 - static struct msm_gem_submit *submit_create(struct drm_device *dev, - struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue, - uint32_t nr_bos, uint32_t nr_cmds) + struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue, uint32_t nr_bos, + uint32_t nr_cmds, u64 drm_client_id) { + static atomic_t ident = ATOMIC_INIT(0); struct msm_gem_submit *submit; - uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + - ((u64)nr_cmds * sizeof(submit->cmd[0])); + uint64_t sz; + int ret; + + sz = struct_size(submit, bos, nr_bos) + + ((u64)nr_cmds * sizeof(submit->cmd[0])); if (sz > SIZE_MAX) - return NULL; + return ERR_PTR(-ENOMEM); - submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); if (!submit) - return NULL; + return ERR_PTR(-ENOMEM); + + submit->hw_fence = msm_fence_alloc(); + if (IS_ERR(submit->hw_fence)) { + ret = PTR_ERR(submit->hw_fence); + kfree(submit); + return ERR_PTR(ret); + } + + ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue, + drm_client_id); + if (ret) { + kfree(submit->hw_fence); + kfree(submit); + return ERR_PTR(ret); + } + kref_init(&submit->ref); submit->dev = dev; + submit->vm = msm_context_vm(dev, queue->ctx); submit->gpu = gpu; - submit->fence = NULL; submit->cmd = (void *)&submit->bos[nr_bos]; submit->queue = queue; - submit->ring = gpu->rb[queue->prio]; + submit->pid = get_pid(task_pid(current)); + submit->ring = gpu->rb[queue->ring_nr]; + submit->fault_dumped = false; - /* initially, until copy_from_user() and bo lookup succeeds: */ - submit->nr_bos = 0; - submit->nr_cmds = 0; + /* Get a unique identifier for the submission for logging purposes */ + submit->ident = atomic_inc_return(&ident) - 1; INIT_LIST_HEAD(&submit->node); - INIT_LIST_HEAD(&submit->bo_list); - 
ww_acquire_init(&submit->ticket, &reservation_ww_class); return submit; } -void msm_gem_submit_free(struct msm_gem_submit *submit) +void __msm_gem_submit_destroy(struct kref *kref) { - dma_fence_put(submit->fence); - list_del(&submit->node); + struct msm_gem_submit *submit = + container_of(kref, struct msm_gem_submit, ref); + unsigned i; + + /* + * In error paths, we could unref the submit without calling + * drm_sched_entity_push_job(), so msm_job_free() will never + * get called. Since drm_sched_job_cleanup() will NULL out + * s_fence, we can use that to detect this case. + */ + if (submit->base.s_fence) + drm_sched_job_cleanup(&submit->base); + + if (submit->fence_id) { + spin_lock(&submit->queue->idr_lock); + idr_remove(&submit->queue->fence_idr, submit->fence_id); + spin_unlock(&submit->queue->idr_lock); + } + + dma_fence_put(submit->user_fence); + + /* + * If the submit is freed before msm_job_run(), then hw_fence is + * just some pre-allocated memory, not a reference counted fence. + * Once the job runs and the hw_fence is initialized, it will + * have a refcount of at least one, since the submit holds a ref + * to the hw_fence. + */ + if (kref_read(&submit->hw_fence->refcount) == 0) { + kfree(submit->hw_fence); + } else { + dma_fence_put(submit->hw_fence); + } + put_pid(submit->pid); msm_submitqueue_put(submit->queue); - kfree(submit); -} + for (i = 0; i < submit->nr_cmds; i++) + kfree(submit->cmd[i].relocs); -static inline unsigned long __must_check -copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) -{ - if (access_ok(from, n)) - return __copy_from_user_inatomic(to, from, n); - return -EFAULT; + kfree(submit); } static int submit_lookup_objects(struct msm_gem_submit *submit, @@ -88,13 +133,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, unsigned i; int ret = 0; - spin_lock(&file->table_lock); - pagefault_disable(); - for (i = 0; i < args->nr_bos; i++) { struct drm_msm_gem_submit_bo submit_bo; - struct drm_gem_object *obj; - struct msm_gem_object *msm_obj; void __user *userptr = u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); @@ -103,15 +143,10 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, */ submit->bos[i].flags = 0; - if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) { - pagefault_enable(); - spin_unlock(&file->table_lock); - if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) { - ret = -EFAULT; - goto out; - } - spin_lock(&file->table_lock); - pagefault_disable(); + if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) { + ret = -EFAULT; + i = 0; + goto out; } /* at least one of READ and/or WRITE flags should be set: */ @@ -119,43 +154,35 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || !(submit_bo.flags & MANDATORY_FLAGS)) { - DRM_ERROR("invalid flags: %x\n", submit_bo.flags); - ret = -EINVAL; - goto out_unlock; + ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags); + i = 0; + goto out; } + submit->bos[i].handle = submit_bo.handle; submit->bos[i].flags = submit_bo.flags; - /* in validate_objects() we figure out if this is true: */ - submit->bos[i].iova = submit_bo.presumed; + } + + spin_lock(&file->table_lock); + + for (i = 0; i < args->nr_bos; i++) { + struct drm_gem_object *obj; /* normally use drm_gem_object_lookup(), but for bulk lookup * all under single table_lock just hit object_idr directly: */ - obj = idr_find(&file->object_idr, submit_bo.handle); + obj = 
idr_find(&file->object_idr, submit->bos[i].handle); if (!obj) { - DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i); - ret = -EINVAL; - goto out_unlock; - } - - msm_obj = to_msm_bo(obj); - - if (!list_empty(&msm_obj->submit_entry)) { - DRM_ERROR("handle %u at index %u already on submit list\n", - submit_bo.handle, i); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i); goto out_unlock; } drm_gem_object_get(obj); - submit->bos[i].obj = msm_obj; - - list_add_tail(&msm_obj->submit_entry, &submit->bo_list); + submit->bos[i].obj = obj; } out_unlock: - pagefault_enable(); spin_unlock(&file->table_lock); out: @@ -164,97 +191,156 @@ out: return ret; } -static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, - int i, bool backoff) +static int submit_lookup_cmds(struct msm_gem_submit *submit, + struct drm_msm_gem_submit *args, struct drm_file *file) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; + struct msm_context *ctx = file->driver_priv; + unsigned i; + size_t sz; + int ret = 0; - if (submit->bos[i].flags & BO_PINNED) - msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace); + for (i = 0; i < args->nr_cmds; i++) { + struct drm_msm_gem_submit_cmd submit_cmd; + void __user *userptr = + u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); - if (submit->bos[i].flags & BO_LOCKED) - ww_mutex_unlock(&msm_obj->resv->lock); + ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); + if (ret) { + ret = -EFAULT; + goto out; + } - if (backoff && !(submit->bos[i].flags & BO_VALID)) - submit->bos[i].iova = 0; + /* validate input from userspace: */ + switch (submit_cmd.type) { + case MSM_SUBMIT_CMD_BUF: + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + break; + default: + return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type); + } - submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED); -} + if (submit_cmd.size % 4) { + ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n", + submit_cmd.size); + goto out; + } -/* This is where we make sure all the bo's are reserved and pin'd: */ -static int submit_lock_objects(struct msm_gem_submit *submit) -{ - int contended, slow_locked = -1, i, ret = 0; + if (msm_context_is_vmbind(ctx)) { + if (submit_cmd.nr_relocs) { + ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero"); + goto out; + } -retry: - for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; + if (submit_cmd.submit_idx || submit_cmd.submit_offset) { + ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero"); + goto out; + } - if (slow_locked == i) - slow_locked = -1; + submit->cmd[i].iova = submit_cmd.iova; + } + + submit->cmd[i].type = submit_cmd.type; + submit->cmd[i].size = submit_cmd.size / 4; + submit->cmd[i].offset = submit_cmd.submit_offset / 4; + submit->cmd[i].idx = submit_cmd.submit_idx; + submit->cmd[i].nr_relocs = submit_cmd.nr_relocs; - contended = i; + userptr = u64_to_user_ptr(submit_cmd.relocs); - if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock, - &submit->ticket); - if (ret) - goto fail; - submit->bos[i].flags |= BO_LOCKED; + sz = array_size(submit_cmd.nr_relocs, + sizeof(struct drm_msm_gem_submit_reloc)); + /* check for overflow: */ + if (sz == SIZE_MAX) { + ret = -ENOMEM; + goto out; + } + submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); + if (!submit->cmd[i].relocs) { + ret = -ENOMEM; + goto 
out; + } + ret = copy_from_user(submit->cmd[i].relocs, userptr, sz); + if (ret) { + ret = -EFAULT; + goto out; } } - ww_acquire_done(&submit->ticket); +out: + return ret; +} - return 0; +static int submit_lock_objects_vmbind(struct msm_gem_submit *submit) +{ + unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES; + struct drm_exec *exec = &submit->exec; + int ret = 0; + + drm_exec_init(&submit->exec, flags, submit->nr_bos); + + drm_exec_until_all_locked (&submit->exec) { + ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + break; + + ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + break; + } + + return ret; +} + +/* This is where we make sure all the bo's are reserved and pin'd: */ +static int submit_lock_objects(struct msm_gem_submit *submit) +{ + unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT; + int ret = 0; + + if (msm_context_is_vmbind(submit->queue->ctx)) + return submit_lock_objects_vmbind(submit); -fail: - for (; i >= 0; i--) - submit_unlock_unpin_bo(submit, i, true); - - if (slow_locked > 0) - submit_unlock_unpin_bo(submit, slow_locked, true); - - if (ret == -EDEADLK) { - struct msm_gem_object *msm_obj = submit->bos[contended].obj; - /* we lost out in a seqno race, lock and retry.. */ - ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock, - &submit->ticket); - if (!ret) { - submit->bos[contended].flags |= BO_LOCKED; - slow_locked = contended; - goto retry; + drm_exec_init(&submit->exec, flags, submit->nr_bos); + + drm_exec_until_all_locked (&submit->exec) { + ret = drm_exec_lock_obj(&submit->exec, + drm_gpuvm_resv_obj(submit->vm)); + drm_exec_retry_on_contention(&submit->exec); + if (ret) + break; + for (unsigned i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + ret = drm_exec_prepare_obj(&submit->exec, obj, 1); + drm_exec_retry_on_contention(&submit->exec); + if (ret) + break; } } return ret; } -static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) +static int submit_fence_sync(struct msm_gem_submit *submit) { int i, ret = 0; for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; + struct drm_gem_object *obj = submit->bos[i].obj; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; - if (!write) { - /* NOTE: _reserve_shared() must happen before - * _add_shared_fence(), which makes this a slightly - * strange place to call it. OTOH this is a - * convenient can-fail point to hook it in. - */ - ret = reservation_object_reserve_shared(msm_obj->resv, - 1); - if (ret) - return ret; - } - - if (no_implicit) + /* Otherwise userspace can ask for implicit sync to be + * disabled on specific buffers. This is useful for internal + * usermode driver managed buffers, suballocation, etc. 
+ */ + if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT) continue; - ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx, - write); + ret = drm_sched_job_add_implicit_dependencies(&submit->base, + obj, + write); if (ret) break; } @@ -264,75 +350,121 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) static int submit_pin_objects(struct msm_gem_submit *submit) { + struct msm_drm_private *priv = submit->dev->dev_private; int i, ret = 0; - submit->valid = true; - for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; - uint64_t iova; + struct drm_gem_object *obj = submit->bos[i].obj; + struct drm_gpuva *vma; /* if locking succeeded, pin bo: */ - ret = msm_gem_get_and_pin_iova(&msm_obj->base, - submit->gpu->aspace, &iova); + vma = msm_gem_get_vma_locked(obj, submit->vm); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + break; + } + ret = msm_gem_pin_vma_locked(obj, vma); if (ret) break; - submit->bos[i].flags |= BO_PINNED; + submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo); + submit->bos[i].iova = vma->va.addr; + } - if (iova == submit->bos[i].iova) { - submit->bos[i].flags |= BO_VALID; - } else { - submit->bos[i].iova = iova; - /* iova changed, so address in cmdstream is not valid: */ - submit->bos[i].flags &= ~BO_VALID; - submit->valid = false; - } + /* + * A second loop while holding the LRU lock (a) avoids acquiring/dropping + * the LRU lock for each individual bo, while (b) avoiding holding the + * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger + * get_pages() which could trigger reclaim.. and if we held the LRU lock + * could trigger deadlock with the shrinker). + */ + mutex_lock(&priv->lru.lock); + for (i = 0; i < submit->nr_bos; i++) { + msm_gem_pin_obj_locked(submit->bos[i].obj); } + mutex_unlock(&priv->lru.lock); + + submit->bos_pinned = true; return ret; } +static void submit_unpin_objects(struct msm_gem_submit *submit) +{ + if (!submit->bos_pinned) + return; + + for (int i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + + msm_gem_unpin_locked(obj); + } + + submit->bos_pinned = false; +} + +static void submit_attach_object_fences(struct msm_gem_submit *submit) +{ + struct msm_gem_vm *vm = to_msm_vm(submit->vm); + struct dma_fence *last_fence; + + if (msm_context_is_vmbind(submit->queue->ctx)) { + drm_gpuvm_resv_add_fence(submit->vm, &submit->exec, + submit->user_fence, + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); + + last_fence = vm->last_fence; + vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); + dma_fence_put(last_fence); + + return; + } + + for (unsigned i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + + if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) + dma_resv_add_fence(obj->resv, submit->user_fence, + DMA_RESV_USAGE_WRITE); + else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) + dma_resv_add_fence(obj->resv, submit->user_fence, + DMA_RESV_USAGE_READ); + } +} + static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, - struct msm_gem_object **obj, uint64_t *iova, bool *valid) + struct drm_gem_object **obj, uint64_t *iova) { if (idx >= submit->nr_bos) { - DRM_ERROR("invalid buffer index: %u (out of %u)\n", - idx, submit->nr_bos); - return -EINVAL; + return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n", + idx, submit->nr_bos); } if (obj) *obj = submit->bos[idx].obj; if (iova) *iova = submit->bos[idx].iova; - if (valid) - *valid = 
!!(submit->bos[idx].flags & BO_VALID); return 0; } /* process the reloc's and patch up the cmdstream as needed: */ -static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, - uint32_t offset, uint32_t nr_relocs, uint64_t relocs) +static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj, + uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs) { uint32_t i, last_offset = 0; uint32_t *ptr; int ret = 0; - if (!nr_relocs) - return 0; - - if (offset % 4) { - DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); - return -EINVAL; - } + if (offset % 4) + return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset); /* For now, just map the entire thing. Eventually we probably * to do it page-by-page, w/ kmap() if not vmap()d.. */ - ptr = msm_gem_get_vaddr(&obj->base); + ptr = msm_gem_get_vaddr_locked(obj); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); @@ -341,42 +473,29 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob } for (i = 0; i < nr_relocs; i++) { - struct drm_msm_gem_submit_reloc submit_reloc; - void __user *userptr = - u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); + struct drm_msm_gem_submit_reloc submit_reloc = relocs[i]; uint32_t off; uint64_t iova; - bool valid; - - if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) { - ret = -EFAULT; - goto out; - } if (submit_reloc.submit_offset % 4) { - DRM_ERROR("non-aligned reloc offset: %u\n", - submit_reloc.submit_offset); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n", + submit_reloc.submit_offset); goto out; } /* offset in dwords: */ off = submit_reloc.submit_offset / 4; - if ((off >= (obj->base.size / 4)) || + if ((off >= (obj->size / 4)) || (off < last_offset)) { - DRM_ERROR("invalid offset %u at reloc %u\n", off, i); - ret = -EINVAL; + ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i); goto out; } - ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); + ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova); if (ret) goto out; - if (valid) - continue; - iova += submit_reloc.reloc_offset; if (submit_reloc.shift < 0) @@ -390,52 +509,78 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob } out: - msm_gem_put_vaddr(&obj->base); + msm_gem_put_vaddr_locked(obj); return ret; } -static void submit_cleanup(struct msm_gem_submit *submit) +/* Cleanup submit at end of ioctl. In the error case, this also drops + * references, unpins, and drops active refcnt. In the non-error case, + * this is done when the submit is retired. 
+ */ +static void submit_cleanup(struct msm_gem_submit *submit, bool error) { - unsigned i; + if (error) + submit_unpin_objects(submit); + + if (submit->exec.objects) + drm_exec_fini(&submit->exec); + + /* if job wasn't enqueued to scheduler, early retirement: */ + if (error) + msm_submit_retire(submit); +} + +void msm_submit_retire(struct msm_gem_submit *submit) +{ + int i; for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; - submit_unlock_unpin_bo(submit, i, false); - list_del_init(&msm_obj->submit_entry); - drm_gem_object_put(&msm_obj->base); - } + struct drm_gem_object *obj = submit->bos[i].obj; + struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo; - ww_acquire_fini(&submit->ticket); + msm_gem_lock(obj); + drm_gpuvm_bo_put(vm_bo); + msm_gem_unlock(obj); + drm_gem_object_put(obj); + } } int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file) { - static atomic_t ident = ATOMIC_INIT(0); struct msm_drm_private *priv = dev->dev_private; struct drm_msm_gem_submit *args = data; - struct msm_file_private *ctx = file->driver_priv; - struct msm_gem_submit *submit; + struct msm_context *ctx = file->driver_priv; + struct msm_gem_submit *submit = NULL; struct msm_gpu *gpu = priv->gpu; - struct sync_file *sync_file = NULL; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; + struct msm_syncobj_post_dep *post_deps = NULL; + struct drm_syncobj **syncobjs_to_reset = NULL; + struct sync_file *sync_file = NULL; + unsigned cmds_to_parse; int out_fence_fd = -1; - struct pid *pid = get_pid(task_pid(current)); unsigned i; - int ret, submitid; + int ret; + if (!gpu) return -ENXIO; + if (args->pad) + return -EINVAL; + + if (to_msm_vm(ctx->vm)->unusable) + return UERR(EPIPE, dev, "context is unusable"); + /* for now, we just have 3d pipe.. 
eventually this would need to * be more clever to dispatch to appropriate gpu module: */ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0) - return -EINVAL; + return UERR(EINVAL, dev, "invalid pipe"); if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) - return -EINVAL; + return UERR(EINVAL, dev, "invalid flags"); if (args->flags & MSM_SUBMIT_SUDO) { if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) || @@ -447,166 +592,263 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (!queue) return -ENOENT; - /* Get a unique identifier for the submission for logging purposes */ - submitid = atomic_inc_return(&ident) - 1; + if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) { + ret = UERR(EINVAL, dev, "Invalid queue type"); + goto out_post_unlock; + } + + ring = gpu->rb[queue->ring_nr]; + + if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) { + ret = out_fence_fd; + goto out_post_unlock; + } + } + + submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds, + file->client_id); + if (IS_ERR(submit)) { + ret = PTR_ERR(submit); + goto out_post_unlock; + } - ring = gpu->rb[queue->prio]; - trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid, + trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident, args->nr_bos, args->nr_cmds); + ret = mutex_lock_interruptible(&queue->lock); + if (ret) + goto out_post_unlock; + + if (args->flags & MSM_SUBMIT_SUDO) + submit->in_rb = true; + if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { struct dma_fence *in_fence; in_fence = sync_file_get_fence(args->fence_fd); - if (!in_fence) - return -EINVAL; - - /* - * Wait if the fence is from a foreign context, or if the fence - * array contains any fence from a foreign context. - */ - ret = 0; - if (!dma_fence_match_context(in_fence, ring->fctx->context)) - ret = dma_fence_wait(in_fence, true); + if (!in_fence) { + ret = UERR(EINVAL, dev, "invalid in-fence"); + goto out_unlock; + } - dma_fence_put(in_fence); + ret = drm_sched_job_add_dependency(&submit->base, in_fence); if (ret) - return ret; + goto out_unlock; } - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { - out_fence_fd = get_unused_fd_flags(O_CLOEXEC); - if (out_fence_fd < 0) { - ret = out_fence_fd; + if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) { + syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base, + file, args->in_syncobjs, + args->nr_in_syncobjs, + args->syncobj_stride); + if (IS_ERR(syncobjs_to_reset)) { + ret = PTR_ERR(syncobjs_to_reset); goto out_unlock; } } - submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); - if (!submit) { - ret = -ENOMEM; - goto out_unlock; + if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) { + post_deps = msm_syncobj_parse_post_deps(dev, file, + args->out_syncobjs, + args->nr_out_syncobjs, + args->syncobj_stride); + if (IS_ERR(post_deps)) { + ret = PTR_ERR(post_deps); + goto out_unlock; + } } - submit->pid = pid; - submit->ident = submitid; - - if (args->flags & MSM_SUBMIT_SUDO) - submit->in_rb = true; - ret = submit_lookup_objects(submit, args, file); if (ret) goto out; - ret = submit_lock_objects(submit); + ret = submit_lookup_cmds(submit, args, file); if (ret) goto out; - ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); + /* copy_*_user while holding a ww ticket upsets lockdep */ + ret = submit_lock_objects(submit); if (ret) goto out; + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { + ret = submit_fence_sync(submit); + if (ret) + goto out; 
+ } + ret = submit_pin_objects(submit); if (ret) goto out; - for (i = 0; i < args->nr_cmds; i++) { - struct drm_msm_gem_submit_cmd submit_cmd; - void __user *userptr = - u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); - struct msm_gem_object *msm_obj; + cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds; + + for (i = 0; i < cmds_to_parse; i++) { + struct drm_gem_object *obj; uint64_t iova; - ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); - if (ret) { - ret = -EFAULT; + ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova); + if (ret) goto out; - } - /* validate input from userspace: */ - switch (submit_cmd.type) { - case MSM_SUBMIT_CMD_BUF: - case MSM_SUBMIT_CMD_IB_TARGET_BUF: - case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - break; - default: - DRM_ERROR("invalid type: %08x\n", submit_cmd.type); - ret = -EINVAL; + if (!submit->cmd[i].size || + (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) { + ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n", + submit->cmd[i].size * 4); goto out; } - ret = submit_bo(submit, submit_cmd.submit_idx, - &msm_obj, &iova, NULL); - if (ret) - goto out; + submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); - if (submit_cmd.size % 4) { - DRM_ERROR("non-aligned cmdstream buffer size: %u\n", - submit_cmd.size); - ret = -EINVAL; - goto out; - } + if (likely(!submit->cmd[i].nr_relocs)) + continue; - if (!submit_cmd.size || - ((submit_cmd.size + submit_cmd.submit_offset) > - msm_obj->base.size)) { - DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); - ret = -EINVAL; + if (!gpu->allow_relocs) { + ret = UERR(EINVAL, dev, "relocs not allowed\n"); goto out; } - submit->cmd[i].type = submit_cmd.type; - submit->cmd[i].size = submit_cmd.size / 4; - submit->cmd[i].iova = iova + submit_cmd.submit_offset; - submit->cmd[i].idx = submit_cmd.submit_idx; - - if (submit->valid) - continue; - - ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset, - submit_cmd.nr_relocs, submit_cmd.relocs); + ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4, + submit->cmd[i].nr_relocs, submit->cmd[i].relocs); if (ret) goto out; } - submit->nr_cmds = i; + submit->nr_cmds = args->nr_cmds; + + idr_preload(GFP_KERNEL); + + spin_lock(&queue->idr_lock); - submit->fence = msm_fence_alloc(ring->fctx); - if (IS_ERR(submit->fence)) { - ret = PTR_ERR(submit->fence); - submit->fence = NULL; + /* + * If using userspace provided seqno fence, validate that the id + * is available before arming sched job. Since access to fence_idr + * is serialized on the queue lock, the slot should be still avail + * after the job is armed + */ + if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) && + (!args->fence || idr_find(&queue->fence_idr, args->fence))) { + spin_unlock(&queue->idr_lock); + idr_preload_end(); + ret = UERR(EINVAL, dev, "invalid in-fence-sn"); goto out; } - if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { - sync_file = sync_file_create(submit->fence); - if (!sync_file) { + drm_sched_job_arm(&submit->base); + + submit->user_fence = dma_fence_get(&submit->base.s_fence->finished); + + if (args->flags & MSM_SUBMIT_FENCE_SN_IN) { + /* + * Userspace has assigned the seqno fence that it wants + * us to use. It is an error to pick a fence sequence + * number that is not available. 
+ */ + submit->fence_id = args->fence; + ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence, + &submit->fence_id, submit->fence_id, + GFP_NOWAIT); + /* + * We've already validated that the fence_id slot is valid, + * so if idr_alloc_u32 failed, it is a kernel bug + */ + WARN_ON(ret); + } else { + /* + * Allocate an id which can be used by WAIT_FENCE ioctl to map + * back to the underlying fence. + */ + submit->fence_id = idr_alloc_cyclic(&queue->fence_idr, + submit->user_fence, 1, + INT_MAX, GFP_NOWAIT); + } + + spin_unlock(&queue->idr_lock); + idr_preload_end(); + + if (submit->fence_id < 0) { + ret = submit->fence_id; + submit->fence_id = 0; + } + + if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { + sync_file = sync_file_create(submit->user_fence); + if (!sync_file) ret = -ENOMEM; + } + + if (ret) + goto out; + + submit_attach_object_fences(submit); + + if (msm_context_is_vmbind(ctx)) { + /* + * If we are not using VM_BIND, submit_pin_vmas() will validate + * just the BOs attached to the submit. In that case we don't + * need to validate the _entire_ vm, because userspace tracked + * what BOs are associated with the submit. + */ + ret = drm_gpuvm_validate(submit->vm, &submit->exec); + if (ret) goto out; - } } - msm_gpu_submit(gpu, submit, ctx); + /* The scheduler owns a ref now: */ + msm_gem_submit_get(submit); - args->fence = submit->fence->seqno; + msm_rd_dump_submit(priv->rd, submit, NULL); - if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { + drm_sched_entity_push_job(&submit->base); + + args->fence = submit->fence_id; + queue->last_fence = submit->fence_id; + + msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs); + msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence); + +out: + submit_cleanup(submit, !!ret); +out_unlock: + mutex_unlock(&queue->lock); +out_post_unlock: + if (ret) { + if (out_fence_fd >= 0) + put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); + } else if (sync_file) { fd_install(out_fence_fd, sync_file->file); args->fence_fd = out_fence_fd; } -out: - submit_cleanup(submit); - if (ret) - msm_gem_submit_free(submit); -out_unlock: - if (ret && (out_fence_fd >= 0)) - put_unused_fd(out_fence_fd); - mutex_unlock(&dev->struct_mutex); + if (!IS_ERR_OR_NULL(submit)) { + msm_gem_submit_put(submit); + } else { + /* + * If the submit hasn't yet taken ownership of the queue + * then we need to drop the reference ourself: + */ + msm_submitqueue_put(queue); + } + if (!IS_ERR_OR_NULL(post_deps)) { + for (i = 0; i < args->nr_out_syncobjs; ++i) { + kfree(post_deps[i].chain); + drm_syncobj_put(post_deps[i].syncobj); + } + kfree(post_deps); + } + + if (!IS_ERR_OR_NULL(syncobjs_to_reset)) { + for (i = 0; i < args->nr_in_syncobjs; ++i) { + if (syncobjs_to_reset[i]) + drm_syncobj_put(syncobjs_to_reset[i]); + } + kfree(syncobjs_to_reset); + } + return ret; } |
