Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_submit.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  | 869
1 file changed, 423 insertions(+), 446 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 44f84bfd0c0e..75d9f3574370 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,48 +17,67 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+#include "msm_syncobj.h"
+
+/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
+ * error msgs for debugging, but we don't spam dmesg by default
+ */
+#define SUBMIT_ERROR(err, submit, fmt, ...) \
+ UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
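
[Editor's note: UERR() is defined in the driver's shared headers and is not part of this diff. As a hedged sketch of the idea (the name uerr_sketch and its body are illustrative assumptions, not the real macro), an errno-returning, DRM_UT_DRIVER-gated helper looks roughly like this:

#include <drm/drm_device.h>
#include <drm/drm_print.h>

/*
 * Hedged sketch, not the driver's actual UERR(): drm_dbg() only prints
 * when the DRM_UT_DRIVER category is enabled (e.g. drm.debug=0x02), and
 * the statement-expression evaluates to the negative errno, so callers
 * can write "return uerr_sketch(EINVAL, dev, ...);" in one step.
 */
#define uerr_sketch(err, drm, fmt, ...) ({			\
		drm_dbg((drm), fmt, ##__VA_ARGS__);		\
		-(err);						\
	})
]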
/*
* Cmdstream submission:
*/
-/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000
-#define BO_PINNED 0x2000
-
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
- uint32_t nr_cmds)
+ uint32_t nr_cmds, u64 drm_client_id)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
- uint64_t sz = struct_size(submit, bos, nr_bos) +
- ((u64)nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz;
+ int ret;
+
+ sz = struct_size(submit, bos, nr_bos) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ submit->hw_fence = msm_fence_alloc();
+ if (IS_ERR(submit->hw_fence)) {
+ ret = PTR_ERR(submit->hw_fence);
+ kfree(submit);
+ return ERR_PTR(ret);
+ }
+
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
+ drm_client_id);
+ if (ret) {
+ kfree(submit->hw_fence);
+ kfree(submit);
+ return ERR_PTR(ret);
+ }
kref_init(&submit->ref);
submit->dev = dev;
- submit->aspace = queue->ctx->aspace;
+ submit->vm = msm_context_vm(dev, queue->ctx);
submit->gpu = gpu;
- submit->fence = NULL;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
- submit->ring = gpu->rb[queue->prio];
+ submit->pid = get_pid(task_pid(current));
+ submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
- /* initially, until copy_from_user() and bo lookup succeeds: */
- submit->nr_bos = 0;
- submit->nr_cmds = 0;
+ /* Get a unique identifier for the submission for logging purposes */
+ submit->ident = atomic_inc_return(&ident) - 1;
INIT_LIST_HEAD(&submit->node);
- INIT_LIST_HEAD(&submit->bo_list);
return submit;
}
@@ -68,7 +88,36 @@ void __msm_gem_submit_destroy(struct kref *kref)
container_of(kref, struct msm_gem_submit, ref);
unsigned i;
- dma_fence_put(submit->fence);
+ /*
+ * In error paths, we could unref the submit without calling
+ * drm_sched_entity_push_job(), so msm_job_free() will never
+ * get called. Since drm_sched_job_cleanup() will NULL out
+ * s_fence, we can use that to detect this case.
+ */
+ if (submit->base.s_fence)
+ drm_sched_job_cleanup(&submit->base);
+
+ if (submit->fence_id) {
+ spin_lock(&submit->queue->idr_lock);
+ idr_remove(&submit->queue->fence_idr, submit->fence_id);
+ spin_unlock(&submit->queue->idr_lock);
+ }
+
+ dma_fence_put(submit->user_fence);
+
+ /*
+ * If the submit is freed before msm_job_run(), then hw_fence is
+ * just some pre-allocated memory, not a reference counted fence.
+ * Once the job runs and the hw_fence is initialized, it will
+ * have a refcount of at least one, since the submit holds a ref
+ * to the hw_fence.
+ */
+ if (kref_read(&submit->hw_fence->refcount) == 0) {
+ kfree(submit->hw_fence);
+ } else {
+ dma_fence_put(submit->hw_fence);
+ }
+
put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
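
[Editor's note: the hw_fence branch above leans on the fact that dma_fence_init() is what first brings the embedded kref from zero to one. A hedged sketch of that invariant, with hypothetical names and assuming a zeroing allocator:

#include <linux/dma-fence.h>
#include <linux/slab.h>

/* Hypothetical fence that is allocated early but initialized lazily. */
struct prealloc_fence {
	struct dma_fence base;	/* refcount stays 0 until dma_fence_init() */
};

static void prealloc_fence_release(struct prealloc_fence *f)
{
	/*
	 * kzalloc() left base.refcount at zero; dma_fence_init() (called
	 * only once the job actually runs) sets it to one. So a zero
	 * refcount means "still just memory", anything else is a live,
	 * reference-counted fence.
	 */
	if (kref_read(&f->base.refcount) == 0)
		kfree(f);
	else
		dma_fence_put(&f->base);
}
]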
@@ -105,48 +154,32 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
i = 0;
goto out;
}
submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
- /* in validate_objects() we figure out if this is true: */
- submit->bos[i].iova = submit_bo.presumed;
}
spin_lock(&file->table_lock);
for (i = 0; i < args->nr_bos; i++) {
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
- ret = -EINVAL;
- goto out_unlock;
- }
-
- msm_obj = to_msm_bo(obj);
-
- if (!list_empty(&msm_obj->submit_entry)) {
- DRM_ERROR("handle %u at index %u already on submit list\n",
- submit->bos[i].handle, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
goto out_unlock;
}
drm_gem_object_get(obj);
- submit->bos[i].obj = msm_obj;
-
- list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
+ submit->bos[i].obj = obj;
}
out_unlock:
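
[Editor's note: for contrast, the per-handle path this loop deliberately avoids is sketched below; drm_gem_object_lookup() takes and drops file->table_lock on every call, while the bulk loop above holds the lock once and hits object_idr directly. A hedged sketch with a hypothetical helper name:

#include <drm/drm_file.h>
#include <drm/drm_gem.h>

/* Hedged sketch: one table_lock round trip per handle. */
static int lookup_handles_slowly(struct drm_file *file, const u32 *handles,
				 struct drm_gem_object **objs, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		objs[i] = drm_gem_object_lookup(file, handles[i]);
		if (!objs[i])
			return -EINVAL;	/* caller unrefs objs[0..i-1] */
	}
	return 0;
}
]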
@@ -161,7 +194,9 @@ out:
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
- unsigned i, sz;
+ struct msm_context *ctx = file->driver_priv;
+ unsigned i;
+ size_t sz;
int ret = 0;
for (i = 0; i < args->nr_cmds; i++) {
@@ -182,17 +217,29 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
}
if (submit_cmd.size % 4) {
- DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
goto out;
}
+ if (msm_context_is_vmbind(ctx)) {
+ if (submit_cmd.nr_relocs) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
+ goto out;
+ }
+
+ if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
+ ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
+ goto out;
+ }
+
+ submit->cmd[i].iova = submit_cmd.iova;
+ }
+
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].offset = submit_cmd.submit_offset / 4;
@@ -208,7 +255,11 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
ret = -ENOMEM;
goto out;
}
- submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+ if (!submit->cmd[i].relocs) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
if (ret) {
ret = -EFAULT;
@@ -220,97 +271,76 @@ out:
return ret;
}
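
[Editor's note: the reloc-array handling above follows the standard kernel pattern for copying a user-controlled array: overflow-checked sizing, a __GFP_NOWARN allocation (the count comes from userspace, so allocation failure is not a kernel bug worth a splat), then copy_from_user(). A hedged, generic sketch with hypothetical names:

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hedged sketch: overflow-safe copy of a user-sized array. */
static void *copy_user_array_sketch(const void __user *uptr,
				    size_t count, size_t elem_size)
{
	size_t sz = size_mul(elem_size, count);	/* SIZE_MAX on overflow */
	void *buf;

	if (!count || sz == SIZE_MAX)
		return ERR_PTR(-EINVAL);

	buf = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(buf, uptr, sz)) {
		kfree(buf);
		return ERR_PTR(-EFAULT);
	}
	return buf;
}
]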
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
- int i, bool backoff)
+static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
+ struct drm_exec *exec = &submit->exec;
+ int ret = 0;
- if (submit->bos[i].flags & BO_PINNED)
- msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace);
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
- if (submit->bos[i].flags & BO_LOCKED)
- dma_resv_unlock(msm_obj->base.resv);
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
- if (backoff && !(submit->bos[i].flags & BO_VALID))
- submit->bos[i].iova = 0;
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
+ }
- submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
+ return ret;
}
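
[Editor's note: drm_exec_until_all_locked() packages the ww-mutex acquire/backoff/retry dance that the removed hand-rolled loop further down implemented manually: on contention, drm_exec_retry_on_contention() drops every lock taken so far and restarts the body. A hedged sketch of the general idiom outside this driver:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/* Hedged sketch of the drm_exec locking idiom. */
static int lock_all_sketch(struct drm_exec *exec,
			   struct drm_gem_object **objs, unsigned int n)
{
	int ret = 0;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, n);
	drm_exec_until_all_locked(exec) {
		for (unsigned int i = 0; i < n; i++) {
			/* lock the BO and reserve one fence slot on it */
			ret = drm_exec_prepare_obj(exec, objs[i], 1);
			drm_exec_retry_on_contention(exec);	/* -EDEADLK: restart */
			if (ret)
				break;
		}
	}
	if (ret)
		drm_exec_fini(exec);	/* the driver above defers this to submit_cleanup() */
	return ret;
}
]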
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
- int contended, slow_locked = -1, i, ret = 0;
-
-retry:
- for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ int ret = 0;
- if (slow_locked == i)
- slow_locked = -1;
+ if (msm_context_is_vmbind(submit->queue->ctx))
+ return submit_lock_objects_vmbind(submit);
- contended = i;
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
- if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = dma_resv_lock_interruptible(msm_obj->base.resv,
- &submit->ticket);
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_exec_lock_obj(&submit->exec,
+ drm_gpuvm_resv_obj(submit->vm));
+ drm_exec_retry_on_contention(&submit->exec);
+ if (ret)
+ break;
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
+ drm_exec_retry_on_contention(&submit->exec);
if (ret)
- goto fail;
- submit->bos[i].flags |= BO_LOCKED;
- }
- }
-
- ww_acquire_done(&submit->ticket);
-
- return 0;
-
-fail:
- for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i, true);
-
- if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked, true);
-
- if (ret == -EDEADLK) {
- struct msm_gem_object *msm_obj = submit->bos[contended].obj;
- /* we lost out in a seqno race, lock and retry.. */
- ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
- &submit->ticket);
- if (!ret) {
- submit->bos[contended].flags |= BO_LOCKED;
- slow_locked = contended;
- goto retry;
+ break;
}
}
return ret;
}
-static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
+static int submit_fence_sync(struct msm_gem_submit *submit)
{
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
- if (!write) {
- /* NOTE: _reserve_shared() must happen before
- * _add_shared_fence(), which makes this a slightly
- * strange place to call it. OTOH this is a
- * convenient can-fail point to hook it in.
- */
- ret = dma_resv_reserve_shared(msm_obj->base.resv,
- 1);
- if (ret)
- return ret;
- }
-
- if (no_implicit)
+ /* Otherwise userspace can ask for implicit sync to be
+ * disabled on specific buffers. This is useful for internal
+ * usermode driver managed buffers, suballocation, etc.
+ */
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
continue;
- ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
- write);
+ ret = drm_sched_job_add_implicit_dependencies(&submit->base,
+ obj,
+ write);
if (ret)
break;
}
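
[Editor's note: drm_sched_job_add_implicit_dependencies() replaces the driver-local fence walk: it pulls the relevant fences off the BO's reservation object and registers each one as a scheduler dependency, so the job won't run until they signal. A hedged sketch of approximately what it does internally (the resv must already be locked, which submit_lock_objects() guarantees here):

#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-resv.h>

/* Hedged sketch: roughly what the helper does under the hood. */
static int add_implicit_deps_sketch(struct drm_sched_job *job,
				    struct drm_gem_object *obj, bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	/* writers wait on readers and writers; readers only on writers */
	dma_resv_for_each_fence(&cursor, obj->resv,
				dma_resv_usage_rw(write), fence) {
		/* add_dependency() consumes the ref, success or failure */
		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
		if (ret)
			return ret;
	}
	return 0;
}
]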
@@ -320,75 +350,121 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
static int submit_pin_objects(struct msm_gem_submit *submit)
{
+ struct msm_drm_private *priv = submit->dev->dev_private;
int i, ret = 0;
- submit->valid = true;
-
for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint64_t iova;
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ struct drm_gpuva *vma;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_and_pin_iova_locked(&msm_obj->base,
- submit->aspace, &iova);
+ vma = msm_gem_get_vma_locked(obj, submit->vm);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ break;
+ }
+ ret = msm_gem_pin_vma_locked(obj, vma);
if (ret)
break;
- submit->bos[i].flags |= BO_PINNED;
+ submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
+ submit->bos[i].iova = vma->va.addr;
+ }
- if (iova == submit->bos[i].iova) {
- submit->bos[i].flags |= BO_VALID;
- } else {
- submit->bos[i].iova = iova;
- /* iova changed, so address in cmdstream is not valid: */
- submit->bos[i].flags &= ~BO_VALID;
- submit->valid = false;
- }
+ /*
+ * A second loop while holding the LRU lock (a) avoids acquiring/dropping
+ * the LRU lock for each individual bo, and (b) avoids holding the LRU
+ * lock while calling msm_gem_pin_vma_locked() (which could trigger
+ * get_pages(), which could trigger reclaim; if we held the LRU lock,
+ * that could deadlock with the shrinker).
+ */
+ mutex_lock(&priv->lru.lock);
+ for (i = 0; i < submit->nr_bos; i++) {
+ msm_gem_pin_obj_locked(submit->bos[i].obj);
}
+ mutex_unlock(&priv->lru.lock);
+
+ submit->bos_pinned = true;
return ret;
}
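
[Editor's note: the two-loop shape is the point of the comment above: anything that can allocate (and therefore recurse into reclaim and the shrinker) must run before the LRU lock is taken, and the lock is then taken once for the whole array rather than per BO. A hedged, driver-agnostic sketch of the pattern with hypothetical callbacks:

#include <linux/mutex.h>

struct pin_obj;	/* hypothetical object type */

/* Hedged sketch: allocate-capable phase first, locked phase second. */
static int pin_all_sketch(struct mutex *lru_lock, struct pin_obj **objs,
			  int n, int (*prepare)(struct pin_obj *),
			  void (*mark_pinned)(struct pin_obj *))
{
	int i, ret;

	/* phase 1: may allocate / trigger reclaim, so no lru_lock held */
	for (i = 0; i < n; i++) {
		ret = prepare(objs[i]);
		if (ret)
			return ret;
	}

	/* phase 2: cheap bookkeeping, one lock round trip for all BOs */
	mutex_lock(lru_lock);
	for (i = 0; i < n; i++)
		mark_pinned(objs[i]);
	mutex_unlock(lru_lock);

	return 0;
}
]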
+static void submit_unpin_objects(struct msm_gem_submit *submit)
+{
+ if (!submit->bos_pinned)
+ return;
+
+ for (int i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+
+ msm_gem_unpin_locked(obj);
+ }
+
+ submit->bos_pinned = false;
+}
+
+static void submit_attach_object_fences(struct msm_gem_submit *submit)
+{
+ struct msm_gem_vm *vm = to_msm_vm(submit->vm);
+ struct dma_fence *last_fence;
+
+ if (msm_context_is_vmbind(submit->queue->ctx)) {
+ drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
+ submit->user_fence,
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
+
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
+
+ return;
+ }
+
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_WRITE);
+ else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_READ);
+ }
+}
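
[Editor's note: the split between the two paths maps onto dma-resv usage classes: the VM_BIND path attaches the fence as DMA_RESV_USAGE_BOOKKEEP, which implicit sync never waits on (userspace synchronizes explicitly), while the legacy path uses WRITE/READ so implicit sync sees it. A hedged sketch of how a later waiter observes this:

#include <linux/dma-resv.h>
#include <linux/sched.h>

/*
 * Hedged sketch: the usage enum is ordered KERNEL < WRITE < READ <
 * BOOKKEEP, and waiting with usage X covers all fences at X or below.
 * So waiting with DMA_RESV_USAGE_WRITE sees the WRITE fence attached
 * above, but never the BOOKKEEP fences from the VM_BIND path.
 */
static long wait_for_writers_sketch(struct dma_resv *resv)
{
	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE,
				     true /* interruptible */,
				     MAX_SCHEDULE_TIMEOUT);
}
]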
+
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint64_t *iova, bool *valid)
+ struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
- DRM_ERROR("invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
}
if (obj)
*obj = submit->bos[idx].obj;
if (iova)
*iova = submit->bos[idx].iova;
- if (valid)
- *valid = !!(submit->bos[idx].flags & BO_VALID);
return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
-static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
+static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
int ret = 0;
- if (!nr_relocs)
- return 0;
-
- if (offset % 4) {
- DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
- return -EINVAL;
- }
+ if (offset % 4)
+ return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);
/* For now, just map the entire thing. Eventually we probably want
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_get_vaddr_locked(&obj->base);
+ ptr = msm_gem_get_vaddr_locked(obj);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -400,32 +476,26 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
uint32_t off;
uint64_t iova;
- bool valid;
if (submit_reloc.submit_offset % 4) {
- DRM_ERROR("non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
goto out;
}
/* offset in dwords: */
off = submit_reloc.submit_offset / 4;
- if ((off >= (obj->base.size / 4)) ||
+ if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
goto out;
}
- ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
if (ret)
goto out;
- if (valid)
- continue;
-
iova += submit_reloc.reloc_offset;
if (submit_reloc.shift < 0)
@@ -439,236 +509,78 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
}
out:
- msm_gem_put_vaddr_locked(&obj->base);
+ msm_gem_put_vaddr_locked(obj);
return ret;
}
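
[Editor's note: each reloc boils down to a single dword store into the mapped cmdstream; the shift/OR handling sits in context lines elided from this hunk. A hedged sketch of the per-entry patch, with hypothetical parameter names:

#include <linux/kernel.h>

/* Hedged sketch of one reloc patch: base iova + offset, shifted, OR'd. */
static void patch_one_reloc_sketch(u32 *cmds, u32 dword_off, u64 iova,
				   u64 reloc_offset, s32 shift, u32 or_bits)
{
	u64 val = iova + reloc_offset;

	if (shift < 0)		/* negative shift means shift right */
		val >>= -shift;
	else
		val <<= shift;

	cmds[dword_off] = lower_32_bits(val) | or_bits;
}
]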
-static void submit_cleanup(struct msm_gem_submit *submit)
-{
- unsigned i;
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct msm_gem_object *msm_obj = submit->bos[i].obj;
- submit_unlock_unpin_bo(submit, i, false);
- list_del_init(&msm_obj->submit_entry);
- drm_gem_object_put_locked(&msm_obj->base);
- }
-}
-
-
-struct msm_submit_post_dep {
- struct drm_syncobj *syncobj;
- uint64_t point;
- struct dma_fence_chain *chain;
-};
-
-static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
- struct drm_file *file,
- uint64_t in_syncobjs_addr,
- uint32_t nr_in_syncobjs,
- size_t syncobj_stride,
- struct msm_ringbuffer *ring)
+/* Cleanup submit at end of ioctl. In the error case, this also drops
+ * references, unpins, and drops active refcnt. In the non-error case,
+ * this is done when the submit is retired.
+ */
+static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
- struct drm_syncobj **syncobjs = NULL;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
+ if (error)
+ submit_unpin_objects(submit);
- syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!syncobjs)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_in_syncobjs; ++i) {
- uint64_t address = in_syncobjs_addr + i * syncobj_stride;
- struct dma_fence *fence;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- if (syncobj_desc.point &&
- !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -EINVAL;
- break;
- }
-
- ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
- syncobj_desc.point, 0, &fence);
- if (ret)
- break;
-
- if (!dma_fence_match_context(fence, ring->fctx->context))
- ret = dma_fence_wait(fence, true);
-
- dma_fence_put(fence);
- if (ret)
- break;
-
- if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
- syncobjs[i] =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!syncobjs[i]) {
- ret = -EINVAL;
- break;
- }
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- if (syncobjs[j])
- drm_syncobj_put(syncobjs[j]);
- }
- kfree(syncobjs);
- return ERR_PTR(ret);
- }
- return syncobjs;
-}
-
-static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
- uint32_t nr_syncobjs)
-{
- uint32_t i;
+ if (submit->exec.objects)
+ drm_exec_fini(&submit->exec);
- for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
- if (syncobjs[i])
- drm_syncobj_replace_fence(syncobjs[i], NULL);
- }
+ /* if job wasn't enqueued to scheduler, early retirement: */
+ if (error)
+ msm_submit_retire(submit);
}
-static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
- struct drm_file *file,
- uint64_t syncobjs_addr,
- uint32_t nr_syncobjs,
- size_t syncobj_stride)
+void msm_submit_retire(struct msm_gem_submit *submit)
{
- struct msm_submit_post_dep *post_deps;
- struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
- int ret = 0;
- uint32_t i, j;
-
- post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!post_deps)
- return ERR_PTR(-ENOMEM);
+ int i;
- for (i = 0; i < nr_syncobjs; ++i) {
- uint64_t address = syncobjs_addr + i * syncobj_stride;
-
- if (copy_from_user(&syncobj_desc,
- u64_to_user_ptr(address),
- min(syncobj_stride, sizeof(syncobj_desc)))) {
- ret = -EFAULT;
- break;
- }
-
- post_deps[i].point = syncobj_desc.point;
- post_deps[i].chain = NULL;
-
- if (syncobj_desc.flags) {
- ret = -EINVAL;
- break;
- }
-
- if (syncobj_desc.point) {
- if (!drm_core_check_feature(dev,
- DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
- break;
- }
-
- post_deps[i].chain =
- kmalloc(sizeof(*post_deps[i].chain),
- GFP_KERNEL);
- if (!post_deps[i].chain) {
- ret = -ENOMEM;
- break;
- }
- }
-
- post_deps[i].syncobj =
- drm_syncobj_find(file, syncobj_desc.handle);
- if (!post_deps[i].syncobj) {
- ret = -EINVAL;
- break;
- }
- }
-
- if (ret) {
- for (j = 0; j <= i; ++j) {
- kfree(post_deps[j].chain);
- if (post_deps[j].syncobj)
- drm_syncobj_put(post_deps[j].syncobj);
- }
-
- kfree(post_deps);
- return ERR_PTR(ret);
- }
-
- return post_deps;
-}
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo;
-static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
- uint32_t count, struct dma_fence *fence)
-{
- uint32_t i;
-
- for (i = 0; post_deps && i < count; ++i) {
- if (post_deps[i].chain) {
- drm_syncobj_add_point(post_deps[i].syncobj,
- post_deps[i].chain,
- fence, post_deps[i].point);
- post_deps[i].chain = NULL;
- } else {
- drm_syncobj_replace_fence(post_deps[i].syncobj,
- fence);
- }
+ msm_gem_lock(obj);
+ drm_gpuvm_bo_put(vm_bo);
+ msm_gem_unlock(obj);
+ drm_gem_object_put(obj);
}
}
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
- static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
- struct msm_file_private *ctx = file->driver_priv;
- struct msm_gem_submit *submit;
+ struct msm_context *ctx = file->driver_priv;
+ struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
- struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
- struct msm_submit_post_dep *post_deps = NULL;
+ struct msm_syncobj_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
+ unsigned cmds_to_parse;
int out_fence_fd = -1;
- struct pid *pid = get_pid(task_pid(current));
- bool has_ww_ticket = false;
unsigned i;
- int ret, submitid;
+ int ret;
+
if (!gpu)
return -ENXIO;
if (args->pad)
return -EINVAL;
+ if (to_msm_vm(ctx->vm)->unusable)
+ return UERR(EPIPE, dev, "context is unusable");
+
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid pipe");
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid flags");
if (args->flags & MSM_SUBMIT_SUDO) {
if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
@@ -680,183 +592,248 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
- /* Get a unique identifier for the submission for logging purposes */
- submitid = atomic_inc_return(&ident) - 1;
+ if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
+ ret = UERR(EINVAL, dev, "Invalid queue type");
+ goto out_post_unlock;
+ }
+
+ ring = gpu->rb[queue->ring_nr];
+
+ if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_post_unlock;
+ }
+ }
+
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
+ file->client_id);
+ if (IS_ERR(submit)) {
+ ret = PTR_ERR(submit);
+ goto out_post_unlock;
+ }
- ring = gpu->rb[queue->prio];
- trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+ trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
args->nr_bos, args->nr_cmds);
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
+
+ if (args->flags & MSM_SUBMIT_SUDO)
+ submit->in_rb = true;
+
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
struct dma_fence *in_fence;
in_fence = sync_file_get_fence(args->fence_fd);
- if (!in_fence)
- return -EINVAL;
-
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- ret = 0;
- if (!dma_fence_match_context(in_fence, ring->fctx->context))
- ret = dma_fence_wait(in_fence, true);
+ if (!in_fence) {
+ ret = UERR(EINVAL, dev, "invalid in-fence");
+ goto out_unlock;
+ }
- dma_fence_put(in_fence);
+ ret = drm_sched_job_add_dependency(&submit->base, in_fence);
if (ret)
- return ret;
+ goto out_unlock;
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
- syncobjs_to_reset = msm_wait_deps(dev, file,
- args->in_syncobjs,
- args->nr_in_syncobjs,
- args->syncobj_stride, ring);
- if (IS_ERR(syncobjs_to_reset))
- return PTR_ERR(syncobjs_to_reset);
+ syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base,
+ file, args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(syncobjs_to_reset)) {
+ ret = PTR_ERR(syncobjs_to_reset);
+ goto out_unlock;
+ }
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
- post_deps = msm_parse_post_deps(dev, file,
- args->out_syncobjs,
- args->nr_out_syncobjs,
- args->syncobj_stride);
+ post_deps = msm_syncobj_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps);
- goto out_post_unlock;
- }
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out_post_unlock;
-
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
- if (out_fence_fd < 0) {
- ret = out_fence_fd;
goto out_unlock;
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos,
- args->nr_cmds);
- if (!submit) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- submit->pid = pid;
- submit->ident = submitid;
-
- if (args->flags & MSM_SUBMIT_SUDO)
- submit->in_rb = true;
-
ret = submit_lookup_objects(submit, args, file);
if (ret)
- goto out_pre_pm;
+ goto out;
ret = submit_lookup_cmds(submit, args, file);
if (ret)
- goto out_pre_pm;
-
- /*
- * Thanks to dev_pm_opp opp_table_lock interactions with mm->mmap_sem
- * in the resume path, we need to to rpm get before we lock objs.
- * Which unfortunately might involve powering up the GPU sooner than
- * is necessary. But at least in the explicit fencing case, we will
- * have already done all the fence waiting.
- */
- pm_runtime_get_sync(&gpu->pdev->dev);
+ goto out;
/* copy_*_user while holding a ww ticket upsets lockdep */
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
- has_ww_ticket = true;
ret = submit_lock_objects(submit);
if (ret)
goto out;
- ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
- if (ret)
- goto out;
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto out;
+ }
ret = submit_pin_objects(submit);
if (ret)
goto out;
- for (i = 0; i < args->nr_cmds; i++) {
- struct msm_gem_object *msm_obj;
+ cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;
+
+ for (i = 0; i < cmds_to_parse; i++) {
+ struct drm_gem_object *obj;
uint64_t iova;
- ret = submit_bo(submit, submit->cmd[i].idx,
- &msm_obj, &iova, NULL);
+ ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
if (ret)
goto out;
if (!submit->cmd[i].size ||
- ((submit->cmd[i].size + submit->cmd[i].offset) >
- msm_obj->base.size / 4)) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
- ret = -EINVAL;
+ (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
+ ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
+ submit->cmd[i].size * 4);
goto out;
}
submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
- if (submit->valid)
+ if (likely(!submit->cmd[i].nr_relocs))
continue;
- ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
+ if (!gpu->allow_relocs) {
+ ret = UERR(EINVAL, dev, "relocs not allowed\n");
+ goto out;
+ }
+
+ ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
if (ret)
goto out;
}
- submit->nr_cmds = i;
+ submit->nr_cmds = args->nr_cmds;
+
+ idr_preload(GFP_KERNEL);
+
+ spin_lock(&queue->idr_lock);
- submit->fence = msm_fence_alloc(ring->fctx);
- if (IS_ERR(submit->fence)) {
- ret = PTR_ERR(submit->fence);
- submit->fence = NULL;
+ /*
+ * If using a userspace-provided seqno fence, validate that the id
+ * is available before arming the sched job. Since access to fence_idr
+ * is serialized on the queue lock, the slot should still be available
+ * after the job is armed.
+ */
+ if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
+ (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
+ spin_unlock(&queue->idr_lock);
+ idr_preload_end();
+ ret = UERR(EINVAL, dev, "invalid in-fence-sn");
goto out;
}
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- sync_file = sync_file_create(submit->fence);
- if (!sync_file) {
+ drm_sched_job_arm(&submit->base);
+
+ submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
+
+ if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
+ /*
+ * Userspace has assigned the seqno fence that it wants
+ * us to use. It is an error to pick a fence sequence
+ * number that is not available.
+ */
+ submit->fence_id = args->fence;
+ ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
+ &submit->fence_id, submit->fence_id,
+ GFP_NOWAIT);
+ /*
+ * We've already validated that the fence_id slot is valid,
+ * so if idr_alloc_u32 failed, it is a kernel bug
+ */
+ WARN_ON(ret);
+ } else {
+ /*
+ * Allocate an id which can be used by WAIT_FENCE ioctl to map
+ * back to the underlying fence.
+ */
+ submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
+ submit->user_fence, 1,
+ INT_MAX, GFP_NOWAIT);
+ }
+
+ spin_unlock(&queue->idr_lock);
+ idr_preload_end();
+
+ if (submit->fence_id < 0) {
+ ret = submit->fence_id;
+ submit->fence_id = 0;
+ }
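
[Editor's note: both idr_alloc calls above run under the queue's idr_lock spinlock, hence GFP_NOWAIT; idr_preload() pre-charges the allocator per-CPU while sleeping is still allowed, so the NOWAIT allocation has memory to draw on. The bare pattern, as a hedged sketch:

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

/* Hedged sketch: preload while sleepable, insert under a spinlock. */
static int publish_id_sketch(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; fills per-cpu cache */
	spin_lock(lock);
	id = idr_alloc_cyclic(idr, ptr, 1, INT_MAX, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return id;	/* >= 1 on success, negative errno on failure */
}
]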
+
+ if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ sync_file = sync_file_create(submit->user_fence);
+ if (!sync_file)
ret = -ENOMEM;
- goto out;
- }
}
- msm_gpu_submit(gpu, submit);
+ if (ret)
+ goto out;
- args->fence = submit->fence->seqno;
+ submit_attach_object_fences(submit);
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
+ if (msm_context_is_vmbind(ctx)) {
+ /*
+ * If we are not using VM_BIND, submit_pin_objects() will validate
+ * just the BOs attached to the submit. In that case we don't
+ * need to validate the _entire_ vm, because userspace tracked
+ * what BOs are associated with the submit.
+ */
+ ret = drm_gpuvm_validate(submit->vm, &submit->exec);
+ if (ret)
+ goto out;
}
- msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
- msm_process_post_deps(post_deps, args->nr_out_syncobjs,
- submit->fence);
+ /* The scheduler owns a ref now: */
+ msm_gem_submit_get(submit);
+ msm_rd_dump_submit(priv->rd, submit, NULL);
+
+ drm_sched_entity_push_job(&submit->base);
+
+ args->fence = submit->fence_id;
+ queue->last_fence = submit->fence_id;
+
+ msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);
out:
- pm_runtime_put(&gpu->pdev->dev);
-out_pre_pm:
- submit_cleanup(submit);
- if (has_ww_ticket)
- ww_acquire_fini(&submit->ticket);
- msm_gem_submit_put(submit);
+ submit_cleanup(submit, !!ret);
out_unlock:
- if (ret && (out_fence_fd >= 0))
- put_unused_fd(out_fence_fd);
- mutex_unlock(&dev->struct_mutex);
-
+ mutex_unlock(&queue->lock);
out_post_unlock:
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
+ }
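
[Editor's note: the fd handling above follows the usual reserve/install split: get_unused_fd_flags() only reserves a number (and is the early, failable step), while fd_install() publishes the file to userspace and cannot be undone, so it is deferred to the success path. A hedged end-to-end sketch:

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Hedged sketch: export a fence as a sync_file fd, error-safe. */
static int export_fence_fd_sketch(struct dma_fence *fence, int *out_fd)
{
	struct sync_file *sf;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve only */
	if (fd < 0)
		return fd;

	sf = sync_file_create(fence);		/* takes its own fence ref */
	if (!sf) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sf->file);		/* irrevocable: last step */
	*out_fd = fd;
	return 0;
}
]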
+
+ if (!IS_ERR_OR_NULL(submit)) {
+ msm_gem_submit_put(submit);
+ } else {
+ /*
+ * If the submit hasn't yet taken ownership of the queue
+ * then we need to drop the reference ourself:
+ */
+ msm_submitqueue_put(queue);
+ }
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);